path: root/test/CodeGen/PowerPC
author     Dimitry Andric <dim@FreeBSD.org>    2015-05-27 18:44:32 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2015-05-27 18:44:32 +0000
commit     5a5ac124e1efaf208671f01c46edb15f29ed2a0b (patch)
tree       a6140557876943cdd800ee997c9317283394b22c /test/CodeGen/PowerPC
parent     f03b5bed27d0d2eafd68562ce14f8b5e3f1f0801 (diff)
download   src-5a5ac124e1efaf208671f01c46edb15f29ed2a0b.tar.gz
           src-5a5ac124e1efaf208671f01c46edb15f29ed2a0b.zip

Vendor import of llvm trunk r238337 (vendor/llvm/llvm-trunk-r238337)

Notes:
    svn path=/vendor/llvm/dist/; revision=283625
    svn path=/vendor/llvm/llvm-trunk-r238337/; revision=283626; tag=vendor/llvm/llvm-trunk-r238337
Diffstat (limited to 'test/CodeGen/PowerPC')
-rw-r--r--  test/CodeGen/PowerPC/2005-11-30-vastart-crash.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2006-01-20-ShiftPartsCrash.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2006-04-05-splat-ish.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2006-05-12-rlwimi-crash.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2006-08-15-SelectionCrash.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2006-10-13-Miscompile.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2006-10-17-brcc-miscompile.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2006-12-07-LargeAlloca.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2006-12-07-SelectCrash.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2007-01-15-AsmDialect.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/2007-01-31-InlineAsmAddrMode.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2007-02-23-lr-saved-twice.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2007-03-24-cntlzd.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll | 1594
-rw-r--r--  test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2007-05-22-tailmerge-3.ll | 26
-rw-r--r--  test/CodeGen/PowerPC/2007-06-28-BCCISelBug.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2007-08-04-CoalescerAssert.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2007-09-08-unaligned.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll | 14
-rw-r--r--  test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-02-05-LiveIntervalsAssert.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-03-05-RegScavengerAssert.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-03-17-RegScavengerCrash.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll | 14
-rw-r--r--  test/CodeGen/PowerPC/2008-06-21-F128LoadStore.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-06-23-LiveVariablesCrash.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-07-15-Bswap.ll | 98
-rw-r--r--  test/CodeGen/PowerPC/2008-07-15-SignExtendInreg.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-07-24-PPC64-CCBug.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll | 110
-rw-r--r--  test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2008-10-31-PPCF128Libcalls.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2009-03-17-LSRBug.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2009-11-15-ProcImpDefsBug.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2010-02-12-saveCR.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/2010-03-09-indirect-call.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll | 70
-rw-r--r--  test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll | 72
-rw-r--r--  test/CodeGen/PowerPC/2011-12-08-DemandedBitsMiscompile.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/2012-09-16-TOC-entry-check.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/2013-05-15-preinc-fold.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/2013-07-01-PHIElimBug.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/Atomics-64.ll | 160
-rw-r--r--  test/CodeGen/PowerPC/MergeConsecutiveStores.ll | 68
-rw-r--r--  test/CodeGen/PowerPC/a2-fp-basic.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/add-fi.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/addi-licm.ll | 19
-rw-r--r--  test/CodeGen/PowerPC/addi-reassoc.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/alias.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/and-branch.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/and-elim.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/anon_aggr.ll | 42
-rw-r--r--  test/CodeGen/PowerPC/asm-constraints.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/atomic-2.ll | 60
-rw-r--r--  test/CodeGen/PowerPC/atomics-fences.ll | 11
-rw-r--r--  test/CodeGen/PowerPC/atomics-indexed.ll | 37
-rw-r--r--  test/CodeGen/PowerPC/atomics.ll | 40
-rw-r--r--  test/CodeGen/PowerPC/bdzlr.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/bperm.ll | 32
-rw-r--r--  test/CodeGen/PowerPC/branch-opt.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/bswap-load-store.ll | 18
-rw-r--r--  test/CodeGen/PowerPC/buildvec_canonicalize.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/byval-aliased.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/cmpb-ppc32.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/cmpb.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/code-align.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/compare-simm.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/complex-return.ll | 36
-rw-r--r--  test/CodeGen/PowerPC/cr-spills.ll | 168
-rw-r--r--  test/CodeGen/PowerPC/cr1eq-no-extra-moves.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/cr1eq.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/cr_spilling.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/crbit-asm.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/crbits.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/crsave.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/crypto_bifs.ll | 275
-rw-r--r--  test/CodeGen/PowerPC/ctrloop-cpsgn.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/ctrloop-fp64.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/ctrloop-i64.ll | 16
-rw-r--r--  test/CodeGen/PowerPC/ctrloop-le.ll | 60
-rw-r--r--  test/CodeGen/PowerPC/ctrloop-lt.ll | 60
-rw-r--r--  test/CodeGen/PowerPC/ctrloop-ne.ll | 60
-rw-r--r--  test/CodeGen/PowerPC/ctrloop-s000.ll | 98
-rw-r--r--  test/CodeGen/PowerPC/ctrloop-sh.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/ctrloop-sums.ll | 14
-rw-r--r--  test/CodeGen/PowerPC/ctrloops.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/cttz-ctlz-spec.ll | 41
-rw-r--r--  test/CodeGen/PowerPC/cttz.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/dbg.ll | 36
-rw-r--r--  test/CodeGen/PowerPC/dcbt-sched.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/delete-node.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/div-e-32.ll | 31
-rw-r--r--  test/CodeGen/PowerPC/div-e-all.ll | 54
-rw-r--r--  test/CodeGen/PowerPC/dyn-alloca-aligned.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/early-ret.ll | 35
-rw-r--r--  test/CodeGen/PowerPC/ec-input.ll | 155
-rw-r--r--  test/CodeGen/PowerPC/empty-functions.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/emptystruct.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/eqv-andc-orc-nor.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/extra-toc-reg-deps.ll | 430
-rw-r--r--  test/CodeGen/PowerPC/f32-to-i64.ll | 23
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll | 18
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-binary.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-br-const.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-call.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-cmp-imm.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-const.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-conversion-p5.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-conversion.ll | 16
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-crash.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-ext.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-fold.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-icmp-split.ll | 72
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-indirectbr.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-load-store-vsx.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-load-store.ll | 26
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-redefinition.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-ret.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/fast-isel-shifter.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/floatPSA.ll | 30
-rw-r--r--  test/CodeGen/PowerPC/flt-preinc.ll | 40
-rw-r--r--  test/CodeGen/PowerPC/fma-assoc.ll | 196
-rw-r--r--  test/CodeGen/PowerPC/fma-ext.ll | 22
-rw-r--r--  test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll | 426
-rw-r--r--  test/CodeGen/PowerPC/fp-to-int-ext.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/frounds.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/glob-comp-aa-crash.ll | 16
-rw-r--r--  test/CodeGen/PowerPC/hello.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/hidden-vis-2.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/hidden-vis.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/htm.ll | 125
-rw-r--r--  test/CodeGen/PowerPC/i64_fp_round.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/ia-mem-r0.ll | 44
-rw-r--r--  test/CodeGen/PowerPC/indexed-load.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/indirectbr.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/inlineasm-i64-reg.ll | 20
-rw-r--r--  test/CodeGen/PowerPC/isel-rc-nox0.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/lbz-from-ld-shift.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/lbzux.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/ld-st-upd.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/ldtoc-inv.ll | 39
-rw-r--r--  test/CodeGen/PowerPC/lha.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/load-constant-addr.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/load-shift-combine.ll | 16
-rw-r--r--  test/CodeGen/PowerPC/long-compare.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/loop-data-prefetch-inner.ll | 66
-rw-r--r--  test/CodeGen/PowerPC/loop-data-prefetch.ll | 29
-rw-r--r--  test/CodeGen/PowerPC/loop-prep-all.ll | 48
-rw-r--r--  test/CodeGen/PowerPC/lsa.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/lsr-postinc-pos.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/mask64.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/mature-mc-support.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/mcm-1.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mcm-10.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mcm-11.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mcm-2.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mcm-3.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mcm-5.ll | 16
-rw-r--r--  test/CodeGen/PowerPC/mcm-6.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mcm-7.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mcm-8.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/mcm-9.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mcm-default.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mcm-obj-2.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/mcm-obj.ll | 119
-rw-r--r--  test/CodeGen/PowerPC/mem-rr-addr-mode.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/mem_update.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/memcpy-vec.ll | 110
-rw-r--r--  test/CodeGen/PowerPC/memset-nc-le.ll | 24
-rw-r--r--  test/CodeGen/PowerPC/memset-nc.ll | 48
-rw-r--r--  test/CodeGen/PowerPC/misched-inorder-latency.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/misched.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll | 46
-rw-r--r--  test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll | 46
-rw-r--r--  test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll | 9
-rw-r--r--  test/CodeGen/PowerPC/named-reg-alloc-r2.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/no-pref-jumps.ll | 36
-rw-r--r--  test/CodeGen/PowerPC/novrsave.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/optnone-crbits-i1-ret.ll | 37
-rw-r--r--  test/CodeGen/PowerPC/or-addressing-mode.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/p8-isel-sched.ll | 33
-rw-r--r--  test/CodeGen/PowerPC/pip-inner.ll | 52
-rw-r--r--  test/CodeGen/PowerPC/post-ra-ec.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/ppc-crbits-onoff.ll | 43
-rw-r--r--  test/CodeGen/PowerPC/ppc-empty-fs.ll | 32
-rw-r--r--  test/CodeGen/PowerPC/ppc-prologue.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/ppc32-cyclecounter.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/ppc32-i1-vaarg.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/ppc32-lshrti3.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/ppc32-pic-large.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/ppc32-pic.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/ppc440-fp-basic.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/ppc64-abi-extend.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/ppc64-align-long-double.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/ppc64-anyregcc.ll | 61
-rw-r--r--  test/CodeGen/PowerPC/ppc64-byval-align.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/ppc64-calls.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/ppc64-elf-abi.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/ppc64-fastcc-fast-isel.ll | 56
-rw-r--r--  test/CodeGen/PowerPC/ppc64-fastcc.ll | 540
-rw-r--r--  test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll | 47
-rw-r--r--  test/CodeGen/PowerPC/ppc64-gep-opt.ll | 40
-rw-r--r--  test/CodeGen/PowerPC/ppc64-i128-abi.ll | 274
-rw-r--r--  test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll | 19
-rw-r--r--  test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll | 16
-rw-r--r--  test/CodeGen/PowerPC/ppc64-linux-func-size.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/ppc64-patchpoint.ll | 56
-rw-r--r--  test/CodeGen/PowerPC/ppc64-r2-alloc.ll | 81
-rw-r--r--  test/CodeGen/PowerPC/ppc64-smallarg.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/ppc64-stackmap-nops.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/ppc64-stackmap.ll | 80
-rw-r--r--  test/CodeGen/PowerPC/ppc64-toc.ll | 11
-rw-r--r--  test/CodeGen/PowerPC/ppc64-zext.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/ppc64le-aggregates.ll | 57
-rw-r--r--  test/CodeGen/PowerPC/ppc64le-calls.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/ppc64le-localentry.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/ppc64le-smallarg.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/ppcf128-1.ll | 32
-rw-r--r--  test/CodeGen/PowerPC/ppcf128-3.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/ppcf128-endian.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/pr13891.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/pr15031.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/pr15630.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/pr16556-2.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/pr17168.ll | 822
-rw-r--r--  test/CodeGen/PowerPC/pr17354.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/pr18663.ll | 18
-rw-r--r--  test/CodeGen/PowerPC/pr20442.ll | 16
-rw-r--r--  test/CodeGen/PowerPC/pr22711.ll | 78
-rw-r--r--  test/CodeGen/PowerPC/preinc-ld-sel-crash.ll | 63
-rw-r--r--  test/CodeGen/PowerPC/preincprep-invoke.ll | 50
-rw-r--r--  test/CodeGen/PowerPC/private.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/pwr7-gt-nop.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/qpx-bv-sint.ll | 33
-rw-r--r--  test/CodeGen/PowerPC/qpx-bv.ll | 37
-rw-r--r--  test/CodeGen/PowerPC/qpx-func-clobber.ll | 22
-rw-r--r--  test/CodeGen/PowerPC/qpx-load.ll | 26
-rw-r--r--  test/CodeGen/PowerPC/qpx-recipest.ll | 194
-rw-r--r--  test/CodeGen/PowerPC/qpx-rounding-ops.ll | 109
-rw-r--r--  test/CodeGen/PowerPC/qpx-s-load.ll | 26
-rw-r--r--  test/CodeGen/PowerPC/qpx-s-sel.ll | 144
-rw-r--r--  test/CodeGen/PowerPC/qpx-s-store.ll | 25
-rw-r--r--  test/CodeGen/PowerPC/qpx-sel.ll | 152
-rw-r--r--  test/CodeGen/PowerPC/qpx-split-vsetcc.ll | 40
-rw-r--r--  test/CodeGen/PowerPC/qpx-store.ll | 25
-rw-r--r--  test/CodeGen/PowerPC/qpx-unalperm.ll | 64
-rw-r--r--  test/CodeGen/PowerPC/quadint-return.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/reg-coalesce-simple.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/reloc-align.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/remat-imm.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/resolvefi-basereg.ll | 350
-rw-r--r--  test/CodeGen/PowerPC/resolvefi-disp.ll | 16
-rw-r--r--  test/CodeGen/PowerPC/retaddr2.ll | 3
-rw-r--r--  test/CodeGen/PowerPC/return-val-i128.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/rlwimi-and.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/rlwimi-commute.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/rlwimi-dyn-and.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/rm-zext.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/rotl-2.ll | 5
-rw-r--r--  test/CodeGen/PowerPC/rotl-64.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/rotl.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/rs-undef-use.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/s000-alias-misched.ll | 30
-rw-r--r--  test/CodeGen/PowerPC/sdag-ppcf128.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/seteq-0.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/sjlj.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/small-arguments.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/split-index-tc.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/stack-protector.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/stack-realign.ll | 56
-rw-r--r--  test/CodeGen/PowerPC/std-unal-fi.ll | 14
-rw-r--r--  test/CodeGen/PowerPC/stdux-constuse.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/stfiwx.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/store-load-fwd.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/store-update.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/structsinmem.ll | 60
-rw-r--r--  test/CodeGen/PowerPC/structsinregs.ll | 60
-rw-r--r--  test/CodeGen/PowerPC/stwu-gta.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/stwu8.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/stwux.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/subreg-postra-2.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/subreg-postra.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/subsumes-pred-regs.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/swaps-le-1.ll | 147
-rw-r--r--  test/CodeGen/PowerPC/swaps-le-2.ll | 91
-rw-r--r--  test/CodeGen/PowerPC/tls-cse.ll | 52
-rw-r--r--  test/CodeGen/PowerPC/tls-pic.ll | 20
-rw-r--r--  test/CodeGen/PowerPC/tls-store2.ll | 11
-rw-r--r--  test/CodeGen/PowerPC/tls.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/toc-load-sched-bug.ll | 152
-rw-r--r--  test/CodeGen/PowerPC/trampoline.ll | 96
-rw-r--r--  test/CodeGen/PowerPC/unal-altivec-wint.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/unal-altivec.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/unal-altivec2.ll | 98
-rw-r--r--  test/CodeGen/PowerPC/unaligned.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/unwind-dw2-g.ll | 16
-rw-r--r--  test/CodeGen/PowerPC/vaddsplat.ll | 24
-rw-r--r--  test/CodeGen/PowerPC/varargs-struct-float.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/vcmp-fold.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/vec-abi-align.ll | 48
-rw-r--r--  test/CodeGen/PowerPC/vec_add_sub_doubleword.ll | 62
-rw-r--r--  test/CodeGen/PowerPC/vec_add_sub_quadword.ll | 130
-rw-r--r--  test/CodeGen/PowerPC/vec_auto_constant.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/vec_br_cmp.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/vec_buildvector_loadstore.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/vec_clz.ll | 40
-rw-r--r--  test/CodeGen/PowerPC/vec_cmpd.ll | 258
-rw-r--r--  test/CodeGen/PowerPC/vec_constants.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/vec_conv.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/vec_fneg.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/vec_minmax.ll | 34
-rw-r--r--  test/CodeGen/PowerPC/vec_misaligned.ll | 18
-rw-r--r--  test/CodeGen/PowerPC/vec_mul.ll | 22
-rw-r--r--  test/CodeGen/PowerPC/vec_mul_even_odd.ll | 42
-rw-r--r--  test/CodeGen/PowerPC/vec_perf_shuffle.ll | 20
-rw-r--r--  test/CodeGen/PowerPC/vec_popcnt.ll | 72
-rw-r--r--  test/CodeGen/PowerPC/vec_rotate_shift.ll | 36
-rw-r--r--  test/CodeGen/PowerPC/vec_shuffle.ll | 56
-rw-r--r--  test/CodeGen/PowerPC/vec_shuffle_le.ll | 54
-rw-r--r--  test/CodeGen/PowerPC/vec_shuffle_p8vector.ll | 54
-rw-r--r--  test/CodeGen/PowerPC/vec_shuffle_p8vector_le.ll | 43
-rw-r--r--  test/CodeGen/PowerPC/vec_splat.ll | 10
-rw-r--r--  test/CodeGen/PowerPC/vec_splat_constant.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll | 29
-rw-r--r--  test/CodeGen/PowerPC/vec_zero.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/vector-identity-shuffle.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/vector.ll | 46
-rw-r--r--  test/CodeGen/PowerPC/vperm-lowering.ll | 57
-rw-r--r--  test/CodeGen/PowerPC/vsx-div.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/vsx-elementary-arith.ll | 120
-rw-r--r--  test/CodeGen/PowerPC/vsx-fma-m.ll | 40
-rw-r--r--  test/CodeGen/PowerPC/vsx-infl-copy1.ll | 133
-rw-r--r--  test/CodeGen/PowerPC/vsx-infl-copy2.ll | 114
-rw-r--r--  test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll | 97
-rw-r--r--  test/CodeGen/PowerPC/vsx-ldst.ll | 13
-rw-r--r--  test/CodeGen/PowerPC/vsx-minmax.ll | 22
-rw-r--r--  test/CodeGen/PowerPC/vsx-p8.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/vsx-recip-est.ll | 62
-rw-r--r--  test/CodeGen/PowerPC/vsx-spill-norwstore.ll | 63
-rw-r--r--  test/CodeGen/PowerPC/vsx.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/vsx_insert_extract_le.ll | 28
-rw-r--r--  test/CodeGen/PowerPC/vsx_scalar_ld_st.ll | 139
-rw-r--r--  test/CodeGen/PowerPC/vsx_shuffle_le.ll | 132
-rw-r--r--  test/CodeGen/PowerPC/weak_def_can_be_hidden.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll | 52
-rw-r--r--  test/CodeGen/PowerPC/zero-not-run.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/zext-free.ll | 10
366 files changed, 10239 insertions, 3743 deletions
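
Nearly all of the hunks that follow are mechanical updates of the test IR to the explicit-type forms of load and getelementptr used by this llvm revision; the tests' semantics are unchanged. A minimal before/after sketch of the pattern, using hypothetical names (%p, %v, %s, %f, %struct.s):

    ; old form, as removed by the hunks below
    %v = load i32* %p
    %f = getelementptr %struct.s* %s, i32 0, i32 4

    ; new form, as added by the hunks below:
    ; the loaded type / source element type is now a separate first operand
    %v = load i32, i32* %p
    %f = getelementptr %struct.s, %struct.s* %s, i32 0, i32 4
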
diff --git a/test/CodeGen/PowerPC/2005-11-30-vastart-crash.ll b/test/CodeGen/PowerPC/2005-11-30-vastart-crash.ll
index 047a12bedd81..0f56ac990bbe 100644
--- a/test/CodeGen/PowerPC/2005-11-30-vastart-crash.ll
+++ b/test/CodeGen/PowerPC/2005-11-30-vastart-crash.ll
@@ -7,7 +7,7 @@ define void @bar(i32 %G, i32 %E, i32 %F, i32 %A, i32 %B, i32 %C, i32 %D, i8* %fm
%ap = alloca i8* ; <i8**> [#uses=2]
%va.upgrd.1 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
call void @llvm.va_start( i8* %va.upgrd.1 )
- %tmp.1 = load i8** %ap ; <i8*> [#uses=1]
+ %tmp.1 = load i8*, i8** %ap ; <i8*> [#uses=1]
%tmp.0 = call double @foo( i8* %tmp.1 ) ; <double> [#uses=0]
ret void
}
diff --git a/test/CodeGen/PowerPC/2006-01-20-ShiftPartsCrash.ll b/test/CodeGen/PowerPC/2006-01-20-ShiftPartsCrash.ll
index fbf254082ee0..fde330321aa4 100644
--- a/test/CodeGen/PowerPC/2006-01-20-ShiftPartsCrash.ll
+++ b/test/CodeGen/PowerPC/2006-01-20-ShiftPartsCrash.ll
@@ -4,11 +4,11 @@ define void @iterative_hash_host_wide_int() {
%zero = alloca i32 ; <i32*> [#uses=2]
%b = alloca i32 ; <i32*> [#uses=1]
store i32 0, i32* %zero
- %tmp = load i32* %zero ; <i32> [#uses=1]
+ %tmp = load i32, i32* %zero ; <i32> [#uses=1]
%tmp5 = bitcast i32 %tmp to i32 ; <i32> [#uses=1]
%tmp6.u = add i32 %tmp5, 32 ; <i32> [#uses=1]
%tmp6 = bitcast i32 %tmp6.u to i32 ; <i32> [#uses=1]
- %tmp7 = load i64* null ; <i64> [#uses=1]
+ %tmp7 = load i64, i64* null ; <i64> [#uses=1]
%tmp6.upgrd.1 = trunc i32 %tmp6 to i8 ; <i8> [#uses=1]
%shift.upgrd.2 = zext i8 %tmp6.upgrd.1 to i64 ; <i64> [#uses=1]
%tmp8 = ashr i64 %tmp7, %shift.upgrd.2 ; <i64> [#uses=1]
diff --git a/test/CodeGen/PowerPC/2006-04-05-splat-ish.ll b/test/CodeGen/PowerPC/2006-04-05-splat-ish.ll
index 7e845382a8e8..80827dc1505d 100644
--- a/test/CodeGen/PowerPC/2006-04-05-splat-ish.ll
+++ b/test/CodeGen/PowerPC/2006-04-05-splat-ish.ll
@@ -2,7 +2,7 @@
; RUN: grep "vspltish v.*, 10"
define void @test(<8 x i16>* %P) {
- %tmp = load <8 x i16>* %P ; <<8 x i16>> [#uses=1]
+ %tmp = load <8 x i16>, <8 x i16>* %P ; <<8 x i16>> [#uses=1]
%tmp1 = add <8 x i16> %tmp, < i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10 > ; <<8 x i16>> [#uses=1]
store <8 x i16> %tmp1, <8 x i16>* %P
ret void
diff --git a/test/CodeGen/PowerPC/2006-05-12-rlwimi-crash.ll b/test/CodeGen/PowerPC/2006-05-12-rlwimi-crash.ll
index 0205d10a795c..50d64f46569a 100644
--- a/test/CodeGen/PowerPC/2006-05-12-rlwimi-crash.ll
+++ b/test/CodeGen/PowerPC/2006-05-12-rlwimi-crash.ll
@@ -20,32 +20,32 @@ bb30: ; preds = %entry
cond_true68: ; preds = %bb30
ret void
cond_next92: ; preds = %bb30
- %tmp173 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp174 = load i32* %tmp173 ; <i32> [#uses=1]
+ %tmp173 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
+ %tmp174 = load i32, i32* %tmp173 ; <i32> [#uses=1]
%tmp177 = and i32 %tmp174, -9 ; <i32> [#uses=1]
store i32 %tmp177, i32* %tmp173
- %tmp180 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
- %tmp181 = load i32* %tmp180 ; <i32> [#uses=1]
- %tmp185 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp186 = load i32* %tmp185 ; <i32> [#uses=1]
+ %tmp180 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
+ %tmp181 = load i32, i32* %tmp180 ; <i32> [#uses=1]
+ %tmp185 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
+ %tmp186 = load i32, i32* %tmp185 ; <i32> [#uses=1]
%tmp183187 = shl i32 %tmp181, 1 ; <i32> [#uses=1]
%tmp188 = and i32 %tmp183187, 16 ; <i32> [#uses=1]
%tmp190 = and i32 %tmp186, -17 ; <i32> [#uses=1]
%tmp191 = or i32 %tmp190, %tmp188 ; <i32> [#uses=1]
store i32 %tmp191, i32* %tmp185
- %tmp193 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
- %tmp194 = load i32* %tmp193 ; <i32> [#uses=1]
- %tmp198 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp199 = load i32* %tmp198 ; <i32> [#uses=1]
+ %tmp193 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
+ %tmp194 = load i32, i32* %tmp193 ; <i32> [#uses=1]
+ %tmp198 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
+ %tmp199 = load i32, i32* %tmp198 ; <i32> [#uses=1]
%tmp196200 = shl i32 %tmp194, 2 ; <i32> [#uses=1]
%tmp201 = and i32 %tmp196200, 64 ; <i32> [#uses=1]
%tmp203 = and i32 %tmp199, -65 ; <i32> [#uses=1]
%tmp204 = or i32 %tmp203, %tmp201 ; <i32> [#uses=1]
store i32 %tmp204, i32* %tmp198
- %tmp206 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
- %tmp207 = load i32* %tmp206 ; <i32> [#uses=1]
- %tmp211 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp212 = load i32* %tmp211 ; <i32> [#uses=1]
+ %tmp206 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
+ %tmp207 = load i32, i32* %tmp206 ; <i32> [#uses=1]
+ %tmp211 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
+ %tmp212 = load i32, i32* %tmp211 ; <i32> [#uses=1]
%tmp209213 = shl i32 %tmp207, 1 ; <i32> [#uses=1]
%tmp214 = and i32 %tmp209213, 128 ; <i32> [#uses=1]
%tmp216 = and i32 %tmp212, -129 ; <i32> [#uses=1]
diff --git a/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll b/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
index 1b8b064ee914..792c271d0c08 100644
--- a/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
+++ b/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
@@ -4,14 +4,14 @@
@vals = external global i32* ; <i32**> [#uses=1]
define i32 @test(i32 %i) {
- %tmp = load i8** @lens ; <i8*> [#uses=1]
- %tmp1 = getelementptr i8* %tmp, i32 %i ; <i8*> [#uses=1]
- %tmp.upgrd.1 = load i8* %tmp1 ; <i8> [#uses=1]
+ %tmp = load i8*, i8** @lens ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %tmp, i32 %i ; <i8*> [#uses=1]
+ %tmp.upgrd.1 = load i8, i8* %tmp1 ; <i8> [#uses=1]
%tmp2 = zext i8 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
- %tmp3 = load i32** @vals ; <i32*> [#uses=1]
+ %tmp3 = load i32*, i32** @vals ; <i32*> [#uses=1]
%tmp5 = sub i32 1, %tmp2 ; <i32> [#uses=1]
- %tmp6 = getelementptr i32* %tmp3, i32 %tmp5 ; <i32*> [#uses=1]
- %tmp7 = load i32* %tmp6 ; <i32> [#uses=1]
+ %tmp6 = getelementptr i32, i32* %tmp3, i32 %tmp5 ; <i32*> [#uses=1]
+ %tmp7 = load i32, i32* %tmp6 ; <i32> [#uses=1]
ret i32 %tmp7
}
diff --git a/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll b/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll
index 65dd568b1ee3..4b287641d55f 100644
--- a/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll
+++ b/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=ppc32
define void @img2buf(i32 %symbol_size_in_bytes, i16* %ui16) nounwind {
- %tmp93 = load i16* null ; <i16> [#uses=1]
+ %tmp93 = load i16, i16* null ; <i16> [#uses=1]
%tmp99 = call i16 @llvm.bswap.i16( i16 %tmp93 ) ; <i16> [#uses=1]
store i16 %tmp99, i16* %ui16
ret void
diff --git a/test/CodeGen/PowerPC/2006-08-15-SelectionCrash.ll b/test/CodeGen/PowerPC/2006-08-15-SelectionCrash.ll
index cb76b5c70cf0..c63fd9ae1700 100644
--- a/test/CodeGen/PowerPC/2006-08-15-SelectionCrash.ll
+++ b/test/CodeGen/PowerPC/2006-08-15-SelectionCrash.ll
@@ -5,7 +5,7 @@
define fastcc void @immed_double_const(i32 %i0, i32 %i1) {
entry:
- %tmp1 = load i32* null ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* null ; <i32> [#uses=1]
switch i32 %tmp1, label %bb103 [
i32 1, label %bb
i32 3, label %bb
diff --git a/test/CodeGen/PowerPC/2006-10-13-Miscompile.ll b/test/CodeGen/PowerPC/2006-10-13-Miscompile.ll
index 002a0644183a..5992ad4481d3 100644
--- a/test/CodeGen/PowerPC/2006-10-13-Miscompile.ll
+++ b/test/CodeGen/PowerPC/2006-10-13-Miscompile.ll
@@ -6,7 +6,7 @@ entry:
%tmp = icmp sgt i64 %tmp1, 2 ; <i1> [#uses=1]
br i1 %tmp, label %UnifiedReturnBlock, label %cond_true
cond_true: ; preds = %entry
- %tmp.upgrd.1 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ %tmp.upgrd.1 = tail call i32 (...) @bar( ) ; <i32> [#uses=0]
ret void
UnifiedReturnBlock: ; preds = %entry
ret void
diff --git a/test/CodeGen/PowerPC/2006-10-17-brcc-miscompile.ll b/test/CodeGen/PowerPC/2006-10-17-brcc-miscompile.ll
index 3d462b4d1461..ab5f37d4babe 100644
--- a/test/CodeGen/PowerPC/2006-10-17-brcc-miscompile.ll
+++ b/test/CodeGen/PowerPC/2006-10-17-brcc-miscompile.ll
@@ -10,7 +10,7 @@ entry:
%tmp = icmp eq i32 %tmp2, 0 ; <i1> [#uses=1]
br i1 %tmp, label %UnifiedReturnBlock, label %cond_true
cond_true: ; preds = %entry
- tail call i32 (...)* @bar( ) ; <i32>:0 [#uses=0]
+ tail call i32 (...) @bar( ) ; <i32>:0 [#uses=0]
ret void
UnifiedReturnBlock: ; preds = %entry
ret void
diff --git a/test/CodeGen/PowerPC/2006-12-07-LargeAlloca.ll b/test/CodeGen/PowerPC/2006-12-07-LargeAlloca.ll
index ba863047be99..0e7709857406 100644
--- a/test/CodeGen/PowerPC/2006-12-07-LargeAlloca.ll
+++ b/test/CodeGen/PowerPC/2006-12-07-LargeAlloca.ll
@@ -14,7 +14,7 @@ bb19: ; preds = %entry
bb12.i: ; preds = %bb12.i, %bb19
%i.0.i = phi i32 [ %tmp11.i, %bb12.i ], [ 0, %bb19 ] ; <i32> [#uses=2]
%gep.upgrd.1 = zext i32 %i.0.i to i64 ; <i64> [#uses=1]
- %tmp9.i = getelementptr [256 x i32]* %RMask.i, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
+ %tmp9.i = getelementptr [256 x i32], [256 x i32]* %RMask.i, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
store i32 0, i32* %tmp9.i
%tmp11.i = add i32 %i.0.i, 1 ; <i32> [#uses=1]
br label %bb12.i
diff --git a/test/CodeGen/PowerPC/2006-12-07-SelectCrash.ll b/test/CodeGen/PowerPC/2006-12-07-SelectCrash.ll
index 6d9a3fa7b106..9660d450cb4c 100644
--- a/test/CodeGen/PowerPC/2006-12-07-SelectCrash.ll
+++ b/test/CodeGen/PowerPC/2006-12-07-SelectCrash.ll
@@ -10,7 +10,7 @@ entry:
cond_true: ; preds = %entry
ret void
cond_next71: ; preds = %entry
- %tmp73.b = load i1* @qsz.b ; <i1> [#uses=1]
+ %tmp73.b = load i1, i1* @qsz.b ; <i1> [#uses=1]
%ii.4.ph = select i1 %tmp73.b, i64 4, i64 0 ; <i64> [#uses=1]
br label %bb139
bb82: ; preds = %bb139
diff --git a/test/CodeGen/PowerPC/2007-01-15-AsmDialect.ll b/test/CodeGen/PowerPC/2007-01-15-AsmDialect.ll
index c7792884bb89..ca134fa9be4c 100644
--- a/test/CodeGen/PowerPC/2007-01-15-AsmDialect.ll
+++ b/test/CodeGen/PowerPC/2007-01-15-AsmDialect.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 | \
-; RUN: grep cntlzw
+; RUN: grep cntlz
define i32 @foo() nounwind {
entry:
@@ -8,19 +8,19 @@ entry:
%ctz_x = alloca i32, align 4 ; <i32*> [#uses=3]
%ctz_c = alloca i32, align 4 ; <i32*> [#uses=2]
store i32 61440, i32* %ctz_x
- %tmp = load i32* %ctz_x ; <i32> [#uses=1]
+ %tmp = load i32, i32* %ctz_x ; <i32> [#uses=1]
%tmp1 = sub i32 0, %tmp ; <i32> [#uses=1]
- %tmp2 = load i32* %ctz_x ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* %ctz_x ; <i32> [#uses=1]
%tmp3 = and i32 %tmp1, %tmp2 ; <i32> [#uses=1]
%tmp4 = call i32 asm "$(cntlz$|cntlzw$) $0,$1", "=r,r,~{dirflag},~{fpsr},~{flags}"( i32 %tmp3 ) ; <i32> [#uses=1]
store i32 %tmp4, i32* %ctz_c
- %tmp5 = load i32* %ctz_c ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* %ctz_c ; <i32> [#uses=1]
store i32 %tmp5, i32* %temp
- %tmp6 = load i32* %temp ; <i32> [#uses=1]
+ %tmp6 = load i32, i32* %temp ; <i32> [#uses=1]
store i32 %tmp6, i32* %retval
br label %return
return: ; preds = %entry
- %retval2 = load i32* %retval ; <i32> [#uses=1]
+ %retval2 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval2
}
diff --git a/test/CodeGen/PowerPC/2007-01-31-InlineAsmAddrMode.ll b/test/CodeGen/PowerPC/2007-01-31-InlineAsmAddrMode.ll
index fe5145d15230..6ce32da2f740 100644
--- a/test/CodeGen/PowerPC/2007-01-31-InlineAsmAddrMode.ll
+++ b/test/CodeGen/PowerPC/2007-01-31-InlineAsmAddrMode.ll
@@ -10,7 +10,7 @@
define void @test1() {
entry:
%Out = alloca %struct.A, align 4 ; <%struct.A*> [#uses=1]
- %tmp2 = getelementptr %struct.A* %Out, i32 0, i32 1
+ %tmp2 = getelementptr %struct.A, %struct.A* %Out, i32 0, i32 1
%tmp5 = call i32 asm "lwbrx $0, $1", "=r,m"(i32* %tmp2 )
ret void
}
@@ -18,7 +18,7 @@ entry:
define void @test2() {
entry:
%Out = alloca %struct.A, align 4 ; <%struct.A*> [#uses=1]
- %tmp2 = getelementptr %struct.A* %Out, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp2 = getelementptr %struct.A, %struct.A* %Out, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp5 = call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,m"( i8* null, i32 0, i32* %tmp2 ) ; <i32> [#uses=0]
ret void
}
diff --git a/test/CodeGen/PowerPC/2007-02-23-lr-saved-twice.ll b/test/CodeGen/PowerPC/2007-02-23-lr-saved-twice.ll
index 0473857ae70f..5a6fbf01c1b8 100644
--- a/test/CodeGen/PowerPC/2007-02-23-lr-saved-twice.ll
+++ b/test/CodeGen/PowerPC/2007-02-23-lr-saved-twice.ll
@@ -7,7 +7,7 @@ target triple = "powerpc-apple-darwin8"
define i32 @main() {
entry:
- %tmp = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([18 x i8]* @str, i32 0, i32 0) ) ; <i32> [#uses=0]
+ %tmp = tail call i32 (i8*, ...) @printf( i8* getelementptr ([18 x i8], [18 x i8]* @str, i32 0, i32 0) ) ; <i32> [#uses=0]
ret i32 0
}
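
The hunk above also shows the corresponding change at call sites and in constant getelementptr expressions: the explicit callee type is now the function type rather than a pointer to it, and array GEP constants spell out the array type. A minimal sketch under the same convention, with hypothetical @msg and @caller names:

    @msg = private constant [4 x i8] c"hi\0A\00"
    declare i32 @printf(i8*, ...)

    ; old: %r = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @msg, i32 0, i32 0))
    define i32 @caller() {
      ; new form, matching the hunks in this import
      %r = tail call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @msg, i32 0, i32 0))
      ret i32 %r
    }
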
diff --git a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
index 3624b5109301..2db87fcb1c88 100644
--- a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
+++ b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
@@ -3,7 +3,7 @@ target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(i64 *%t) nounwind {
- %tmp19 = load i64* %t
+ %tmp19 = load i64, i64* %t
%tmp22 = tail call i64 @llvm.ctlz.i64( i64 %tmp19, i1 true ) ; <i64> [#uses=1]
%tmp23 = trunc i64 %tmp22 to i32
%tmp89 = add i32 %tmp23, -64 ; <i32> [#uses=1]
diff --git a/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll b/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll
index d43916d4f3c1..de445f4c034a 100644
--- a/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll
+++ b/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll
@@ -2,578 +2,578 @@
define void @test(<4 x float>*, { { i16, i16, i32 } }*) {
xOperationInitMasks.exit:
- %.sub7896 = getelementptr [4 x <4 x i32>]* null, i32 0, i32 0 ; <<4 x i32>*> [#uses=24]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 175, i32 3 ; <<4 x float>*>:2 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 2 ; <<4 x float>*>:3 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 3 ; <<4 x float>*>:4 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 1 ; <<4 x float>*>:5 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 2 ; <<4 x float>*>:6 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 3 ; <<4 x float>*>:7 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 1 ; <<4 x float>*>:8 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 2 ; <<4 x float>*>:9 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 3 ; <<4 x float>*>:10 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 1 ; <<4 x float>*>:11 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 2 ; <<4 x float>*>:12 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 3 ; <<4 x float>*>:13 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 1 ; <<4 x float>*>:14 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 2 ; <<4 x float>*>:15 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 3 ; <<4 x float>*>:16 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 1 ; <<4 x float>*>:17 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 2 ; <<4 x float>*>:18 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 3 ; <<4 x float>*>:19 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 1 ; <<4 x float>*>:20 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 2 ; <<4 x float>*>:21 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 3 ; <<4 x float>*>:22 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 1 ; <<4 x float>*>:23 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 2 ; <<4 x float>*>:24 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 3 ; <<4 x float>*>:25 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 1 ; <<4 x float>*>:26 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 2 ; <<4 x float>*>:27 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 3 ; <<4 x float>*>:28 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 1 ; <<4 x float>*>:29 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 2 ; <<4 x float>*>:30 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 3 ; <<4 x float>*>:31 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 1 ; <<4 x float>*>:32 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 2 ; <<4 x float>*>:33 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 3 ; <<4 x float>*>:34 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 1 ; <<4 x float>*>:35 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 2 ; <<4 x float>*>:36 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 3 ; <<4 x float>*>:37 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 1 ; <<4 x float>*>:38 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 2 ; <<4 x float>*>:39 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 3 ; <<4 x float>*>:40 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 1 ; <<4 x float>*>:41 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 2 ; <<4 x float>*>:42 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 3 ; <<4 x float>*>:43 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 1 ; <<4 x float>*>:44 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 2 ; <<4 x float>*>:45 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 3 ; <<4 x float>*>:46 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 1 ; <<4 x float>*>:47 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 2 ; <<4 x float>*>:48 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 3 ; <<4 x float>*>:49 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 1 ; <<4 x float>*>:50 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 2 ; <<4 x float>*>:51 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 3 ; <<4 x float>*>:52 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 1 ; <<4 x float>*>:53 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 2 ; <<4 x float>*>:54 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 3 ; <<4 x float>*>:55 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 1 ; <<4 x float>*>:56 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 2 ; <<4 x float>*>:57 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 3 ; <<4 x float>*>:58 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 1 ; <<4 x float>*>:59 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 2 ; <<4 x float>*>:60 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 3 ; <<4 x float>*>:61 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 1 ; <<4 x float>*>:62 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 2 ; <<4 x float>*>:63 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 3 ; <<4 x float>*>:64 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 1 ; <<4 x float>*>:65 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 2 ; <<4 x float>*>:66 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 3 ; <<4 x float>*>:67 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 1 ; <<4 x float>*>:68 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 2 ; <<4 x float>*>:69 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 3 ; <<4 x float>*>:70 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 1 ; <<4 x float>*>:71 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 2 ; <<4 x float>*>:72 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 3 ; <<4 x float>*>:73 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 1 ; <<4 x float>*>:74 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 2 ; <<4 x float>*>:75 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 3 ; <<4 x float>*>:76 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 1 ; <<4 x float>*>:77 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 2 ; <<4 x float>*>:78 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 3 ; <<4 x float>*>:79 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 1 ; <<4 x float>*>:80 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 2 ; <<4 x float>*>:81 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 3 ; <<4 x float>*>:82 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 1 ; <<4 x float>*>:83 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 2 ; <<4 x float>*>:84 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 3 ; <<4 x float>*>:85 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 1 ; <<4 x float>*>:86 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 2 ; <<4 x float>*>:87 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 3 ; <<4 x float>*>:88 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 1 ; <<4 x float>*>:89 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 2 ; <<4 x float>*>:90 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 3 ; <<4 x float>*>:91 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 1 ; <<4 x float>*>:92 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 2 ; <<4 x float>*>:93 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 3 ; <<4 x float>*>:94 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 1 ; <<4 x float>*>:95 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 2 ; <<4 x float>*>:96 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 3 ; <<4 x float>*>:97 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 1 ; <<4 x float>*>:98 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 2 ; <<4 x float>*>:99 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 3 ; <<4 x float>*>:100 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 1 ; <<4 x float>*>:101 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 2 ; <<4 x float>*>:102 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 3 ; <<4 x float>*>:103 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 1 ; <<4 x float>*>:104 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 2 ; <<4 x float>*>:105 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 3 ; <<4 x float>*>:106 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 1 ; <<4 x float>*>:107 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 2 ; <<4 x float>*>:108 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 3 ; <<4 x float>*>:109 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 1 ; <<4 x float>*>:110 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 2 ; <<4 x float>*>:111 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 3 ; <<4 x float>*>:112 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 1 ; <<4 x float>*>:113 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 2 ; <<4 x float>*>:114 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 3 ; <<4 x float>*>:115 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 1 ; <<4 x float>*>:116 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 2 ; <<4 x float>*>:117 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 3 ; <<4 x float>*>:118 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 1 ; <<4 x float>*>:119 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 2 ; <<4 x float>*>:120 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 3 ; <<4 x float>*>:121 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 1 ; <<4 x float>*>:122 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 2 ; <<4 x float>*>:123 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 3 ; <<4 x float>*>:124 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 1 ; <<4 x float>*>:125 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 2 ; <<4 x float>*>:126 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 3 ; <<4 x float>*>:127 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 1 ; <<4 x float>*>:128 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 2 ; <<4 x float>*>:129 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 3 ; <<4 x float>*>:130 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 1 ; <<4 x float>*>:131 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 2 ; <<4 x float>*>:132 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 3 ; <<4 x float>*>:133 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 1 ; <<4 x float>*>:134 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 2 ; <<4 x float>*>:135 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 3 ; <<4 x float>*>:136 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 1 ; <<4 x float>*>:137 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 2 ; <<4 x float>*>:138 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 3 ; <<4 x float>*>:139 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 1 ; <<4 x float>*>:140 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 2 ; <<4 x float>*>:141 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 3 ; <<4 x float>*>:142 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 1 ; <<4 x float>*>:143 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 2 ; <<4 x float>*>:144 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 3 ; <<4 x float>*>:145 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 1 ; <<4 x float>*>:146 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 2 ; <<4 x float>*>:147 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 3 ; <<4 x float>*>:148 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 1 ; <<4 x float>*>:149 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 2 ; <<4 x float>*>:150 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 3 ; <<4 x float>*>:151 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 1 ; <<4 x float>*>:152 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 2 ; <<4 x float>*>:153 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 3 ; <<4 x float>*>:154 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 1 ; <<4 x float>*>:155 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 2 ; <<4 x float>*>:156 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 3 ; <<4 x float>*>:157 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 1 ; <<4 x float>*>:158 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 2 ; <<4 x float>*>:159 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 3 ; <<4 x float>*>:160 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 1 ; <<4 x float>*>:161 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 2 ; <<4 x float>*>:162 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 3 ; <<4 x float>*>:163 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 1 ; <<4 x float>*>:164 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 2 ; <<4 x float>*>:165 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 3 ; <<4 x float>*>:166 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 1 ; <<4 x float>*>:167 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 2 ; <<4 x float>*>:168 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 3 ; <<4 x float>*>:169 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 1 ; <<4 x float>*>:170 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 2 ; <<4 x float>*>:171 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 3 ; <<4 x float>*>:172 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 1 ; <<4 x float>*>:173 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 2 ; <<4 x float>*>:174 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 3 ; <<4 x float>*>:175 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 1 ; <<4 x float>*>:176 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 2 ; <<4 x float>*>:177 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 3 ; <<4 x float>*>:178 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 1 ; <<4 x float>*>:179 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 2 ; <<4 x float>*>:180 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 3 ; <<4 x float>*>:181 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 1 ; <<4 x float>*>:182 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 2 ; <<4 x float>*>:183 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 3 ; <<4 x float>*>:184 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 1 ; <<4 x float>*>:185 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 2 ; <<4 x float>*>:186 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 3 ; <<4 x float>*>:187 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 1 ; <<4 x float>*>:188 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 2 ; <<4 x float>*>:189 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 3 ; <<4 x float>*>:190 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 1 ; <<4 x float>*>:191 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 2 ; <<4 x float>*>:192 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 3 ; <<4 x float>*>:193 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 1 ; <<4 x float>*>:194 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 2 ; <<4 x float>*>:195 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 3 ; <<4 x float>*>:196 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 1 ; <<4 x float>*>:197 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 2 ; <<4 x float>*>:198 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 3 ; <<4 x float>*>:199 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 1 ; <<4 x float>*>:200 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 2 ; <<4 x float>*>:201 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 3 ; <<4 x float>*>:202 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 1 ; <<4 x float>*>:203 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 2 ; <<4 x float>*>:204 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 3 ; <<4 x float>*>:205 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 1 ; <<4 x float>*>:206 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 2 ; <<4 x float>*>:207 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 3 ; <<4 x float>*>:208 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 1 ; <<4 x float>*>:209 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 2 ; <<4 x float>*>:210 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 3 ; <<4 x float>*>:211 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 1 ; <<4 x float>*>:212 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 2 ; <<4 x float>*>:213 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 3 ; <<4 x float>*>:214 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 1 ; <<4 x float>*>:215 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 2 ; <<4 x float>*>:216 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 3 ; <<4 x float>*>:217 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 1 ; <<4 x float>*>:218 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 2 ; <<4 x float>*>:219 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 3 ; <<4 x float>*>:220 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 1 ; <<4 x float>*>:221 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 2 ; <<4 x float>*>:222 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 3 ; <<4 x float>*>:223 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 1 ; <<4 x float>*>:224 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 2 ; <<4 x float>*>:225 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 3 ; <<4 x float>*>:226 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 1 ; <<4 x float>*>:227 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 2 ; <<4 x float>*>:228 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 3 ; <<4 x float>*>:229 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 1 ; <<4 x float>*>:230 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 2 ; <<4 x float>*>:231 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 3 ; <<4 x float>*>:232 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 1 ; <<4 x float>*>:233 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 2 ; <<4 x float>*>:234 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 3 ; <<4 x float>*>:235 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 1 ; <<4 x float>*>:236 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 2 ; <<4 x float>*>:237 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 3 ; <<4 x float>*>:238 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 1 ; <<4 x float>*>:239 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 2 ; <<4 x float>*>:240 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 3 ; <<4 x float>*>:241 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 1 ; <<4 x float>*>:242 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 2 ; <<4 x float>*>:243 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 3 ; <<4 x float>*>:244 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 1 ; <<4 x float>*>:245 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 2 ; <<4 x float>*>:246 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 3 ; <<4 x float>*>:247 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 1 ; <<4 x float>*>:248 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 2 ; <<4 x float>*>:249 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 3 ; <<4 x float>*>:250 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 1 ; <<4 x float>*>:251 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 2 ; <<4 x float>*>:252 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 3 ; <<4 x float>*>:253 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 1 ; <<4 x float>*>:254 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 2 ; <<4 x float>*>:255 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 3 ; <<4 x float>*>:256 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 1 ; <<4 x float>*>:257 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 2 ; <<4 x float>*>:258 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 3 ; <<4 x float>*>:259 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 1 ; <<4 x float>*>:260 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 2 ; <<4 x float>*>:261 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 3 ; <<4 x float>*>:262 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 1 ; <<4 x float>*>:263 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 2 ; <<4 x float>*>:264 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 3 ; <<4 x float>*>:265 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 1 ; <<4 x float>*>:266 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 2 ; <<4 x float>*>:267 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 3 ; <<4 x float>*>:268 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 1 ; <<4 x float>*>:269 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 2 ; <<4 x float>*>:270 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 3 ; <<4 x float>*>:271 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 1 ; <<4 x float>*>:272 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 2 ; <<4 x float>*>:273 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 3 ; <<4 x float>*>:274 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 1 ; <<4 x float>*>:275 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 2 ; <<4 x float>*>:276 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 3 ; <<4 x float>*>:277 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 1 ; <<4 x float>*>:278 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 2 ; <<4 x float>*>:279 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 3 ; <<4 x float>*>:280 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 1 ; <<4 x float>*>:281 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 2 ; <<4 x float>*>:282 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 3 ; <<4 x float>*>:283 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 1 ; <<4 x float>*>:284 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 2 ; <<4 x float>*>:285 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 3 ; <<4 x float>*>:286 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 1 ; <<4 x float>*>:287 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 2 ; <<4 x float>*>:288 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 3 ; <<4 x float>*>:289 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 1 ; <<4 x float>*>:290 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 2 ; <<4 x float>*>:291 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 3 ; <<4 x float>*>:292 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 1 ; <<4 x float>*>:293 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 2 ; <<4 x float>*>:294 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 3 ; <<4 x float>*>:295 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 1 ; <<4 x float>*>:296 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 2 ; <<4 x float>*>:297 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 3 ; <<4 x float>*>:298 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 1 ; <<4 x float>*>:299 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 2 ; <<4 x float>*>:300 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 3 ; <<4 x float>*>:301 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 1 ; <<4 x float>*>:302 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 2 ; <<4 x float>*>:303 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 3 ; <<4 x float>*>:304 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 1 ; <<4 x float>*>:305 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 2 ; <<4 x float>*>:306 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 3 ; <<4 x float>*>:307 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 1 ; <<4 x float>*>:308 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 2 ; <<4 x float>*>:309 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 3 ; <<4 x float>*>:310 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 1 ; <<4 x float>*>:311 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 2 ; <<4 x float>*>:312 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 3 ; <<4 x float>*>:313 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 1 ; <<4 x float>*>:314 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 2 ; <<4 x float>*>:315 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 3 ; <<4 x float>*>:316 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 1 ; <<4 x float>*>:317 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 2 ; <<4 x float>*>:318 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 3 ; <<4 x float>*>:319 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 1 ; <<4 x float>*>:320 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 2 ; <<4 x float>*>:321 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 3 ; <<4 x float>*>:322 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 1 ; <<4 x float>*>:323 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 2 ; <<4 x float>*>:324 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 3 ; <<4 x float>*>:325 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 1 ; <<4 x float>*>:326 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 2 ; <<4 x float>*>:327 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 3 ; <<4 x float>*>:328 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 1 ; <<4 x float>*>:329 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 2 ; <<4 x float>*>:330 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 3 ; <<4 x float>*>:331 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 1 ; <<4 x float>*>:332 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 2 ; <<4 x float>*>:333 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 3 ; <<4 x float>*>:334 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 1 ; <<4 x float>*>:335 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 2 ; <<4 x float>*>:336 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 3 ; <<4 x float>*>:337 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 1 ; <<4 x float>*>:338 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 2 ; <<4 x float>*>:339 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 3 ; <<4 x float>*>:340 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 1 ; <<4 x float>*>:341 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 2 ; <<4 x float>*>:342 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 3 ; <<4 x float>*>:343 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 1 ; <<4 x float>*>:344 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 2 ; <<4 x float>*>:345 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 3 ; <<4 x float>*>:346 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 1 ; <<4 x float>*>:347 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 2 ; <<4 x float>*>:348 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 3 ; <<4 x float>*>:349 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 1 ; <<4 x float>*>:350 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 2 ; <<4 x float>*>:351 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 3 ; <<4 x float>*>:352 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 1 ; <<4 x float>*>:353 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 2 ; <<4 x float>*>:354 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 3 ; <<4 x float>*>:355 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 1 ; <<4 x float>*>:356 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 2 ; <<4 x float>*>:357 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 3 ; <<4 x float>*>:358 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 1 ; <<4 x float>*>:359 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 2 ; <<4 x float>*>:360 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 3 ; <<4 x float>*>:361 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 1 ; <<4 x float>*>:362 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 2 ; <<4 x float>*>:363 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 3 ; <<4 x float>*>:364 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 1 ; <<4 x float>*>:365 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 2 ; <<4 x float>*>:366 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 3 ; <<4 x float>*>:367 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 1 ; <<4 x float>*>:368 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 2 ; <<4 x float>*>:369 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 3 ; <<4 x float>*>:370 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 1 ; <<4 x float>*>:371 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 2 ; <<4 x float>*>:372 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 3 ; <<4 x float>*>:373 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 1 ; <<4 x float>*>:374 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 2 ; <<4 x float>*>:375 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 3 ; <<4 x float>*>:376 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 1 ; <<4 x float>*>:377 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 2 ; <<4 x float>*>:378 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 3 ; <<4 x float>*>:379 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 1 ; <<4 x float>*>:380 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 2 ; <<4 x float>*>:381 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 3 ; <<4 x float>*>:382 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 1 ; <<4 x float>*>:383 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 2 ; <<4 x float>*>:384 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 3 ; <<4 x float>*>:385 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 1 ; <<4 x float>*>:386 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 2 ; <<4 x float>*>:387 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 3 ; <<4 x float>*>:388 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 1 ; <<4 x float>*>:389 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 2 ; <<4 x float>*>:390 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 3 ; <<4 x float>*>:391 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 1 ; <<4 x float>*>:392 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 2 ; <<4 x float>*>:393 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 3 ; <<4 x float>*>:394 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 1 ; <<4 x float>*>:395 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 2 ; <<4 x float>*>:396 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 3 ; <<4 x float>*>:397 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 1 ; <<4 x float>*>:398 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 2 ; <<4 x float>*>:399 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 3 ; <<4 x float>*>:400 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 1 ; <<4 x float>*>:401 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 2 ; <<4 x float>*>:402 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 3 ; <<4 x float>*>:403 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 1 ; <<4 x float>*>:404 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 2 ; <<4 x float>*>:405 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 3 ; <<4 x float>*>:406 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 1 ; <<4 x float>*>:407 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 2 ; <<4 x float>*>:408 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 3 ; <<4 x float>*>:409 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 1 ; <<4 x float>*>:410 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 2 ; <<4 x float>*>:411 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 3 ; <<4 x float>*>:412 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 1 ; <<4 x float>*>:413 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 2 ; <<4 x float>*>:414 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 3 ; <<4 x float>*>:415 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 1 ; <<4 x float>*>:416 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 2 ; <<4 x float>*>:417 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 3 ; <<4 x float>*>:418 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 1 ; <<4 x float>*>:419 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 2 ; <<4 x float>*>:420 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 3 ; <<4 x float>*>:421 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 1 ; <<4 x float>*>:422 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 2 ; <<4 x float>*>:423 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 3 ; <<4 x float>*>:424 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 1 ; <<4 x float>*>:425 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 2 ; <<4 x float>*>:426 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 3 ; <<4 x float>*>:427 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 1 ; <<4 x float>*>:428 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 2 ; <<4 x float>*>:429 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 3 ; <<4 x float>*>:430 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 1 ; <<4 x float>*>:431 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 2 ; <<4 x float>*>:432 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 3 ; <<4 x float>*>:433 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 1 ; <<4 x float>*>:434 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 2 ; <<4 x float>*>:435 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 3 ; <<4 x float>*>:436 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 1 ; <<4 x float>*>:437 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 2 ; <<4 x float>*>:438 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 3 ; <<4 x float>*>:439 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 1 ; <<4 x float>*>:440 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 2 ; <<4 x float>*>:441 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 3 ; <<4 x float>*>:442 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 1 ; <<4 x float>*>:443 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 2 ; <<4 x float>*>:444 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 3 ; <<4 x float>*>:445 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 1 ; <<4 x float>*>:446 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 2 ; <<4 x float>*>:447 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 3 ; <<4 x float>*>:448 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 1 ; <<4 x float>*>:449 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 2 ; <<4 x float>*>:450 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 3 ; <<4 x float>*>:451 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 1 ; <<4 x float>*>:452 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 2 ; <<4 x float>*>:453 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 3 ; <<4 x float>*>:454 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 1 ; <<4 x float>*>:455 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 2 ; <<4 x float>*>:456 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 3 ; <<4 x float>*>:457 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 1 ; <<4 x float>*>:458 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 2 ; <<4 x float>*>:459 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 3 ; <<4 x float>*>:460 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 1 ; <<4 x float>*>:461 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 2 ; <<4 x float>*>:462 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 3 ; <<4 x float>*>:463 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 1 ; <<4 x float>*>:464 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 2 ; <<4 x float>*>:465 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 3 ; <<4 x float>*>:466 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 1 ; <<4 x float>*>:467 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 2 ; <<4 x float>*>:468 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 3 ; <<4 x float>*>:469 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 1 ; <<4 x float>*>:470 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 2 ; <<4 x float>*>:471 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 3 ; <<4 x float>*>:472 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 1 ; <<4 x float>*>:473 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 2 ; <<4 x float>*>:474 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 3 ; <<4 x float>*>:475 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 1 ; <<4 x float>*>:476 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 2 ; <<4 x float>*>:477 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 3 ; <<4 x float>*>:478 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 1 ; <<4 x float>*>:479 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 2 ; <<4 x float>*>:480 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 3 ; <<4 x float>*>:481 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 1 ; <<4 x float>*>:482 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 2 ; <<4 x float>*>:483 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 3 ; <<4 x float>*>:484 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:485 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:486 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:487 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:488 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:489 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:490 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 1 ; <<4 x float>*>:491 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 2 ; <<4 x float>*>:492 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 3 ; <<4 x float>*>:493 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 1 ; <<4 x float>*>:494 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 2 ; <<4 x float>*>:495 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 3 ; <<4 x float>*>:496 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 1 ; <<4 x float>*>:497 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 2 ; <<4 x float>*>:498 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 3 ; <<4 x float>*>:499 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 1 ; <<4 x float>*>:500 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 2 ; <<4 x float>*>:501 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 3 ; <<4 x float>*>:502 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 1 ; <<4 x float>*>:503 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 2 ; <<4 x float>*>:504 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 3 ; <<4 x float>*>:505 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 1 ; <<4 x float>*>:506 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 2 ; <<4 x float>*>:507 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 3 ; <<4 x float>*>:508 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 1 ; <<4 x float>*>:509 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 2 ; <<4 x float>*>:510 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 3 ; <<4 x float>*>:511 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 1 ; <<4 x float>*>:512 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 2 ; <<4 x float>*>:513 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 3 ; <<4 x float>*>:514 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 1 ; <<4 x float>*>:515 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 2 ; <<4 x float>*>:516 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 3 ; <<4 x float>*>:517 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 1 ; <<4 x float>*>:518 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 2 ; <<4 x float>*>:519 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 3 ; <<4 x float>*>:520 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 1 ; <<4 x float>*>:521 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 2 ; <<4 x float>*>:522 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 3 ; <<4 x float>*>:523 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 1 ; <<4 x float>*>:524 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 2 ; <<4 x float>*>:525 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 3 ; <<4 x float>*>:526 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:527 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:528 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:529 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:530 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:531 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:532 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:533 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:534 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:535 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 1 ; <<4 x float>*>:536 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 2 ; <<4 x float>*>:537 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 3 ; <<4 x float>*>:538 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 1 ; <<4 x float>*>:539 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 2 ; <<4 x float>*>:540 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 3 ; <<4 x float>*>:541 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:542 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:543 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:544 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 1 ; <<4 x float>*>:545 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 2 ; <<4 x float>*>:546 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 3 ; <<4 x float>*>:547 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 1 ; <<4 x float>*>:548 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 2 ; <<4 x float>*>:549 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 3 ; <<4 x float>*>:550 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:551 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:552 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:553 [#uses=1]
- load <4 x float>* %553 ; <<4 x float>>:554 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 3 ; <<4 x float>*>:555 [#uses=0]
+ %.sub7896 = getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 0 ; <<4 x i32>*> [#uses=24]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 175, i32 3 ; <<4 x float>*>:2 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 2 ; <<4 x float>*>:3 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 3 ; <<4 x float>*>:4 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 1 ; <<4 x float>*>:5 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 2 ; <<4 x float>*>:6 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 3 ; <<4 x float>*>:7 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 1 ; <<4 x float>*>:8 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 2 ; <<4 x float>*>:9 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 3 ; <<4 x float>*>:10 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 1 ; <<4 x float>*>:11 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 2 ; <<4 x float>*>:12 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 3 ; <<4 x float>*>:13 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 1 ; <<4 x float>*>:14 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 2 ; <<4 x float>*>:15 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 3 ; <<4 x float>*>:16 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 1 ; <<4 x float>*>:17 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 2 ; <<4 x float>*>:18 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 3 ; <<4 x float>*>:19 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 1 ; <<4 x float>*>:20 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 2 ; <<4 x float>*>:21 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 3 ; <<4 x float>*>:22 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 1 ; <<4 x float>*>:23 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 2 ; <<4 x float>*>:24 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 3 ; <<4 x float>*>:25 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 1 ; <<4 x float>*>:26 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 2 ; <<4 x float>*>:27 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 3 ; <<4 x float>*>:28 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 1 ; <<4 x float>*>:29 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 2 ; <<4 x float>*>:30 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 3 ; <<4 x float>*>:31 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 1 ; <<4 x float>*>:32 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 2 ; <<4 x float>*>:33 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 3 ; <<4 x float>*>:34 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 1 ; <<4 x float>*>:35 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 2 ; <<4 x float>*>:36 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 3 ; <<4 x float>*>:37 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 1 ; <<4 x float>*>:38 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 2 ; <<4 x float>*>:39 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 3 ; <<4 x float>*>:40 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 1 ; <<4 x float>*>:41 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 2 ; <<4 x float>*>:42 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 3 ; <<4 x float>*>:43 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 1 ; <<4 x float>*>:44 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 2 ; <<4 x float>*>:45 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 3 ; <<4 x float>*>:46 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 1 ; <<4 x float>*>:47 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 2 ; <<4 x float>*>:48 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 3 ; <<4 x float>*>:49 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 1 ; <<4 x float>*>:50 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 2 ; <<4 x float>*>:51 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 3 ; <<4 x float>*>:52 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 1 ; <<4 x float>*>:53 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 2 ; <<4 x float>*>:54 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 3 ; <<4 x float>*>:55 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 1 ; <<4 x float>*>:56 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 2 ; <<4 x float>*>:57 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 3 ; <<4 x float>*>:58 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 1 ; <<4 x float>*>:59 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 2 ; <<4 x float>*>:60 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 3 ; <<4 x float>*>:61 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 1 ; <<4 x float>*>:62 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 2 ; <<4 x float>*>:63 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 3 ; <<4 x float>*>:64 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 1 ; <<4 x float>*>:65 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 2 ; <<4 x float>*>:66 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 3 ; <<4 x float>*>:67 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 1 ; <<4 x float>*>:68 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 2 ; <<4 x float>*>:69 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 3 ; <<4 x float>*>:70 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 1 ; <<4 x float>*>:71 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 2 ; <<4 x float>*>:72 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 3 ; <<4 x float>*>:73 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 1 ; <<4 x float>*>:74 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 2 ; <<4 x float>*>:75 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 3 ; <<4 x float>*>:76 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 1 ; <<4 x float>*>:77 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 2 ; <<4 x float>*>:78 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 3 ; <<4 x float>*>:79 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 1 ; <<4 x float>*>:80 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 2 ; <<4 x float>*>:81 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 3 ; <<4 x float>*>:82 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 1 ; <<4 x float>*>:83 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 2 ; <<4 x float>*>:84 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 3 ; <<4 x float>*>:85 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 1 ; <<4 x float>*>:86 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 2 ; <<4 x float>*>:87 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 3 ; <<4 x float>*>:88 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 1 ; <<4 x float>*>:89 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 2 ; <<4 x float>*>:90 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 3 ; <<4 x float>*>:91 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 1 ; <<4 x float>*>:92 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 2 ; <<4 x float>*>:93 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 3 ; <<4 x float>*>:94 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 1 ; <<4 x float>*>:95 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 2 ; <<4 x float>*>:96 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 3 ; <<4 x float>*>:97 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 1 ; <<4 x float>*>:98 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 2 ; <<4 x float>*>:99 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 3 ; <<4 x float>*>:100 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 1 ; <<4 x float>*>:101 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 2 ; <<4 x float>*>:102 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 3 ; <<4 x float>*>:103 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 1 ; <<4 x float>*>:104 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 2 ; <<4 x float>*>:105 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 3 ; <<4 x float>*>:106 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 1 ; <<4 x float>*>:107 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 2 ; <<4 x float>*>:108 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 3 ; <<4 x float>*>:109 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 1 ; <<4 x float>*>:110 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 2 ; <<4 x float>*>:111 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 3 ; <<4 x float>*>:112 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 1 ; <<4 x float>*>:113 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 2 ; <<4 x float>*>:114 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 3 ; <<4 x float>*>:115 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 1 ; <<4 x float>*>:116 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 2 ; <<4 x float>*>:117 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 3 ; <<4 x float>*>:118 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 1 ; <<4 x float>*>:119 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 2 ; <<4 x float>*>:120 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 3 ; <<4 x float>*>:121 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 1 ; <<4 x float>*>:122 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 2 ; <<4 x float>*>:123 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 3 ; <<4 x float>*>:124 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 1 ; <<4 x float>*>:125 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 2 ; <<4 x float>*>:126 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 3 ; <<4 x float>*>:127 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 1 ; <<4 x float>*>:128 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 2 ; <<4 x float>*>:129 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 3 ; <<4 x float>*>:130 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 1 ; <<4 x float>*>:131 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 2 ; <<4 x float>*>:132 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 3 ; <<4 x float>*>:133 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 1 ; <<4 x float>*>:134 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 2 ; <<4 x float>*>:135 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 3 ; <<4 x float>*>:136 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 1 ; <<4 x float>*>:137 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 2 ; <<4 x float>*>:138 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 3 ; <<4 x float>*>:139 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 1 ; <<4 x float>*>:140 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 2 ; <<4 x float>*>:141 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 3 ; <<4 x float>*>:142 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 1 ; <<4 x float>*>:143 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 2 ; <<4 x float>*>:144 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 3 ; <<4 x float>*>:145 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 1 ; <<4 x float>*>:146 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 2 ; <<4 x float>*>:147 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 3 ; <<4 x float>*>:148 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 1 ; <<4 x float>*>:149 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 2 ; <<4 x float>*>:150 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 3 ; <<4 x float>*>:151 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 1 ; <<4 x float>*>:152 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 2 ; <<4 x float>*>:153 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 3 ; <<4 x float>*>:154 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 1 ; <<4 x float>*>:155 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 2 ; <<4 x float>*>:156 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 3 ; <<4 x float>*>:157 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 1 ; <<4 x float>*>:158 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 2 ; <<4 x float>*>:159 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 3 ; <<4 x float>*>:160 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 1 ; <<4 x float>*>:161 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 2 ; <<4 x float>*>:162 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 3 ; <<4 x float>*>:163 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 1 ; <<4 x float>*>:164 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 2 ; <<4 x float>*>:165 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 3 ; <<4 x float>*>:166 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 1 ; <<4 x float>*>:167 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 2 ; <<4 x float>*>:168 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 3 ; <<4 x float>*>:169 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 1 ; <<4 x float>*>:170 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 2 ; <<4 x float>*>:171 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 3 ; <<4 x float>*>:172 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 1 ; <<4 x float>*>:173 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 2 ; <<4 x float>*>:174 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 3 ; <<4 x float>*>:175 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 1 ; <<4 x float>*>:176 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 2 ; <<4 x float>*>:177 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 3 ; <<4 x float>*>:178 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 1 ; <<4 x float>*>:179 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 2 ; <<4 x float>*>:180 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 3 ; <<4 x float>*>:181 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 1 ; <<4 x float>*>:182 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 2 ; <<4 x float>*>:183 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 3 ; <<4 x float>*>:184 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 1 ; <<4 x float>*>:185 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 2 ; <<4 x float>*>:186 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 3 ; <<4 x float>*>:187 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 1 ; <<4 x float>*>:188 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 2 ; <<4 x float>*>:189 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 3 ; <<4 x float>*>:190 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 1 ; <<4 x float>*>:191 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 2 ; <<4 x float>*>:192 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 3 ; <<4 x float>*>:193 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 1 ; <<4 x float>*>:194 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 2 ; <<4 x float>*>:195 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 3 ; <<4 x float>*>:196 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 1 ; <<4 x float>*>:197 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 2 ; <<4 x float>*>:198 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 3 ; <<4 x float>*>:199 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 1 ; <<4 x float>*>:200 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 2 ; <<4 x float>*>:201 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 3 ; <<4 x float>*>:202 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 1 ; <<4 x float>*>:203 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 2 ; <<4 x float>*>:204 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 3 ; <<4 x float>*>:205 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 1 ; <<4 x float>*>:206 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 2 ; <<4 x float>*>:207 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 3 ; <<4 x float>*>:208 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 1 ; <<4 x float>*>:209 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 2 ; <<4 x float>*>:210 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 3 ; <<4 x float>*>:211 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 1 ; <<4 x float>*>:212 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 2 ; <<4 x float>*>:213 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 3 ; <<4 x float>*>:214 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 1 ; <<4 x float>*>:215 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 2 ; <<4 x float>*>:216 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 3 ; <<4 x float>*>:217 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 1 ; <<4 x float>*>:218 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 2 ; <<4 x float>*>:219 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 3 ; <<4 x float>*>:220 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 1 ; <<4 x float>*>:221 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 2 ; <<4 x float>*>:222 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 3 ; <<4 x float>*>:223 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 1 ; <<4 x float>*>:224 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 2 ; <<4 x float>*>:225 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 3 ; <<4 x float>*>:226 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 1 ; <<4 x float>*>:227 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 2 ; <<4 x float>*>:228 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 3 ; <<4 x float>*>:229 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 1 ; <<4 x float>*>:230 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 2 ; <<4 x float>*>:231 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 3 ; <<4 x float>*>:232 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 1 ; <<4 x float>*>:233 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 2 ; <<4 x float>*>:234 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 3 ; <<4 x float>*>:235 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 1 ; <<4 x float>*>:236 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 2 ; <<4 x float>*>:237 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 3 ; <<4 x float>*>:238 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 1 ; <<4 x float>*>:239 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 2 ; <<4 x float>*>:240 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 3 ; <<4 x float>*>:241 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 1 ; <<4 x float>*>:242 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 2 ; <<4 x float>*>:243 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 3 ; <<4 x float>*>:244 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 1 ; <<4 x float>*>:245 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 2 ; <<4 x float>*>:246 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 3 ; <<4 x float>*>:247 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 1 ; <<4 x float>*>:248 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 2 ; <<4 x float>*>:249 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 3 ; <<4 x float>*>:250 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 1 ; <<4 x float>*>:251 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 2 ; <<4 x float>*>:252 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 3 ; <<4 x float>*>:253 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 1 ; <<4 x float>*>:254 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 2 ; <<4 x float>*>:255 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 3 ; <<4 x float>*>:256 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 1 ; <<4 x float>*>:257 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 2 ; <<4 x float>*>:258 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 3 ; <<4 x float>*>:259 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 1 ; <<4 x float>*>:260 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 2 ; <<4 x float>*>:261 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 3 ; <<4 x float>*>:262 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 1 ; <<4 x float>*>:263 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 2 ; <<4 x float>*>:264 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 3 ; <<4 x float>*>:265 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 1 ; <<4 x float>*>:266 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 2 ; <<4 x float>*>:267 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 3 ; <<4 x float>*>:268 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 1 ; <<4 x float>*>:269 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 2 ; <<4 x float>*>:270 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 3 ; <<4 x float>*>:271 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 1 ; <<4 x float>*>:272 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 2 ; <<4 x float>*>:273 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 3 ; <<4 x float>*>:274 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 1 ; <<4 x float>*>:275 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 2 ; <<4 x float>*>:276 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 3 ; <<4 x float>*>:277 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 1 ; <<4 x float>*>:278 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 2 ; <<4 x float>*>:279 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 3 ; <<4 x float>*>:280 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 1 ; <<4 x float>*>:281 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 2 ; <<4 x float>*>:282 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 3 ; <<4 x float>*>:283 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 1 ; <<4 x float>*>:284 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 2 ; <<4 x float>*>:285 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 3 ; <<4 x float>*>:286 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 1 ; <<4 x float>*>:287 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 2 ; <<4 x float>*>:288 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 3 ; <<4 x float>*>:289 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 1 ; <<4 x float>*>:290 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 2 ; <<4 x float>*>:291 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 3 ; <<4 x float>*>:292 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 1 ; <<4 x float>*>:293 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 2 ; <<4 x float>*>:294 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 3 ; <<4 x float>*>:295 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 1 ; <<4 x float>*>:296 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 2 ; <<4 x float>*>:297 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 3 ; <<4 x float>*>:298 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 1 ; <<4 x float>*>:299 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 2 ; <<4 x float>*>:300 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 3 ; <<4 x float>*>:301 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 1 ; <<4 x float>*>:302 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 2 ; <<4 x float>*>:303 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 3 ; <<4 x float>*>:304 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 1 ; <<4 x float>*>:305 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 2 ; <<4 x float>*>:306 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 3 ; <<4 x float>*>:307 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 1 ; <<4 x float>*>:308 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 2 ; <<4 x float>*>:309 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 3 ; <<4 x float>*>:310 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 1 ; <<4 x float>*>:311 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 2 ; <<4 x float>*>:312 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 3 ; <<4 x float>*>:313 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 1 ; <<4 x float>*>:314 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 2 ; <<4 x float>*>:315 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 3 ; <<4 x float>*>:316 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 1 ; <<4 x float>*>:317 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 2 ; <<4 x float>*>:318 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 3 ; <<4 x float>*>:319 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 1 ; <<4 x float>*>:320 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 2 ; <<4 x float>*>:321 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 3 ; <<4 x float>*>:322 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 1 ; <<4 x float>*>:323 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 2 ; <<4 x float>*>:324 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 3 ; <<4 x float>*>:325 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 1 ; <<4 x float>*>:326 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 2 ; <<4 x float>*>:327 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 3 ; <<4 x float>*>:328 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 1 ; <<4 x float>*>:329 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 2 ; <<4 x float>*>:330 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 3 ; <<4 x float>*>:331 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 1 ; <<4 x float>*>:332 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 2 ; <<4 x float>*>:333 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 3 ; <<4 x float>*>:334 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 1 ; <<4 x float>*>:335 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 2 ; <<4 x float>*>:336 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 3 ; <<4 x float>*>:337 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 1 ; <<4 x float>*>:338 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 2 ; <<4 x float>*>:339 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 3 ; <<4 x float>*>:340 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 1 ; <<4 x float>*>:341 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 2 ; <<4 x float>*>:342 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 3 ; <<4 x float>*>:343 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 1 ; <<4 x float>*>:344 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 2 ; <<4 x float>*>:345 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 3 ; <<4 x float>*>:346 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 1 ; <<4 x float>*>:347 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 2 ; <<4 x float>*>:348 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 3 ; <<4 x float>*>:349 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 1 ; <<4 x float>*>:350 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 2 ; <<4 x float>*>:351 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 3 ; <<4 x float>*>:352 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 1 ; <<4 x float>*>:353 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 2 ; <<4 x float>*>:354 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 3 ; <<4 x float>*>:355 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 1 ; <<4 x float>*>:356 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 2 ; <<4 x float>*>:357 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 3 ; <<4 x float>*>:358 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 1 ; <<4 x float>*>:359 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 2 ; <<4 x float>*>:360 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 3 ; <<4 x float>*>:361 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 1 ; <<4 x float>*>:362 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 2 ; <<4 x float>*>:363 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 3 ; <<4 x float>*>:364 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 1 ; <<4 x float>*>:365 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 2 ; <<4 x float>*>:366 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 3 ; <<4 x float>*>:367 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 1 ; <<4 x float>*>:368 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 2 ; <<4 x float>*>:369 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 3 ; <<4 x float>*>:370 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 1 ; <<4 x float>*>:371 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 2 ; <<4 x float>*>:372 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 3 ; <<4 x float>*>:373 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 1 ; <<4 x float>*>:374 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 2 ; <<4 x float>*>:375 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 3 ; <<4 x float>*>:376 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 1 ; <<4 x float>*>:377 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 2 ; <<4 x float>*>:378 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 3 ; <<4 x float>*>:379 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 1 ; <<4 x float>*>:380 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 2 ; <<4 x float>*>:381 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 3 ; <<4 x float>*>:382 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 1 ; <<4 x float>*>:383 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 2 ; <<4 x float>*>:384 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 3 ; <<4 x float>*>:385 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 1 ; <<4 x float>*>:386 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 2 ; <<4 x float>*>:387 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 3 ; <<4 x float>*>:388 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 1 ; <<4 x float>*>:389 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 2 ; <<4 x float>*>:390 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 3 ; <<4 x float>*>:391 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 1 ; <<4 x float>*>:392 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 2 ; <<4 x float>*>:393 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 3 ; <<4 x float>*>:394 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 1 ; <<4 x float>*>:395 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 2 ; <<4 x float>*>:396 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 3 ; <<4 x float>*>:397 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 1 ; <<4 x float>*>:398 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 2 ; <<4 x float>*>:399 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 3 ; <<4 x float>*>:400 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 1 ; <<4 x float>*>:401 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 2 ; <<4 x float>*>:402 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 3 ; <<4 x float>*>:403 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 1 ; <<4 x float>*>:404 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 2 ; <<4 x float>*>:405 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 3 ; <<4 x float>*>:406 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 1 ; <<4 x float>*>:407 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 2 ; <<4 x float>*>:408 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 3 ; <<4 x float>*>:409 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 1 ; <<4 x float>*>:410 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 2 ; <<4 x float>*>:411 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 3 ; <<4 x float>*>:412 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 1 ; <<4 x float>*>:413 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 2 ; <<4 x float>*>:414 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 3 ; <<4 x float>*>:415 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 1 ; <<4 x float>*>:416 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 2 ; <<4 x float>*>:417 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 3 ; <<4 x float>*>:418 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 1 ; <<4 x float>*>:419 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 2 ; <<4 x float>*>:420 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 3 ; <<4 x float>*>:421 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 1 ; <<4 x float>*>:422 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 2 ; <<4 x float>*>:423 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 3 ; <<4 x float>*>:424 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 1 ; <<4 x float>*>:425 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 2 ; <<4 x float>*>:426 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 3 ; <<4 x float>*>:427 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 1 ; <<4 x float>*>:428 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 2 ; <<4 x float>*>:429 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 3 ; <<4 x float>*>:430 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 1 ; <<4 x float>*>:431 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 2 ; <<4 x float>*>:432 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 3 ; <<4 x float>*>:433 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 1 ; <<4 x float>*>:434 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 2 ; <<4 x float>*>:435 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 3 ; <<4 x float>*>:436 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 1 ; <<4 x float>*>:437 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 2 ; <<4 x float>*>:438 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 3 ; <<4 x float>*>:439 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 1 ; <<4 x float>*>:440 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 2 ; <<4 x float>*>:441 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 3 ; <<4 x float>*>:442 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 1 ; <<4 x float>*>:443 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 2 ; <<4 x float>*>:444 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 3 ; <<4 x float>*>:445 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 1 ; <<4 x float>*>:446 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 2 ; <<4 x float>*>:447 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 3 ; <<4 x float>*>:448 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 1 ; <<4 x float>*>:449 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 2 ; <<4 x float>*>:450 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 3 ; <<4 x float>*>:451 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 1 ; <<4 x float>*>:452 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 2 ; <<4 x float>*>:453 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 3 ; <<4 x float>*>:454 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 1 ; <<4 x float>*>:455 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 2 ; <<4 x float>*>:456 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 3 ; <<4 x float>*>:457 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 1 ; <<4 x float>*>:458 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 2 ; <<4 x float>*>:459 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 3 ; <<4 x float>*>:460 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 1 ; <<4 x float>*>:461 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 2 ; <<4 x float>*>:462 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 3 ; <<4 x float>*>:463 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 1 ; <<4 x float>*>:464 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 2 ; <<4 x float>*>:465 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 3 ; <<4 x float>*>:466 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 1 ; <<4 x float>*>:467 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 2 ; <<4 x float>*>:468 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 3 ; <<4 x float>*>:469 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 1 ; <<4 x float>*>:470 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 2 ; <<4 x float>*>:471 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 3 ; <<4 x float>*>:472 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 1 ; <<4 x float>*>:473 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 2 ; <<4 x float>*>:474 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 3 ; <<4 x float>*>:475 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 1 ; <<4 x float>*>:476 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 2 ; <<4 x float>*>:477 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 3 ; <<4 x float>*>:478 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 1 ; <<4 x float>*>:479 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 2 ; <<4 x float>*>:480 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 3 ; <<4 x float>*>:481 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 1 ; <<4 x float>*>:482 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 2 ; <<4 x float>*>:483 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 3 ; <<4 x float>*>:484 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:485 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:486 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:487 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:488 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:489 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:490 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 1 ; <<4 x float>*>:491 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 2 ; <<4 x float>*>:492 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 3 ; <<4 x float>*>:493 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 1 ; <<4 x float>*>:494 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 2 ; <<4 x float>*>:495 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 3 ; <<4 x float>*>:496 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 1 ; <<4 x float>*>:497 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 2 ; <<4 x float>*>:498 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 3 ; <<4 x float>*>:499 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 1 ; <<4 x float>*>:500 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 2 ; <<4 x float>*>:501 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 3 ; <<4 x float>*>:502 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 1 ; <<4 x float>*>:503 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 2 ; <<4 x float>*>:504 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 3 ; <<4 x float>*>:505 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 1 ; <<4 x float>*>:506 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 2 ; <<4 x float>*>:507 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 3 ; <<4 x float>*>:508 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 1 ; <<4 x float>*>:509 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 2 ; <<4 x float>*>:510 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 3 ; <<4 x float>*>:511 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 1 ; <<4 x float>*>:512 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 2 ; <<4 x float>*>:513 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 3 ; <<4 x float>*>:514 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 1 ; <<4 x float>*>:515 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 2 ; <<4 x float>*>:516 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 3 ; <<4 x float>*>:517 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 1 ; <<4 x float>*>:518 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 2 ; <<4 x float>*>:519 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 3 ; <<4 x float>*>:520 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 1 ; <<4 x float>*>:521 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 2 ; <<4 x float>*>:522 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 3 ; <<4 x float>*>:523 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 1 ; <<4 x float>*>:524 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 2 ; <<4 x float>*>:525 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 3 ; <<4 x float>*>:526 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:527 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:528 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:529 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:530 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:531 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:532 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:533 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:534 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:535 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 1 ; <<4 x float>*>:536 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 2 ; <<4 x float>*>:537 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 3 ; <<4 x float>*>:538 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 1 ; <<4 x float>*>:539 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 2 ; <<4 x float>*>:540 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 3 ; <<4 x float>*>:541 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:542 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:543 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:544 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 1 ; <<4 x float>*>:545 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 2 ; <<4 x float>*>:546 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 3 ; <<4 x float>*>:547 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 1 ; <<4 x float>*>:548 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 2 ; <<4 x float>*>:549 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 3 ; <<4 x float>*>:550 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:551 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:552 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:553 [#uses=1]
+ load <4 x float>, <4 x float>* %553 ; <<4 x float>>:554 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 3 ; <<4 x float>*>:555 [#uses=0]
shufflevector <4 x float> %554, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:556 [#uses=1]
call <4 x i32> @llvm.ppc.altivec.vcmpgtfp( <4 x float> zeroinitializer, <4 x float> %556 ) ; <<4 x i32>>:557 [#uses=0]
bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:558 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:559 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:560 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:559 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:560 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %560
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:561 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:562 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:563 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:564 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:561 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:562 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:563 [#uses=0]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:564 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:565 [#uses=1]
store <4 x float> %565, <4 x float>* null
icmp eq i32 0, 0 ; <i1>:566 [#uses=1]
br i1 %566, label %.critedge, label %xPIF.exit
.critedge: ; preds = %xOperationInitMasks.exit
- getelementptr [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:567 [#uses=0]
+ getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:567 [#uses=0]
and <4 x i32> zeroinitializer, zeroinitializer ; <<4 x i32>>:568 [#uses=0]
or <4 x i32> zeroinitializer, zeroinitializer ; <<4 x i32>>:569 [#uses=0]
icmp eq i32 0, 0 ; <i1>:570 [#uses=1]
@@ -583,24 +583,24 @@ xOperationInitMasks.exit:
br label %xPIF.exit
xPIF.exit: ; preds = %.critedge7898, %xOperationInitMasks.exit
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:571 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:572 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:571 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:572 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:573 [#uses=0]
icmp eq i32 0, 0 ; <i1>:574 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:575 [#uses=0]
- load <4 x float>* %0 ; <<4 x float>>:576 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:575 [#uses=0]
+ load <4 x float>, <4 x float>* %0 ; <<4 x float>>:576 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:577 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 0 ; <<4 x float>*>:578 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:579 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:580 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:581 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:582 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:583 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:584 [#uses=1]
- load <4 x float>* %584 ; <<4 x float>>:585 [#uses=1]
- load <4 x float>* null ; <<4 x float>>:586 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:587 [#uses=1]
- load <4 x float>* %587 ; <<4 x float>>:588 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 0 ; <<4 x float>*>:578 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:579 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:580 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:581 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:582 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:583 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:584 [#uses=1]
+ load <4 x float>, <4 x float>* %584 ; <<4 x float>>:585 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:586 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:587 [#uses=1]
+ load <4 x float>, <4 x float>* %587 ; <<4 x float>>:588 [#uses=1]
shufflevector <4 x float> %583, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:589 [#uses=1]
shufflevector <4 x float> %585, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:590 [#uses=1]
shufflevector <4 x float> %588, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:591 [#uses=1]
@@ -608,32 +608,32 @@ xPIF.exit: ; preds = %.critedge7898, %xOperationInitMasks.exit
fmul <4 x float> zeroinitializer, %590 ; <<4 x float>>:593 [#uses=0]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:594 [#uses=1]
fmul <4 x float> zeroinitializer, %591 ; <<4 x float>>:595 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:596 [#uses=2]
- load <4 x float>* %596 ; <<4 x float>>:597 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:596 [#uses=2]
+ load <4 x float>, <4 x float>* %596 ; <<4 x float>>:597 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %596
- load <4 x float>* null ; <<4 x float>>:598 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:599 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:598 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:599 [#uses=0]
shufflevector <4 x float> %594, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:600 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:601 [#uses=2]
- load <4 x float>* %601 ; <<4 x float>>:602 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:601 [#uses=2]
+ load <4 x float>, <4 x float>* %601 ; <<4 x float>>:602 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %601
- load <4 x float>* null ; <<4 x float>>:603 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:604 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:605 [#uses=1]
- load <4 x float>* %605 ; <<4 x float>>:606 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:603 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:604 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:605 [#uses=1]
+ load <4 x float>, <4 x float>* %605 ; <<4 x float>>:606 [#uses=1]
fsub <4 x float> zeroinitializer, %604 ; <<4 x float>>:607 [#uses=2]
fsub <4 x float> zeroinitializer, %606 ; <<4 x float>>:608 [#uses=2]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:609 [#uses=0]
br i1 false, label %617, label %610
; <label>:610 ; preds = %xPIF.exit
- load <4 x float>* null ; <<4 x float>>:611 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:612 [#uses=2]
- load <4 x float>* %612 ; <<4 x float>>:613 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:611 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:612 [#uses=2]
+ load <4 x float>, <4 x float>* %612 ; <<4 x float>>:613 [#uses=1]
shufflevector <4 x float> %607, <4 x float> %613, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:614 [#uses=1]
store <4 x float> %614, <4 x float>* %612
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:615 [#uses=2]
- load <4 x float>* %615 ; <<4 x float>>:616 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:615 [#uses=2]
+ load <4 x float>, <4 x float>* %615 ; <<4 x float>>:616 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %615
br label %xST.exit400
@@ -645,33 +645,33 @@ xPIF.exit: ; preds = %.critedge7898, %xOperationInitMasks.exit
br i1 %621, label %625, label %622
; <label>:622 ; preds = %617
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:623 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:623 [#uses=0]
shufflevector <4 x float> %607, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:624 [#uses=0]
br label %625
; <label>:625 ; preds = %622, %617
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:626 [#uses=0]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:626 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:627 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:628 [#uses=1]
- load <4 x float>* %628 ; <<4 x float>>:629 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:630 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:628 [#uses=1]
+ load <4 x float>, <4 x float>* %628 ; <<4 x float>>:629 [#uses=0]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:630 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:631 [#uses=1]
icmp eq i32 %631, 0 ; <i1>:632 [#uses=1]
br i1 %632, label %xST.exit400, label %633
; <label>:633 ; preds = %625
- load <4 x float>* null ; <<4 x float>>:634 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:634 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %634, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:635 [#uses=1]
store <4 x float> %635, <4 x float>* null
br label %xST.exit400
xST.exit400: ; preds = %633, %625, %610
%.17218 = phi <4 x float> [ zeroinitializer, %610 ], [ %608, %633 ], [ %608, %625 ] ; <<4 x float>> [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:636 [#uses=1]
- load <4 x float>* %636 ; <<4 x float>>:637 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:638 [#uses=2]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:639 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:640 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:636 [#uses=1]
+ load <4 x float>, <4 x float>* %636 ; <<4 x float>>:637 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:638 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:639 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:640 [#uses=2]
fmul <4 x float> %638, %638 ; <<4 x float>>:641 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:642 [#uses=0]
fmul <4 x float> %640, %640 ; <<4 x float>>:643 [#uses=2]
@@ -691,12 +691,12 @@ xST.exit400: ; preds = %633, %625, %610
br i1 %656, label %665, label %657
; <label>:657 ; preds = %xST.exit400
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:658 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:658 [#uses=0]
shufflevector <4 x float> %653, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:659 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:660 [#uses=1]
- load <4 x float>* %660 ; <<4 x float>>:661 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:662 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:663 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:660 [#uses=1]
+ load <4 x float>, <4 x float>* %660 ; <<4 x float>>:661 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:662 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:663 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:664 [#uses=0]
br label %xST.exit402
@@ -705,7 +705,7 @@ xST.exit400: ; preds = %633, %625, %610
br i1 false, label %669, label %667
; <label>:667 ; preds = %665
- load <4 x float>* null ; <<4 x float>>:668 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:668 [#uses=0]
br label %669
; <label>:669 ; preds = %667, %665
@@ -713,12 +713,12 @@ xST.exit400: ; preds = %633, %625, %610
br label %xST.exit402
xST.exit402: ; preds = %669, %657
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:671 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:672 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:673 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:674 [#uses=1]
- load <4 x float>* %674 ; <<4 x float>>:675 [#uses=1]
- load <4 x float>* null ; <<4 x float>>:676 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:671 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:672 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:673 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:674 [#uses=1]
+ load <4 x float>, <4 x float>* %674 ; <<4 x float>>:675 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:676 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:677 [#uses=1]
shufflevector <4 x float> %675, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:678 [#uses=1]
fmul <4 x float> zeroinitializer, %677 ; <<4 x float>>:679 [#uses=0]
@@ -728,68 +728,68 @@ xST.exit402: ; preds = %669, %657
br i1 %682, label %689, label %683
; <label>:683 ; preds = %xST.exit402
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:684 [#uses=1]
- load <4 x float>* %684 ; <<4 x float>>:685 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:686 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:687 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:684 [#uses=1]
+ load <4 x float>, <4 x float>* %684 ; <<4 x float>>:685 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:686 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:687 [#uses=0]
shufflevector <4 x float> %681, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:688 [#uses=0]
br label %xST.exit405
; <label>:689 ; preds = %xST.exit402
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:690 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:691 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:691 [#uses=1]
shufflevector <4 x i32> %691, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:692 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %692, <4 x i32> zeroinitializer ) ; <i32>:693 [#uses=1]
icmp eq i32 %693, 0 ; <i1>:694 [#uses=0]
br label %xST.exit405
xST.exit405: ; preds = %689, %683
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:695 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:695 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:696 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:697 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:698 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:699 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:698 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:699 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:700 [#uses=1]
fadd <4 x float> zeroinitializer, %700 ; <<4 x float>>:701 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:702 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:702 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %702, <4 x i32> zeroinitializer ) ; <i32>:703 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:704 [#uses=2]
- load <4 x float>* %704 ; <<4 x float>>:705 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:704 [#uses=2]
+ load <4 x float>, <4 x float>* %704 ; <<4 x float>>:705 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %704
- load <4 x float>* null ; <<4 x float>>:706 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:706 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:707 [#uses=2]
- load <4 x float>* %707 ; <<4 x float>>:708 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:707 [#uses=2]
+ load <4 x float>, <4 x float>* %707 ; <<4 x float>>:708 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %707
- load <4 x float>* null ; <<4 x float>>:709 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:710 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:711 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:709 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:710 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:711 [#uses=1]
shufflevector <4 x float> %711, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:712 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:713 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:714 [#uses=1]
- load <4 x float>* %714 ; <<4 x float>>:715 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:713 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:714 [#uses=1]
+ load <4 x float>, <4 x float>* %714 ; <<4 x float>>:715 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:716 [#uses=0]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:717 [#uses=1]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:718 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 0 ; <<4 x float>*>:719 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:718 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 0 ; <<4 x float>*>:719 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %719
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:720 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:720 [#uses=1]
shufflevector <4 x float> %717, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:721 [#uses=1]
store <4 x float> %721, <4 x float>* %720
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:722 [#uses=1]
- load <4 x float>* %722 ; <<4 x float>>:723 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:722 [#uses=1]
+ load <4 x float>, <4 x float>* %722 ; <<4 x float>>:723 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %723, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:724 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:725 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:725 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %725
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:726 [#uses=1]
- load <4 x float>* %726 ; <<4 x float>>:727 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:728 [#uses=1]
- load <4 x float>* %728 ; <<4 x float>>:729 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:730 [#uses=1]
- load <4 x float>* %730 ; <<4 x float>>:731 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:732 [#uses=1]
- load <4 x float>* %732 ; <<4 x float>>:733 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:734 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:726 [#uses=1]
+ load <4 x float>, <4 x float>* %726 ; <<4 x float>>:727 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:728 [#uses=1]
+ load <4 x float>, <4 x float>* %728 ; <<4 x float>>:729 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:730 [#uses=1]
+ load <4 x float>, <4 x float>* %730 ; <<4 x float>>:731 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:732 [#uses=1]
+ load <4 x float>, <4 x float>* %732 ; <<4 x float>>:733 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:734 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:735 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:736 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:737 [#uses=1]
@@ -797,28 +797,28 @@ xST.exit405: ; preds = %689, %683
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:739 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:740 [#uses=1]
icmp eq i32 %740, 0 ; <i1>:741 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:742 [#uses=2]
- load <4 x float>* %742 ; <<4 x float>>:743 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:742 [#uses=2]
+ load <4 x float>, <4 x float>* %742 ; <<4 x float>>:743 [#uses=1]
shufflevector <4 x float> %736, <4 x float> %743, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:744 [#uses=1]
store <4 x float> %744, <4 x float>* %742
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:745 [#uses=1]
- load <4 x float>* %745 ; <<4 x float>>:746 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:745 [#uses=1]
+ load <4 x float>, <4 x float>* %745 ; <<4 x float>>:746 [#uses=1]
shufflevector <4 x float> %737, <4 x float> %746, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:747 [#uses=0]
shufflevector <4 x float> %738, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:748 [#uses=1]
store <4 x float> %748, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:749 [#uses=1]
- load <4 x float>* %749 ; <<4 x float>>:750 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:749 [#uses=1]
+ load <4 x float>, <4 x float>* %749 ; <<4 x float>>:750 [#uses=1]
shufflevector <4 x float> %739, <4 x float> %750, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:751 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:752 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:753 [#uses=1]
- load <4 x float>* %753 ; <<4 x float>>:754 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:755 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:756 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:752 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:753 [#uses=1]
+ load <4 x float>, <4 x float>* %753 ; <<4 x float>>:754 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:755 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:756 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:757 [#uses=1]
shufflevector <4 x float> %756, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:758 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:759 [#uses=1]
- load <4 x float>* %759 ; <<4 x float>>:760 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:761 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:759 [#uses=1]
+ load <4 x float>, <4 x float>* %759 ; <<4 x float>>:760 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:761 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:762 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:763 [#uses=1]
fadd <4 x float> %757, zeroinitializer ; <<4 x float>>:764 [#uses=0]
@@ -827,12 +827,12 @@ xST.exit405: ; preds = %689, %683
br i1 false, label %773, label %767
; <label>:767 ; preds = %xST.exit405
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:768 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:769 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:768 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:769 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %769, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:770 [#uses=1]
store <4 x float> %770, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:771 [#uses=1]
- load <4 x float>* %771 ; <<4 x float>>:772 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:771 [#uses=1]
+ load <4 x float>, <4 x float>* %771 ; <<4 x float>>:772 [#uses=0]
br label %xST.exit422
; <label>:773 ; preds = %xST.exit405
@@ -840,30 +840,30 @@ xST.exit405: ; preds = %689, %683
xST.exit422: ; preds = %773, %767
%.07267 = phi <4 x float> [ %766, %767 ], [ undef, %773 ] ; <<4 x float>> [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:774 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:774 [#uses=0]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:775 [#uses=0]
icmp eq i32 0, 0 ; <i1>:776 [#uses=1]
br i1 %776, label %780, label %777
; <label>:777 ; preds = %xST.exit422
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:778 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:779 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:778 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:779 [#uses=0]
br label %xST.exit431
; <label>:780 ; preds = %xST.exit422
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:781 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:782 [#uses=2]
- load <4 x float>* %782 ; <<4 x float>>:783 [#uses=0]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:781 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:782 [#uses=2]
+ load <4 x float>, <4 x float>* %782 ; <<4 x float>>:783 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %782
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:784 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:784 [#uses=1]
shufflevector <4 x i32> %784, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:785 [#uses=0]
icmp eq i32 0, 0 ; <i1>:786 [#uses=0]
br label %xST.exit431
xST.exit431: ; preds = %780, %777
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:787 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:788 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:789 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:787 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:788 [#uses=0]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:789 [#uses=2]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %789, <4 x i32> zeroinitializer ) ; <i32>:790 [#uses=1]
icmp eq i32 %790, 0 ; <i1>:791 [#uses=0]
shufflevector <4 x i32> %789, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:792 [#uses=1]
@@ -872,7 +872,7 @@ xST.exit431: ; preds = %780, %777
br i1 %794, label %797, label %795
; <label>:795 ; preds = %xST.exit431
- load <4 x float>* null ; <<4 x float>>:796 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:796 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* null
br label %797
@@ -882,38 +882,38 @@ xST.exit431: ; preds = %780, %777
br i1 false, label %xST.exit434, label %799
; <label>:799 ; preds = %797
- load <4 x float>* null ; <<4 x float>>:800 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:800 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* null
br label %xST.exit434
xST.exit434: ; preds = %799, %797
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:801 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:801 [#uses=1]
shufflevector <4 x i32> %801, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:802 [#uses=0]
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:803 [#uses=0]
icmp eq i32 0, 0 ; <i1>:804 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:805 [#uses=1]
- load <4 x float>* %805 ; <<4 x float>>:806 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:807 [#uses=1]
- load <4 x float>* %807 ; <<4 x float>>:808 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:809 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:810 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:811 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:812 [#uses=1]
- load <4 x float>* %812 ; <<4 x float>>:813 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:814 [#uses=1]
- load <4 x float>* %814 ; <<4 x float>>:815 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:805 [#uses=1]
+ load <4 x float>, <4 x float>* %805 ; <<4 x float>>:806 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:807 [#uses=1]
+ load <4 x float>, <4 x float>* %807 ; <<4 x float>>:808 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:809 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:810 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:811 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:812 [#uses=1]
+ load <4 x float>, <4 x float>* %812 ; <<4 x float>>:813 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:814 [#uses=1]
+ load <4 x float>, <4 x float>* %814 ; <<4 x float>>:815 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:816 [#uses=0]
unreachable
xPBRK.exit: ; preds = %.critedge
store <4 x i32> < i32 -1, i32 -1, i32 -1, i32 -1 >, <4 x i32>* %.sub7896
store <4 x i32> zeroinitializer, <4 x i32>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:817 [#uses=1]
- load <4 x float>* %817 ; <<4 x float>>:818 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:819 [#uses=1]
- load <4 x float>* %819 ; <<4 x float>>:820 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:821 [#uses=1]
- load <4 x float>* %821 ; <<4 x float>>:822 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:817 [#uses=1]
+ load <4 x float>, <4 x float>* %817 ; <<4 x float>>:818 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:819 [#uses=1]
+ load <4 x float>, <4 x float>* %819 ; <<4 x float>>:820 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:821 [#uses=1]
+ load <4 x float>, <4 x float>* %821 ; <<4 x float>>:822 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:823 [#uses=1]
shufflevector <4 x float> %818, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:824 [#uses=1]
shufflevector <4 x float> %820, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:825 [#uses=1]
@@ -921,10 +921,10 @@ xPBRK.exit: ; preds = %.critedge
shufflevector <4 x float> %823, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:827 [#uses=0]
shufflevector <4 x float> %824, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:828 [#uses=1]
store <4 x float> %828, <4 x float>* null
- load <4 x float>* null ; <<4 x float>>:829 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:829 [#uses=1]
shufflevector <4 x float> %825, <4 x float> %829, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:830 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:831 [#uses=2]
- load <4 x float>* %831 ; <<4 x float>>:832 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:831 [#uses=2]
+ load <4 x float>, <4 x float>* %831 ; <<4 x float>>:832 [#uses=1]
shufflevector <4 x float> %826, <4 x float> %832, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:833 [#uses=1]
store <4 x float> %833, <4 x float>* %831
br label %xLS.exit449
@@ -958,14 +958,14 @@ xLS.exit449: ; preds = %1215, %xPBRK.exit
%.17731 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07730, %1215 ] ; <<4 x float>> [#uses=2]
%.17735 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07734, %1215 ] ; <<4 x float>> [#uses=2]
%.17770 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07769, %1215 ] ; <<4 x float>> [#uses=2]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:834 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:835 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:836 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:837 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:834 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:835 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:836 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:837 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:838 [#uses=0]
shufflevector <4 x float> %835, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:839 [#uses=1]
- getelementptr <4 x float>* null, i32 878 ; <<4 x float>*>:840 [#uses=1]
- load <4 x float>* %840 ; <<4 x float>>:841 [#uses=0]
+ getelementptr <4 x float>, <4 x float>* null, i32 878 ; <<4 x float>*>:840 [#uses=1]
+ load <4 x float>, <4 x float>* %840 ; <<4 x float>>:841 [#uses=0]
call <4 x float> @llvm.ppc.altivec.vcfsx( <4 x i32> zeroinitializer, i32 0 ) ; <<4 x float>>:842 [#uses=1]
shufflevector <4 x float> %842, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:843 [#uses=2]
call <4 x i32> @llvm.ppc.altivec.vcmpgtfp( <4 x float> %843, <4 x float> %839 ) ; <<4 x i32>>:844 [#uses=1]
@@ -977,7 +977,7 @@ xLS.exit449: ; preds = %1215, %xPBRK.exit
; <label>:849 ; preds = %xLS.exit449
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:850 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:851 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:851 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %851
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:852 [#uses=1]
store <4 x float> %852, <4 x float>* null
@@ -989,8 +989,8 @@ xLS.exit449: ; preds = %1215, %xPBRK.exit
br i1 false, label %859, label %856
; <label>:856 ; preds = %854
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:857 [#uses=2]
- load <4 x float>* %857 ; <<4 x float>>:858 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:857 [#uses=2]
+ load <4 x float>, <4 x float>* %857 ; <<4 x float>>:858 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %857
br label %859
@@ -999,13 +999,13 @@ xLS.exit449: ; preds = %1215, %xPBRK.exit
br i1 false, label %864, label %861
; <label>:861 ; preds = %859
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:862 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:862 [#uses=1]
shufflevector <4 x float> %845, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:863 [#uses=1]
store <4 x float> %863, <4 x float>* %862
br label %864
; <label>:864 ; preds = %861, %859
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:865 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:865 [#uses=1]
shufflevector <4 x i32> %865, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:866 [#uses=0]
br i1 false, label %868, label %867
@@ -1018,9 +1018,9 @@ xLS.exit449: ; preds = %1215, %xPBRK.exit
br label %xST.exit451
xST.exit451: ; preds = %868, %849
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:870 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:871 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:872 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:870 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:871 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:872 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:873 [#uses=1]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:874 [#uses=1]
xor <4 x i32> %874, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>>:875 [#uses=0]
@@ -1029,16 +1029,16 @@ xST.exit451: ; preds = %868, %849
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:878 [#uses=1]
xor <4 x i32> %878, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>>:879 [#uses=1]
bitcast <4 x i32> %879 to <4 x float> ; <<4 x float>>:880 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:881 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:881 [#uses=1]
icmp eq i32 0, 0 ; <i1>:882 [#uses=1]
br i1 %882, label %888, label %883
; <label>:883 ; preds = %xST.exit451
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:884 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:884 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %884
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:885 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:885 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:886 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:887 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:887 [#uses=0]
br label %xST.exit453
; <label>:888 ; preds = %xST.exit451
@@ -1047,7 +1047,7 @@ xST.exit451: ; preds = %868, %849
br i1 false, label %894, label %891
; <label>:891 ; preds = %888
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:892 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:892 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:893 [#uses=1]
store <4 x float> %893, <4 x float>* %892
br label %894
@@ -1061,34 +1061,34 @@ xST.exit451: ; preds = %868, %849
br label %898
; <label>:898 ; preds = %897, %894
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:899 [#uses=0]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:899 [#uses=0]
br i1 false, label %xST.exit453, label %900
; <label>:900 ; preds = %898
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:901 [#uses=1]
- load <4 x float>* %901 ; <<4 x float>>:902 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:901 [#uses=1]
+ load <4 x float>, <4 x float>* %901 ; <<4 x float>>:902 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %902, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:903 [#uses=0]
br label %xST.exit453
xST.exit453: ; preds = %900, %898, %883
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:904 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:905 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:906 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:904 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:905 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:906 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:907 [#uses=1]
shufflevector <4 x float> %905, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:908 [#uses=1]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:909 [#uses=0]
bitcast <4 x float> %908 to <4 x i32> ; <<4 x i32>>:910 [#uses=0]
bitcast <4 x float> %907 to <4 x i32> ; <<4 x i32>>:911 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:912 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:913 [#uses=0]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:913 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 2, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:914 [#uses=0]
br i1 false, label %915, label %xPIF.exit455
; <label>:915 ; preds = %xST.exit453
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:916 [#uses=0]
- getelementptr [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:917 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:916 [#uses=0]
+ getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:917 [#uses=1]
store <4 x i32> zeroinitializer, <4 x i32>* %917
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:918 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:918 [#uses=1]
and <4 x i32> %918, zeroinitializer ; <<4 x i32>>:919 [#uses=0]
br label %.critedge7899
@@ -1101,16 +1101,16 @@ xPBRK.exit456: ; preds = %.critedge7899
unreachable
xPIF.exit455: ; preds = %xST.exit453
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:922 [#uses=1]
- load <4 x float>* %922 ; <<4 x float>>:923 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:924 [#uses=1]
- load <4 x float>* %924 ; <<4 x float>>:925 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:926 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:927 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:922 [#uses=1]
+ load <4 x float>, <4 x float>* %922 ; <<4 x float>>:923 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:924 [#uses=1]
+ load <4 x float>, <4 x float>* %924 ; <<4 x float>>:925 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:926 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:927 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:928 [#uses=0]
bitcast { { i16, i16, i32 } }* %1 to <4 x float>* ; <<4 x float>*>:929 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:930 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:931 [#uses=0]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:931 [#uses=0]
icmp eq i32 0, 0 ; <i1>:932 [#uses=1]
br i1 %932, label %934, label %933
@@ -1129,13 +1129,13 @@ xPIF.exit455: ; preds = %xST.exit453
xST.exit459: ; preds = %937, %934
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:938 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %938, <4 x i32> zeroinitializer ) ; <i32>:939 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:940 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:940 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %940
- load <4 x float>* null ; <<4 x float>>:941 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:941 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %941, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:942 [#uses=1]
store <4 x float> %942, <4 x float>* null
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:943 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:944 [#uses=0]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:944 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:945 [#uses=0]
br i1 false, label %947, label %946
@@ -1156,7 +1156,7 @@ xST.exit459: ; preds = %937, %934
br i1 false, label %955, label %953
; <label>:953 ; preds = %952
- getelementptr [4 x <4 x i32>]* null, i32 0, i32 2 ; <<4 x i32>*>:954 [#uses=0]
+ getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 2 ; <<4 x i32>*>:954 [#uses=0]
br label %955
; <label>:955 ; preds = %953, %952
@@ -1170,16 +1170,16 @@ xST.exit459: ; preds = %937, %934
br label %xStoreDestAddressWithMask.exit461
xStoreDestAddressWithMask.exit461: ; preds = %958, %955
- load <4 x float>* %0 ; <<4 x float>>:960 [#uses=0]
+ load <4 x float>, <4 x float>* %0 ; <<4 x float>>:960 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:961 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 0 ; <<4 x float>*>:962 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 0 ; <<4 x float>*>:962 [#uses=0]
br i1 false, label %968, label %xST.exit463
xST.exit463: ; preds = %xStoreDestAddressWithMask.exit461
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:963 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:964 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:965 [#uses=0]
- load <4 x float>* %0 ; <<4 x float>>:966 [#uses=3]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:963 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:964 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:965 [#uses=0]
+ load <4 x float>, <4 x float>* %0 ; <<4 x float>>:966 [#uses=3]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:967 [#uses=0]
br i1 false, label %972, label %969
@@ -1187,8 +1187,8 @@ xST.exit463: ; preds = %xStoreDestAddressWithMask.exit461
unreachable
; <label>:969 ; preds = %xST.exit463
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:970 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:971 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:970 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:971 [#uses=1]
store <4 x float> %966, <4 x float>* %971
store <4 x float> %966, <4 x float>* null
br label %xST.exit465
@@ -1197,39 +1197,39 @@ xST.exit463: ; preds = %xStoreDestAddressWithMask.exit461
call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <<4 x i32>>:973 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* null
store <4 x float> zeroinitializer, <4 x float>* null
- load <4 x float>* null ; <<4 x float>>:974 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:974 [#uses=0]
bitcast <4 x float> %966 to <4 x i32> ; <<4 x i32>>:975 [#uses=1]
call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> zeroinitializer, <4 x i32> %975, <4 x i32> zeroinitializer ) ; <<4 x i32>>:976 [#uses=1]
bitcast <4 x i32> %976 to <4 x float> ; <<4 x float>>:977 [#uses=1]
store <4 x float> %977, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:978 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:978 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:979 [#uses=1]
call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> %979, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <<4 x i32>>:980 [#uses=1]
bitcast <4 x i32> %980 to <4 x float> ; <<4 x float>>:981 [#uses=0]
br label %xST.exit465
xST.exit465: ; preds = %972, %969
- load <4 x float>* %0 ; <<4 x float>>:982 [#uses=3]
+ load <4 x float>, <4 x float>* %0 ; <<4 x float>>:982 [#uses=3]
icmp eq i32 0, 0 ; <i1>:983 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:984 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:984 [#uses=1]
br i1 %983, label %989, label %985
; <label>:985 ; preds = %xST.exit465
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:986 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:987 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:986 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:987 [#uses=1]
store <4 x float> %982, <4 x float>* %987
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:988 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:988 [#uses=0]
br label %xST.exit467
; <label>:989 ; preds = %xST.exit465
bitcast <4 x float> %982 to <4 x i32> ; <<4 x i32>>:990 [#uses=0]
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:991 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %984
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:992 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:993 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:994 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:992 [#uses=0]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:993 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:994 [#uses=0]
bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:995 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:996 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:996 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:997 [#uses=1]
bitcast <4 x float> %982 to <4 x i32> ; <<4 x i32>>:998 [#uses=1]
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:999 [#uses=1]
@@ -1238,17 +1238,17 @@ xST.exit465: ; preds = %972, %969
br label %xST.exit467
xST.exit467: ; preds = %989, %985
- load <4 x float>* %0 ; <<4 x float>>:1002 [#uses=5]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:1003 [#uses=2]
+ load <4 x float>, <4 x float>* %0 ; <<4 x float>>:1002 [#uses=5]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:1003 [#uses=2]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1003, <4 x i32> zeroinitializer ) ; <i32>:1004 [#uses=0]
br i1 false, label %1011, label %1005
; <label>:1005 ; preds = %xST.exit467
- load <4 x float>* null ; <<4 x float>>:1006 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1007 [#uses=1]
- load <4 x float>* %1007 ; <<4 x float>>:1008 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:1009 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1010 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1006 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1007 [#uses=1]
+ load <4 x float>, <4 x float>* %1007 ; <<4 x float>>:1008 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1009 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1010 [#uses=0]
br label %xST.exit469
; <label>:1011 ; preds = %xST.exit467
@@ -1266,7 +1266,7 @@ xST.exit467: ; preds = %989, %985
br i1 %1017, label %1021, label %1018
; <label>:1018 ; preds = %1015
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1019 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1019 [#uses=0]
shufflevector <4 x float> %1002, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:1020 [#uses=0]
br label %1021
@@ -1276,7 +1276,7 @@ xST.exit467: ; preds = %989, %985
br i1 %1022, label %1025, label %1023
; <label>:1023 ; preds = %1021
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1024 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1024 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %1024
br label %1025
@@ -1286,23 +1286,23 @@ xST.exit467: ; preds = %989, %985
br i1 %1026, label %xST.exit469, label %1027
; <label>:1027 ; preds = %1025
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1028 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1028 [#uses=0]
br label %xST.exit469
xST.exit469: ; preds = %1027, %1025, %1005
%.17463 = phi <4 x float> [ %.27464, %1005 ], [ %.07462, %1027 ], [ %.07462, %1025 ] ; <<4 x float>> [#uses=1]
%.17468 = phi <4 x float> [ %.27469, %1005 ], [ %.07467, %1027 ], [ %.07467, %1025 ] ; <<4 x float>> [#uses=1]
%.07489 = phi <4 x float> [ %1002, %1005 ], [ %.17490, %1027 ], [ %.17490, %1025 ] ; <<4 x float>> [#uses=1]
- load <4 x float>* null ; <<4 x float>>:1029 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:1030 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1029 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1030 [#uses=0]
fsub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1031 [#uses=1]
br i1 false, label %1037, label %1032
; <label>:1032 ; preds = %xST.exit469
- load <4 x float>* null ; <<4 x float>>:1033 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:1034 [#uses=1]
- load <4 x float>* %1034 ; <<4 x float>>:1035 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:1036 [#uses=0]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1033 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:1034 [#uses=1]
+ load <4 x float>, <4 x float>* %1034 ; <<4 x float>>:1035 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:1036 [#uses=0]
br label %xST.exit472
; <label>:1037 ; preds = %xST.exit469
@@ -1318,8 +1318,8 @@ xST.exit469: ; preds = %1027, %1025, %1005
br i1 %1041, label %1045, label %1042
; <label>:1042 ; preds = %1040
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:1043 [#uses=1]
- load <4 x float>* %1043 ; <<4 x float>>:1044 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:1043 [#uses=1]
+ load <4 x float>, <4 x float>* %1043 ; <<4 x float>>:1044 [#uses=0]
br label %1045
; <label>:1045 ; preds = %1042, %1040
@@ -1367,7 +1367,7 @@ xST.exit472: ; preds = %1050, %1048, %1032
br label %xST.exit474
xST.exit474: ; preds = %1059, %1058, %1051
- load <4 x float>* null ; <<4 x float>>:1060 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1060 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1061 [#uses=1]
fmul <4 x float> %1060, zeroinitializer ; <<4 x float>>:1062 [#uses=2]
br i1 false, label %1065, label %1063
@@ -1555,7 +1555,7 @@ xST.exit489: ; preds = %1109, %1108, %1101
br label %xST.exit492
xST.exit492: ; preds = %1118, %1117, %1110
- load <4 x float>* null ; <<4 x float>>:1119 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1119 [#uses=1]
fmul <4 x float> %1119, zeroinitializer ; <<4 x float>>:1120 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1121 [#uses=1]
br i1 false, label %1123, label %1122
@@ -1590,7 +1590,7 @@ xST.exit492: ; preds = %1118, %1117, %1110
xST.exit495: ; preds = %1130, %1129, %1122
%.07582 = phi <4 x float> [ %1121, %1122 ], [ %.17583, %1130 ], [ %.17583, %1129 ] ; <<4 x float>> [#uses=1]
%.07590 = phi <4 x float> [ %1120, %1122 ], [ %.17591, %1130 ], [ %.17591, %1129 ] ; <<4 x float>> [#uses=1]
- load <4 x float>* null ; <<4 x float>>:1131 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1131 [#uses=1]
fadd <4 x float> %1131, zeroinitializer ; <<4 x float>>:1132 [#uses=1]
fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1133 [#uses=1]
br i1 false, label %1135, label %1134
@@ -1625,11 +1625,11 @@ xST.exit495: ; preds = %1130, %1129, %1122
xST.exit498: ; preds = %1142, %1141, %1134
%.07617 = phi <4 x float> [ %1133, %1134 ], [ %.17618, %1142 ], [ %.17618, %1141 ] ; <<4 x float>> [#uses=1]
%.07621 = phi <4 x float> [ %1132, %1134 ], [ %.17622, %1142 ], [ %.17622, %1141 ] ; <<4 x float>> [#uses=1]
- load <4 x float>* null ; <<4 x float>>:1143 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1144 [#uses=1]
- load <4 x float>* %1144 ; <<4 x float>>:1145 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1146 [#uses=1]
- load <4 x float>* %1146 ; <<4 x float>>:1147 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1143 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1144 [#uses=1]
+ load <4 x float>, <4 x float>* %1144 ; <<4 x float>>:1145 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1146 [#uses=1]
+ load <4 x float>, <4 x float>* %1146 ; <<4 x float>>:1147 [#uses=1]
shufflevector <4 x float> %1143, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1148 [#uses=1]
shufflevector <4 x float> %1145, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1149 [#uses=1]
shufflevector <4 x float> %1147, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1150 [#uses=1]
@@ -1671,11 +1671,11 @@ xST.exit501: ; preds = %1163, %1162, %1155
%.07656 = phi <4 x float> [ %1153, %1155 ], [ %.17657, %1163 ], [ %.17657, %1162 ] ; <<4 x float>> [#uses=1]
%.07660 = phi <4 x float> [ %1152, %1155 ], [ %.17661, %1163 ], [ %.17661, %1162 ] ; <<4 x float>> [#uses=1]
%.07664 = phi <4 x float> [ %1151, %1155 ], [ %.17665, %1163 ], [ %.17665, %1162 ] ; <<4 x float>> [#uses=1]
- load <4 x float>* null ; <<4 x float>>:1164 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1165 [#uses=1]
- load <4 x float>* %1165 ; <<4 x float>>:1166 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1167 [#uses=1]
- load <4 x float>* %1167 ; <<4 x float>>:1168 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1164 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1165 [#uses=1]
+ load <4 x float>, <4 x float>* %1165 ; <<4 x float>>:1166 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1167 [#uses=1]
+ load <4 x float>, <4 x float>* %1167 ; <<4 x float>>:1168 [#uses=1]
fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1169 [#uses=1]
fadd <4 x float> zeroinitializer, %1164 ; <<4 x float>>:1170 [#uses=1]
fadd <4 x float> zeroinitializer, %1166 ; <<4 x float>>:1171 [#uses=1]
@@ -1734,21 +1734,21 @@ xST.exit504: ; preds = %1181, %1180, %1173
br label %1188
; <label>:1188 ; preds = %1187, %1186
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:1189 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:1189 [#uses=1]
shufflevector <4 x i32> %1189, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:1190 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1190, <4 x i32> zeroinitializer ) ; <i32>:1191 [#uses=1]
icmp eq i32 %1191, 0 ; <i1>:1192 [#uses=1]
br i1 %1192, label %1196, label %1193
; <label>:1193 ; preds = %1188
- load <4 x float>* null ; <<4 x float>>:1194 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1194 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %1194, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:1195 [#uses=1]
store <4 x float> %1195, <4 x float>* null
br label %1196
; <label>:1196 ; preds = %1193, %1188
%.07742 = phi <4 x float> [ zeroinitializer, %1193 ], [ zeroinitializer, %1188 ] ; <<4 x float>> [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:1197 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:1197 [#uses=1]
shufflevector <4 x i32> %1197, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:1198 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1198, <4 x i32> zeroinitializer ) ; <i32>:1199 [#uses=1]
icmp eq i32 %1199, 0 ; <i1>:1200 [#uses=1]
@@ -1765,20 +1765,20 @@ xST.exit507: ; preds = %1201, %1196, %1183
br i1 %1203, label %1207, label %1204
; <label>:1204 ; preds = %xST.exit507
- load <4 x float>* null ; <<4 x float>>:1205 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1205 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %1205, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:1206 [#uses=1]
store <4 x float> %1206, <4 x float>* null
br label %1207
; <label>:1207 ; preds = %1204, %xST.exit507
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:1208 [#uses=1]
+ load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:1208 [#uses=1]
shufflevector <4 x i32> %1208, <4 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x i32>>:1209 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1209, <4 x i32> zeroinitializer ) ; <i32>:1210 [#uses=1]
icmp eq i32 %1210, 0 ; <i1>:1211 [#uses=1]
br i1 %1211, label %1215, label %1212
; <label>:1212 ; preds = %1207
- load <4 x float>* null ; <<4 x float>>:1213 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1213 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %1213, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:1214 [#uses=1]
store <4 x float> %1214, <4 x float>* null
br label %1215
diff --git a/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll b/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
index 53231b4f435e..017775781f7d 100644
--- a/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
+++ b/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
@@ -22,7 +22,7 @@ define i64 @test(i32 %A, i32 %B, i32 %C) nounwind {
entry:
%Y = alloca i32, align 4 ; <i32*> [#uses=2]
%tmp4 = call i32 asm "subf${3:I}c $1,$4,$3\0A\09subfze $0,$2", "=r,=*&r,r,rI,r"( i32* %Y, i32 %A, i32 %B, i32 %C ) ; <i32> [#uses=1]
- %tmp5 = load i32* %Y ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* %Y ; <i32> [#uses=1]
%tmp56 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
%tmp7 = shl i64 %tmp56, 32 ; <i64> [#uses=1]
%tmp89 = zext i32 %tmp4 to i64 ; <i64> [#uses=1]
diff --git a/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll b/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll
index 490aa0c1442c..c5721560d382 100644
--- a/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll
+++ b/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll
@@ -7,6 +7,6 @@ target triple = "powerpc-apple-darwin8.8.0"
define void @foo() {
entry:
- tail call void asm sideeffect "$0 $1", "s,i"( i8* bitcast (i32* getelementptr ([2 x i32]* @x, i32 0, i32 1) to i8*), i8* bitcast (i32* getelementptr ([2 x i32]* @x, i32 0, i32 1) to i8*) )
+ tail call void asm sideeffect "$0 $1", "s,i"( i8* bitcast (i32* getelementptr ([2 x i32], [2 x i32]* @x, i32 0, i32 1) to i8*), i8* bitcast (i32* getelementptr ([2 x i32], [2 x i32]* @x, i32 0, i32 1) to i8*) )
ret void
}
diff --git a/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll b/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll
index e4e931492ac4..1305c42e9320 100644
--- a/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll
+++ b/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll
@@ -12,7 +12,7 @@ entry:
bb: ; preds = %bb, %entry
%i.035.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %tmp8 = getelementptr float* %tmp56, i32 %i.035.0 ; <float*> [#uses=2]
+ %tmp8 = getelementptr float, float* %tmp56, i32 %i.035.0 ; <float*> [#uses=2]
%tmp101112 = bitcast float* %tmp8 to i8* ; <i8*> [#uses=1]
%tmp1617 = bitcast float* %tmp8 to i32* ; <i32*> [#uses=1]
%tmp21 = tail call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"( i8* %tmp101112, i32 0, i32* %tmp1617 ) ; <i32> [#uses=0]
diff --git a/test/CodeGen/PowerPC/2007-05-22-tailmerge-3.ll b/test/CodeGen/PowerPC/2007-05-22-tailmerge-3.ll
index 382ba1f6a82d..b3b73238420d 100644
--- a/test/CodeGen/PowerPC/2007-05-22-tailmerge-3.ll
+++ b/test/CodeGen/PowerPC/2007-05-22-tailmerge-3.ll
@@ -15,46 +15,46 @@ entry:
%retval = alloca i32, align 4 ; <i32*> [#uses=1]
store i32 %i, i32* %i_addr
store i32 %q, i32* %q_addr
- %tmp = load i32* %i_addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i_addr ; <i32> [#uses=1]
%tmp1 = icmp ne i32 %tmp, 0 ; <i1> [#uses=1]
%tmp12 = zext i1 %tmp1 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp12, 0 ; <i1> [#uses=1]
br i1 %toBool, label %cond_true, label %cond_false
cond_true: ; preds = %entry
- %tmp3 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp4 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- %tmp7 = load i32* %q_addr ; <i32> [#uses=1]
+ %tmp3 = call i32 (...) @bar( ) ; <i32> [#uses=0]
+ %tmp4 = call i32 (...) @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
+ %tmp7 = load i32, i32* %q_addr ; <i32> [#uses=1]
%tmp8 = icmp ne i32 %tmp7, 0 ; <i1> [#uses=1]
%tmp89 = zext i1 %tmp8 to i8 ; <i8> [#uses=1]
%toBool10 = icmp ne i8 %tmp89, 0 ; <i1> [#uses=1]
br i1 %toBool10, label %cond_true11, label %cond_false15
cond_false: ; preds = %entry
- %tmp5 = call i32 (...)* @foo( ) ; <i32> [#uses=0]
- %tmp6 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- %tmp27 = load i32* %q_addr ; <i32> [#uses=1]
+ %tmp5 = call i32 (...) @foo( ) ; <i32> [#uses=0]
+ %tmp6 = call i32 (...) @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
+ %tmp27 = load i32, i32* %q_addr ; <i32> [#uses=1]
%tmp28 = icmp ne i32 %tmp27, 0 ; <i1> [#uses=1]
%tmp289 = zext i1 %tmp28 to i8 ; <i8> [#uses=1]
%toBool210 = icmp ne i8 %tmp289, 0 ; <i1> [#uses=1]
br i1 %toBool210, label %cond_true11, label %cond_false15
cond_true11: ; preds = %cond_next
- %tmp13 = call i32 (...)* @foo( ) ; <i32> [#uses=0]
- %tmp14 = call i32 (...)* @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
+ %tmp13 = call i32 (...) @foo( ) ; <i32> [#uses=0]
+ %tmp14 = call i32 (...) @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
br label %cond_next18
cond_false15: ; preds = %cond_next
- %tmp16 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp17 = call i32 (...)* @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
+ %tmp16 = call i32 (...) @bar( ) ; <i32> [#uses=0]
+ %tmp17 = call i32 (...) @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
br label %cond_next18
cond_next18: ; preds = %cond_false15, %cond_true11
- %tmp19 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ %tmp19 = call i32 (...) @bar( ) ; <i32> [#uses=0]
br label %return
return: ; preds = %cond_next18
- %retval20 = load i32* %retval ; <i32> [#uses=1]
+ %retval20 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval20
}
diff --git a/test/CodeGen/PowerPC/2007-06-28-BCCISelBug.ll b/test/CodeGen/PowerPC/2007-06-28-BCCISelBug.ll
index 6de7a09128f0..7a8eb175a93a 100644
--- a/test/CodeGen/PowerPC/2007-06-28-BCCISelBug.ll
+++ b/test/CodeGen/PowerPC/2007-06-28-BCCISelBug.ll
@@ -70,7 +70,7 @@ declare i32 @llvm.ppc.altivec.vcmpequw.p(i32, <4 x i32>, <4 x i32>)
define void @test(%struct.XState* %gldst, <4 x float>* %prgrm, <4 x float>** %buffs, %struct._GVMConstants* %cnstn, %struct.PPSToken* %pstrm, %struct.GVMFPContext* %vmctx, %struct.GVMTs* %txtrs, %struct.GVMFPStack* %fpstk, %struct.GVMFGAttrib* %start, %struct.GVMFGAttrib* %deriv, i32 %fragx, i32 %fragy) {
bb58.i:
- %tmp3405.i = getelementptr %struct.XTRec* null, i32 0, i32 1 ; <float*> [#uses=1]
+ %tmp3405.i = getelementptr %struct.XTRec, %struct.XTRec* null, i32 0, i32 1 ; <float*> [#uses=1]
%tmp34053406.i = bitcast float* %tmp3405.i to i8* ; <i8*> [#uses=1]
%tmp3407.i = call <4 x i32> @llvm.ppc.altivec.lvewx( i8* %tmp34053406.i ) ; <<4 x i32>> [#uses=0]
%tmp4146.i = call i32 @llvm.ppc.altivec.vcmpequw.p( i32 3, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32> [#uses=1]
diff --git a/test/CodeGen/PowerPC/2007-08-04-CoalescerAssert.ll b/test/CodeGen/PowerPC/2007-08-04-CoalescerAssert.ll
index 06f40d98c68c..6b88b81681cb 100644
--- a/test/CodeGen/PowerPC/2007-08-04-CoalescerAssert.ll
+++ b/test/CodeGen/PowerPC/2007-08-04-CoalescerAssert.ll
@@ -22,7 +22,7 @@ cond_true28: ; preds = %cond_false, %cond_true
cond_next30: ; preds = %cond_true28, %cond_false, %cond_true
%iftmp.0.043.1 = phi %struct._obstack_chunk* [ %iftmp.0.043.0, %cond_true28 ], [ null, %cond_true ], [ %tmp22, %cond_false ] ; <%struct._obstack_chunk*> [#uses=1]
- %tmp41 = getelementptr %struct._obstack_chunk* %iftmp.0.043.1, i32 0, i32 0 ; <i8**> [#uses=1]
+ %tmp41 = getelementptr %struct._obstack_chunk, %struct._obstack_chunk* %iftmp.0.043.1, i32 0, i32 0 ; <i8**> [#uses=1]
store i8* null, i8** %tmp41, align 8
ret i32 undef
}
diff --git a/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll b/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll
index 40f46fda468d..aae914ecc435 100644
--- a/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll
+++ b/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll
@@ -8,8 +8,8 @@ define void @foo() {
entry:
%ttype = alloca i32, align 4 ; <i32*> [#uses=1]
%regs = alloca [1024 x %struct.__db_region], align 16 ; <[1024 x %struct.__db_region]*> [#uses=0]
- %tmp = load i32* %ttype, align 4 ; <i32> [#uses=1]
- %tmp1 = call i32 (...)* @bork( i32 %tmp ) ; <i32> [#uses=0]
+ %tmp = load i32, i32* %ttype, align 4 ; <i32> [#uses=1]
+ %tmp1 = call i32 (...) @bork( i32 %tmp ) ; <i32> [#uses=0]
ret void
; CHECK: @foo
diff --git a/test/CodeGen/PowerPC/2007-09-08-unaligned.ll b/test/CodeGen/PowerPC/2007-09-08-unaligned.ll
index bdd91f345718..ccbadb4255a0 100644
--- a/test/CodeGen/PowerPC/2007-09-08-unaligned.ll
+++ b/test/CodeGen/PowerPC/2007-09-08-unaligned.ll
@@ -16,18 +16,18 @@ define i32 @foo() {
entry:
%retval = alloca i32, align 4 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = getelementptr %struct.anon* @s, i32 0, i32 1 ; <float*> [#uses=1]
- %tmp1 = load float* %tmp, align 1 ; <float> [#uses=1]
- %tmp2 = getelementptr %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
+ %tmp = getelementptr %struct.anon, %struct.anon* @s, i32 0, i32 1 ; <float*> [#uses=1]
+ %tmp1 = load float, float* %tmp, align 1 ; <float> [#uses=1]
+ %tmp2 = getelementptr %struct.anon, %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
store float %tmp1, float* %tmp2, align 1
- %tmp3 = getelementptr <{ i8, double }>* @u, i32 0, i32 1 ; <double*> [#uses=1]
- %tmp4 = load double* %tmp3, align 1 ; <double> [#uses=1]
- %tmp5 = getelementptr <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
+ %tmp3 = getelementptr <{ i8, double }>, <{ i8, double }>* @u, i32 0, i32 1 ; <double*> [#uses=1]
+ %tmp4 = load double, double* %tmp3, align 1 ; <double> [#uses=1]
+ %tmp5 = getelementptr <{ i8, double }>, <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
store double %tmp4, double* %tmp5, align 1
br label %return
return: ; preds = %entry
- %retval6 = load i32* %retval ; <i32> [#uses=1]
+ %retval6 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval6
}
@@ -36,17 +36,17 @@ entry:
%retval = alloca i32, align 4 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp = call i32 @foo( ) ; <i32> [#uses=0]
- %tmp1 = getelementptr %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
- %tmp2 = load float* %tmp1, align 1 ; <float> [#uses=1]
+ %tmp1 = getelementptr %struct.anon, %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
+ %tmp2 = load float, float* %tmp1, align 1 ; <float> [#uses=1]
%tmp23 = fpext float %tmp2 to double ; <double> [#uses=1]
- %tmp4 = getelementptr <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
- %tmp5 = load double* %tmp4, align 1 ; <double> [#uses=1]
- %tmp6 = getelementptr [8 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- %tmp7 = call i32 (i8*, ...)* @printf( i8* %tmp6, double %tmp23, double %tmp5 ) ; <i32> [#uses=0]
+ %tmp4 = getelementptr <{ i8, double }>, <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
+ %tmp5 = load double, double* %tmp4, align 1 ; <double> [#uses=1]
+ %tmp6 = getelementptr [8 x i8], [8 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp7 = call i32 (i8*, ...) @printf( i8* %tmp6, double %tmp23, double %tmp5 ) ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
- %retval8 = load i32* %retval ; <i32> [#uses=1]
+ %retval8 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval8
}
diff --git a/test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll b/test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll
index 84fadd1b0461..07b1f8d7698a 100644
--- a/test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll
+++ b/test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll
@@ -10,9 +10,9 @@ entry:
cond_true: ; preds = %entry
%tmp89 = bitcast float* %res to <4 x i32>* ; <<4 x i32>*> [#uses=1]
%tmp1011 = bitcast float* %argA to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp14 = load <4 x i32>* %tmp1011, align 16 ; <<4 x i32>> [#uses=1]
+ %tmp14 = load <4 x i32>, <4 x i32>* %tmp1011, align 16 ; <<4 x i32>> [#uses=1]
%tmp1516 = bitcast float* %argB to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp18 = load <4 x i32>* %tmp1516, align 16 ; <<4 x i32>> [#uses=1]
+ %tmp18 = load <4 x i32>, <4 x i32>* %tmp1516, align 16 ; <<4 x i32>> [#uses=1]
%tmp19 = sdiv <4 x i32> %tmp14, %tmp18 ; <<4 x i32>> [#uses=1]
store <4 x i32> %tmp19, <4 x i32>* %tmp89, align 16
ret void
diff --git a/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll b/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll
index a60d11c85c55..13b9be31b69b 100644
--- a/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll
+++ b/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll
@@ -17,11 +17,11 @@
define %struct.NSManagedObjectContext* @"+[ListGenerator(Private) managedObjectContextWithModelURL:storeURL:]"(%struct.objc_object* %self, %struct._message_ref_t* %_cmd, %struct.NSURL* %modelURL, %struct.NSURL* %storeURL) {
entry:
%storeCoordinator = alloca %struct.NSPersistentStoreCoordinator* ; <%struct.NSPersistentStoreCoordinator**> [#uses=0]
- %tmp29 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2" ) ; <%struct.objc_object*> [#uses=0]
- %tmp34 = load %struct.NSString** @NSXMLStoreType, align 8 ; <%struct.NSString*> [#uses=1]
- %tmp37 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_5", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp42 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 1 ) ; <%struct.objc_object*> [#uses=1]
- %tmp45 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* %tmp37( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_5", %struct.objc_object* %tmp42, %struct.NSString* null ) ; <%struct.objc_object*> [#uses=1]
- %tmp48 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", %struct.NSString* %tmp34, i8* null, %struct.NSURL* null, %struct.objc_object* %tmp45, %struct.NSError** null ) ; <%struct.objc_object*> [#uses=0]
+ %tmp29 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2" ) ; <%struct.objc_object*> [#uses=0]
+ %tmp34 = load %struct.NSString*, %struct.NSString** @NSXMLStoreType, align 8 ; <%struct.NSString*> [#uses=1]
+ %tmp37 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_5", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
+ %tmp42 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 1 ) ; <%struct.objc_object*> [#uses=1]
+ %tmp45 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) %tmp37( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_5", %struct.objc_object* %tmp42, %struct.NSString* null ) ; <%struct.objc_object*> [#uses=1]
+ %tmp48 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", %struct.NSString* %tmp34, i8* null, %struct.NSURL* null, %struct.objc_object* %tmp45, %struct.NSError** null ) ; <%struct.objc_object*> [#uses=0]
unreachable
}
diff --git a/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll b/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll
index 3d1a328ec3c1..ff5f835fd531 100644
--- a/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll
+++ b/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll
@@ -14,12 +14,12 @@
define %struct.NSManagedObjectContext* @"+[ListGenerator(Private) managedObjectContextWithModelURL:storeURL:]"(%struct.objc_object* %self, %struct._message_ref_t* %_cmd, %struct.NSURL* %modelURL, %struct.NSURL* %storeURL) {
entry:
- %tmp27 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp29 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* %tmp27( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2" ) ; <%struct.objc_object*> [#uses=0]
- %tmp33 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp34 = load %struct.NSString** @NSXMLStoreType, align 8 ; <%struct.NSString*> [#uses=1]
- %tmp40 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp42 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* %tmp40( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 1 ) ; <%struct.objc_object*> [#uses=0]
- %tmp48 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* %tmp33( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", %struct.NSString* %tmp34, i8* null, %struct.NSURL* null, %struct.objc_object* null, %struct.NSError** null ) ; <%struct.objc_object*> [#uses=0]
+ %tmp27 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
+ %tmp29 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) %tmp27( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2" ) ; <%struct.objc_object*> [#uses=0]
+ %tmp33 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
+ %tmp34 = load %struct.NSString*, %struct.NSString** @NSXMLStoreType, align 8 ; <%struct.NSString*> [#uses=1]
+ %tmp40 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
+ %tmp42 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) %tmp40( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 1 ) ; <%struct.objc_object*> [#uses=0]
+ %tmp48 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) %tmp33( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", %struct.NSString* %tmp34, i8* null, %struct.NSURL* null, %struct.objc_object* null, %struct.NSError** null ) ; <%struct.objc_object*> [#uses=0]
unreachable
}
diff --git a/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll b/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
index df83f8b191c6..34122912349b 100644
--- a/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
+++ b/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
@@ -29,7 +29,7 @@ entry:
to label %bb30.preheader unwind label %unwind
bb30.preheader: ; preds = %entry
- %tmp26 = getelementptr %struct.Range* %effectiveRange, i64 0, i32 1 ; <i64*> [#uses=1]
+ %tmp26 = getelementptr %struct.Range, %struct.Range* %effectiveRange, i64 0, i32 1 ; <i64*> [#uses=1]
br label %bb30
unwind: ; preds = %cond_true, %entry
@@ -39,7 +39,7 @@ unwind: ; preds = %cond_true, %entry
resume { i8*, i32 } %exn
invcont23: ; preds = %cond_true
- %tmp27 = load i64* %tmp26, align 8 ; <i64> [#uses=1]
+ %tmp27 = load i64, i64* %tmp26, align 8 ; <i64> [#uses=1]
%tmp28 = sub i64 %range_addr.1.0, %tmp27 ; <i64> [#uses=1]
br label %bb30
diff --git a/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll b/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll
index d1f028586160..4830ca60f9ff 100644
--- a/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll
+++ b/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll
@@ -6,7 +6,7 @@
define void @execute_shader(<4 x float>* %OUT, <4 x float>* %IN, <4 x float>*
%CONST) {
entry:
- %input2 = load <4 x float>* null, align 16 ; <<4 x float>>
+ %input2 = load <4 x float>, <4 x float>* null, align 16 ; <<4 x float>>
%shuffle7 = shufflevector <4 x float> %input2, <4 x float> < float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>> [#uses=1]
%mul1 = fmul <4 x float> %shuffle7, zeroinitializer ; <<4 x
diff --git a/test/CodeGen/PowerPC/2008-02-05-LiveIntervalsAssert.ll b/test/CodeGen/PowerPC/2008-02-05-LiveIntervalsAssert.ll
index 791e9e610655..7ed7b9b36687 100644
--- a/test/CodeGen/PowerPC/2008-02-05-LiveIntervalsAssert.ll
+++ b/test/CodeGen/PowerPC/2008-02-05-LiveIntervalsAssert.ll
@@ -44,7 +44,7 @@ bb103.preheader: ; preds = %bb113
bb113: ; preds = %bb113, %bb93, %bb82, %bb52, %entry
%fingerprint_addr.0.reg2mem.9 = phi i64 [ 0, %entry ], [ 0, %bb52 ], [ 0, %bb82 ], [ 0, %bb93 ], [ %tmp118, %bb113 ] ; <i64> [#uses=1]
- tail call void @_Z28report_should_not_reach_herePKci( i8* getelementptr ([44 x i8]* @.str, i32 0, i32 0), i32 817 ) nounwind
+ tail call void @_Z28report_should_not_reach_herePKci( i8* getelementptr ([44 x i8], [44 x i8]* @.str, i32 0, i32 0), i32 817 ) nounwind
%tmp118 = lshr i64 %fingerprint_addr.0.reg2mem.9, 4 ; <i64> [#uses=2]
%tmp21158 = and i64 %tmp118, 15 ; <i64> [#uses=1]
switch i64 %tmp21158, label %bb113 [
diff --git a/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll b/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll
index e28a3e04cf1b..73a804bf02b2 100644
--- a/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll
+++ b/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll
@@ -2,7 +2,7 @@
define i32 @bork(i64 %foo, i64 %bar) {
entry:
- %tmp = load i64* null, align 8 ; <i64> [#uses=2]
+ %tmp = load i64, i64* null, align 8 ; <i64> [#uses=2]
%tmp2 = icmp ule i64 %tmp, 0 ; <i1> [#uses=1]
%min = select i1 %tmp2, i64 %tmp, i64 0 ; <i64> [#uses=1]
store i64 %min, i64* null, align 8
diff --git a/test/CodeGen/PowerPC/2008-03-05-RegScavengerAssert.ll b/test/CodeGen/PowerPC/2008-03-05-RegScavengerAssert.ll
index d10291e190b9..863b02528e1e 100644
--- a/test/CodeGen/PowerPC/2008-03-05-RegScavengerAssert.ll
+++ b/test/CodeGen/PowerPC/2008-03-05-RegScavengerAssert.ll
@@ -6,7 +6,7 @@ define void @foo(i8* %pp) nounwind {
entry:
%tmp2 = tail call i8* @bar( i32 14 ) nounwind ; <i8*> [#uses=0]
%tmp28 = bitcast i8* %pp to void ()** ; <void ()**> [#uses=1]
- %tmp38 = load void ()** %tmp28, align 4 ; <void ()*> [#uses=2]
+ %tmp38 = load void ()*, void ()** %tmp28, align 4 ; <void ()*> [#uses=2]
br i1 false, label %bb34, label %bb25
bb25: ; preds = %entry
%tmp30 = bitcast void ()* %tmp38 to void (i8*)* ; <void (i8*)*> [#uses=1]
diff --git a/test/CodeGen/PowerPC/2008-03-17-RegScavengerCrash.ll b/test/CodeGen/PowerPC/2008-03-17-RegScavengerCrash.ll
index fb8cdcea63aa..dc9734f2f734 100644
--- a/test/CodeGen/PowerPC/2008-03-17-RegScavengerCrash.ll
+++ b/test/CodeGen/PowerPC/2008-03-17-RegScavengerCrash.ll
@@ -7,7 +7,7 @@ declare fastcc void @emit_numeric_escape(i32, i32, %struct._cpp_strbuf*, i32) no
define i32 @cpp_interpret_string(i32 %pfile, %struct.cpp_string* %from, i32 %wide) nounwind {
entry:
- %tmp61 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp61 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%toBool = icmp eq i32 %wide, 0 ; <i1> [#uses=2]
%iftmp.87.0 = select i1 %toBool, i32 %tmp61, i32 0 ; <i32> [#uses=2]
%tmp69 = icmp ult i32 %iftmp.87.0, 33 ; <i1> [#uses=1]
@@ -23,7 +23,7 @@ bb94: ; preds = %bb79
bb103: ; preds = %bb79
ret i32 0
bb130.preheader: ; preds = %bb94
- %tmp134 = getelementptr %struct.cpp_string* %from, i32 0, i32 1 ; <i8**> [#uses=0]
+ %tmp134 = getelementptr %struct.cpp_string, %struct.cpp_string* %from, i32 0, i32 1 ; <i8**> [#uses=0]
ret i32 0
bb729: ; preds = %bb94
call fastcc void @emit_numeric_escape( i32 %pfile, i32 0, %struct._cpp_strbuf* null, i32 %wide ) nounwind
diff --git a/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll b/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll
index a8fef05b1ad8..1191748b87d4 100644
--- a/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll
+++ b/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll
@@ -5,8 +5,8 @@ entry:
%tmp2627 = ptrtoint i8* %rec to i64 ; <i64> [#uses=2]
%tmp28 = and i64 %tmp2627, -16384 ; <i64> [#uses=2]
%tmp2829 = inttoptr i64 %tmp28 to i8* ; <i8*> [#uses=1]
- %tmp37 = getelementptr i8* %tmp2829, i64 42 ; <i8*> [#uses=1]
- %tmp40 = load i8* %tmp37, align 1 ; <i8> [#uses=1]
+ %tmp37 = getelementptr i8, i8* %tmp2829, i64 42 ; <i8*> [#uses=1]
+ %tmp40 = load i8, i8* %tmp37, align 1 ; <i8> [#uses=1]
%tmp4041 = zext i8 %tmp40 to i64 ; <i64> [#uses=1]
%tmp42 = shl i64 %tmp4041, 8 ; <i64> [#uses=1]
%tmp47 = add i64 %tmp42, 0 ; <i64> [#uses=1]
diff --git a/test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll b/test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll
index 8e5bf567b126..908a2a803b2b 100644
--- a/test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll
+++ b/test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll
@@ -2,7 +2,7 @@
define i32 @t(i64 %byteStart, i32 %activeIndex) nounwind {
entry:
- %tmp50 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp50 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%tmp5051 = zext i32 %tmp50 to i64 ; <i64> [#uses=3]
%tmp53 = udiv i64 %byteStart, %tmp5051 ; <i64> [#uses=1]
%tmp5354 = trunc i64 %tmp53 to i32 ; <i32> [#uses=1]
diff --git a/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll b/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
index e7a1cf69c693..45d43997fefb 100644
--- a/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
+++ b/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
@@ -24,7 +24,7 @@ bb: ; preds = %entry
bb31: ; preds = %_Z24unlock_then_erase_sectory.exit, %bb
%Pos.0.reg2mem.0 = phi i64 [ %tmp93, %_Z24unlock_then_erase_sectory.exit ], [ %Offset, %bb ] ; <i64> [#uses=3]
- %tmp35 = load i16* @_ZL10DeviceCode, align 2 ; <i16> [#uses=1]
+ %tmp35 = load i16, i16* @_ZL10DeviceCode, align 2 ; <i16> [#uses=1]
%tmp3536 = zext i16 %tmp35 to i32 ; <i32> [#uses=2]
%tmp37 = and i32 %tmp3536, 65520 ; <i32> [#uses=1]
%tmp38 = icmp eq i32 %tmp37, 35008 ; <i1> [#uses=1]
@@ -39,11 +39,11 @@ bb41: ; preds = %bb31
ret i32 0
bb68: ; preds = %bb31
- tail call void (i8*, ...)* @IOLog( i8* getelementptr ([68 x i8]* @.str34, i32 0, i32 0), i64 %tmp34, i64 0, i32 131072 ) nounwind
+ tail call void (i8*, ...) @IOLog( i8* getelementptr ([68 x i8], [68 x i8]* @.str34, i32 0, i32 0), i64 %tmp34, i64 0, i32 131072 ) nounwind
%tmp2021.i = trunc i64 %Pos.0.reg2mem.0 to i32 ; <i32> [#uses=1]
%tmp202122.i = inttoptr i32 %tmp2021.i to i8* ; <i8*> [#uses=1]
tail call void @IODelay( i32 500 ) nounwind
- %tmp53.i = load volatile i16* null, align 2 ; <i16> [#uses=2]
+ %tmp53.i = load volatile i16, i16* null, align 2 ; <i16> [#uses=2]
%tmp5455.i = zext i16 %tmp53.i to i32 ; <i32> [#uses=1]
br i1 false, label %bb.i, label %bb65.i
@@ -55,7 +55,7 @@ bb65.i: ; preds = %bb68
br i1 %tmp67.i, label %_Z24unlock_then_erase_sectory.exit, label %bb70.i
bb70.i: ; preds = %bb65.i
- tail call void (i8*, ...)* @IOLog( i8* getelementptr ([64 x i8]* @.str19, i32 0, i32 0), i32 %tmp5455.i ) nounwind
+ tail call void (i8*, ...) @IOLog( i8* getelementptr ([64 x i8], [64 x i8]* @.str19, i32 0, i32 0), i32 %tmp5455.i ) nounwind
ret i32 0
_Z24unlock_then_erase_sectory.exit: ; preds = %bb65.i
@@ -66,15 +66,15 @@ _Z24unlock_then_erase_sectory.exit: ; preds = %bb65.i
br i1 %tmp100, label %bb31, label %bb103
bb103: ; preds = %_Z24unlock_then_erase_sectory.exit, %bb
- tail call void (i8*, ...)* @IOLog( i8* getelementptr ([37 x i8]* @.str35, i32 0, i32 0) ) nounwind
+ tail call void (i8*, ...) @IOLog( i8* getelementptr ([37 x i8], [37 x i8]* @.str35, i32 0, i32 0) ) nounwind
ret i32 0
bb107: ; preds = %entry
- tail call void (i8*, ...)* @IOLog( i8* getelementptr ([48 x i8]* @.str36, i32 0, i32 0) ) nounwind
+ tail call void (i8*, ...) @IOLog( i8* getelementptr ([48 x i8], [48 x i8]* @.str36, i32 0, i32 0) ) nounwind
%tmp114115 = bitcast i8* %buffer to i16* ; <i16*> [#uses=1]
%tmp256 = lshr i64 %bufferSize, 1 ; <i64> [#uses=1]
%tmp256257 = trunc i64 %tmp256 to i32 ; <i32> [#uses=1]
- %tmp258 = getelementptr i16* %tmp114115, i32 %tmp256257 ; <i16*> [#uses=0]
+ %tmp258 = getelementptr i16, i16* %tmp114115, i32 %tmp256257 ; <i16*> [#uses=0]
ret i32 0
}
diff --git a/test/CodeGen/PowerPC/2008-06-21-F128LoadStore.ll b/test/CodeGen/PowerPC/2008-06-21-F128LoadStore.ll
index 862559b109cf..8ce1708acdbf 100644
--- a/test/CodeGen/PowerPC/2008-06-21-F128LoadStore.ll
+++ b/test/CodeGen/PowerPC/2008-06-21-F128LoadStore.ll
@@ -4,7 +4,7 @@
@h = external global ppc_fp128
define void @f() {
- %tmp = load ppc_fp128* @g
+ %tmp = load ppc_fp128, ppc_fp128* @g
store ppc_fp128 %tmp, ppc_fp128* @h
ret void
}
diff --git a/test/CodeGen/PowerPC/2008-06-23-LiveVariablesCrash.ll b/test/CodeGen/PowerPC/2008-06-23-LiveVariablesCrash.ll
index 83c5511878ca..db488ff36bfb 100644
--- a/test/CodeGen/PowerPC/2008-06-23-LiveVariablesCrash.ll
+++ b/test/CodeGen/PowerPC/2008-06-23-LiveVariablesCrash.ll
@@ -6,7 +6,7 @@ entry:
br i1 true, label %bb1, label %bb3
bb1:
- %tmp1 = load i8* null, align 1
+ %tmp1 = load i8, i8* null, align 1
%tmp2 = icmp eq i8 %tmp1, 0
br label %bb2
diff --git a/test/CodeGen/PowerPC/2008-07-15-Bswap.ll b/test/CodeGen/PowerPC/2008-07-15-Bswap.ll
index 4a834f93a205..b271048fd045 100644
--- a/test/CodeGen/PowerPC/2008-07-15-Bswap.ll
+++ b/test/CodeGen/PowerPC/2008-07-15-Bswap.ll
@@ -95,13 +95,13 @@ entry:
bb16: ; preds = %entry
bitcast %struct.PerMacroblockBoundaryStrengths* null to i32* ; <i32*>:1 [#uses=3]
- getelementptr i32* %1, i32 1 ; <i32*>:2 [#uses=0]
- getelementptr i32* %1, i32 2 ; <i32*>:3 [#uses=0]
- getelementptr i32* %1, i32 3 ; <i32*>:4 [#uses=0]
+ getelementptr i32, i32* %1, i32 1 ; <i32*>:2 [#uses=0]
+ getelementptr i32, i32* %1, i32 2 ; <i32*>:3 [#uses=0]
+ getelementptr i32, i32* %1, i32 3 ; <i32*>:4 [#uses=0]
bitcast [16 x i8]* null to i32* ; <i32*>:5 [#uses=3]
- getelementptr i32* %5, i32 1 ; <i32*>:6 [#uses=0]
- getelementptr i32* %5, i32 2 ; <i32*>:7 [#uses=0]
- getelementptr i32* %5, i32 3 ; <i32*>:8 [#uses=0]
+ getelementptr i32, i32* %5, i32 1 ; <i32*>:6 [#uses=0]
+ getelementptr i32, i32* %5, i32 2 ; <i32*>:7 [#uses=0]
+ getelementptr i32, i32* %5, i32 3 ; <i32*>:8 [#uses=0]
icmp eq i32 0, 0 ; <i1>:9 [#uses=0]
lshr i32 0, 30 ; <i32>:10 [#uses=0]
and i32 0, 268435455 ; <i32>:11 [#uses=0]
@@ -117,14 +117,14 @@ bb16: ; preds = %entry
%.not658 = icmp ne i32 0, 0 ; <i1> [#uses=1]
and i32 0, 268369920 ; <i32>:20 [#uses=1]
icmp eq i32 %20, 268369920 ; <i1>:21 [#uses=2]
- getelementptr %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2 ; <[4 x i8]*>:22 [#uses=1]
- getelementptr %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2, i32 0 ; <i8*>:23 [#uses=0]
+ getelementptr %struct.PerMacroblockBoundaryStrengths, %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2 ; <[4 x i8]*>:22 [#uses=1]
+ getelementptr %struct.PerMacroblockBoundaryStrengths, %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2, i32 0 ; <i8*>:23 [#uses=0]
and i32 0, -2 ; <i32>:24 [#uses=1]
add i32 %24, -1 ; <i32>:25 [#uses=0]
bitcast [4 x i8]* %22 to i32* ; <i32*>:26 [#uses=3]
- getelementptr i32* %26, i32 1 ; <i32*>:27 [#uses=0]
- getelementptr i32* %26, i32 2 ; <i32*>:28 [#uses=0]
- getelementptr i32* %26, i32 3 ; <i32*>:29 [#uses=0]
+ getelementptr i32, i32* %26, i32 1 ; <i32*>:27 [#uses=0]
+ getelementptr i32, i32* %26, i32 2 ; <i32*>:28 [#uses=0]
+ getelementptr i32, i32* %26, i32 3 ; <i32*>:29 [#uses=0]
br label %bb144
bb144: ; preds = %bb395, %bb16
@@ -136,8 +136,8 @@ bb144: ; preds = %bb395, %bb16
%boundaryStrengthsV.1771 = phi i8* [ null, %bb16 ], [ %158, %bb395 ] ; <i8*> [#uses=2]
%numEdgesToTest.1770 = phi i32 [ 4, %bb16 ], [ %numEdgesToTest.2, %bb395 ] ; <i32> [#uses=1]
icmp eq i32 %idxEachField11.0773, 0 ; <i1>:30 [#uses=0]
- getelementptr %struct.BiPartSrcDescriptor** null, i32 %mbIndexLeft.2772 ; <%struct.BiPartSrcDescriptor**>:31 [#uses=1]
- load %struct.BiPartSrcDescriptor** %31, align 4 ; <%struct.BiPartSrcDescriptor*>:32 [#uses=0]
+ getelementptr %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** null, i32 %mbIndexLeft.2772 ; <%struct.BiPartSrcDescriptor**>:31 [#uses=1]
+ load %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** %31, align 4 ; <%struct.BiPartSrcDescriptor*>:32 [#uses=0]
%fMacroblockHasNonZeroBS.4 = select i1 %21, i32 1, i32 0 ; <i32> [#uses=1]
%numEdgesToTest.2 = select i1 %21, i32 1, i32 %numEdgesToTest.1770 ; <i32> [#uses=2]
store i8 32, i8* %boundaryStrengthsV.1771, align 1
@@ -180,32 +180,32 @@ bb210.preheader: ; preds = %bb206
add i32 %52, %42 ; <i32>:53 [#uses=1]
mul i32 %51, 0 ; <i32>:54 [#uses=1]
add i32 %46, %54 ; <i32>:55 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor** null, i32 %53 ; <%struct.BiPartSrcDescriptor**>:56 [#uses=1]
- load %struct.BiPartSrcDescriptor** %56, align 4 ; <%struct.BiPartSrcDescriptor*>:57 [#uses=7]
- getelementptr %struct.BiPartSrcDescriptor** null, i32 %55 ; <%struct.BiPartSrcDescriptor**>:58 [#uses=1]
- load %struct.BiPartSrcDescriptor** %58, align 4 ; <%struct.BiPartSrcDescriptor*>:59 [#uses=5]
+ getelementptr %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** null, i32 %53 ; <%struct.BiPartSrcDescriptor**>:56 [#uses=1]
+ load %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** %56, align 4 ; <%struct.BiPartSrcDescriptor*>:57 [#uses=7]
+ getelementptr %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** null, i32 %55 ; <%struct.BiPartSrcDescriptor**>:58 [#uses=1]
+ load %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** %58, align 4 ; <%struct.BiPartSrcDescriptor*>:59 [#uses=5]
icmp slt i32 %159, 0 ; <i1>:60 [#uses=0]
icmp eq %struct.BiPartSrcDescriptor* %57, %59 ; <i1>:61 [#uses=0]
bitcast %struct.BiPartSrcDescriptor* %57 to i16* ; <i16*>:62 [#uses=5]
- load i16* %62, align 2 ; <i16>:63 [#uses=2]
- getelementptr i16* %62, i32 1 ; <i16*>:64 [#uses=1]
- load i16* %64, align 2 ; <i16>:65 [#uses=2]
- getelementptr i16* %62, i32 2 ; <i16*>:66 [#uses=1]
- load i16* %66, align 2 ; <i16>:67 [#uses=2]
- getelementptr i16* %62, i32 3 ; <i16*>:68 [#uses=1]
- load i16* %68, align 2 ; <i16>:69 [#uses=2]
- getelementptr i16* %62, i32 6 ; <i16*>:70 [#uses=1]
- load i16* %70, align 2 ; <i16>:71 [#uses=2]
+ load i16, i16* %62, align 2 ; <i16>:63 [#uses=2]
+ getelementptr i16, i16* %62, i32 1 ; <i16*>:64 [#uses=1]
+ load i16, i16* %64, align 2 ; <i16>:65 [#uses=2]
+ getelementptr i16, i16* %62, i32 2 ; <i16*>:66 [#uses=1]
+ load i16, i16* %66, align 2 ; <i16>:67 [#uses=2]
+ getelementptr i16, i16* %62, i32 3 ; <i16*>:68 [#uses=1]
+ load i16, i16* %68, align 2 ; <i16>:69 [#uses=2]
+ getelementptr i16, i16* %62, i32 6 ; <i16*>:70 [#uses=1]
+ load i16, i16* %70, align 2 ; <i16>:71 [#uses=2]
bitcast %struct.BiPartSrcDescriptor* %59 to i16* ; <i16*>:72 [#uses=5]
- load i16* %72, align 2 ; <i16>:73 [#uses=2]
- getelementptr i16* %72, i32 1 ; <i16*>:74 [#uses=1]
- load i16* %74, align 2 ; <i16>:75 [#uses=2]
- getelementptr i16* %72, i32 2 ; <i16*>:76 [#uses=1]
- load i16* %76, align 2 ; <i16>:77 [#uses=2]
- getelementptr i16* %72, i32 3 ; <i16*>:78 [#uses=1]
- load i16* %78, align 2 ; <i16>:79 [#uses=2]
- getelementptr i16* %72, i32 6 ; <i16*>:80 [#uses=1]
- load i16* %80, align 2 ; <i16>:81 [#uses=2]
+ load i16, i16* %72, align 2 ; <i16>:73 [#uses=2]
+ getelementptr i16, i16* %72, i32 1 ; <i16*>:74 [#uses=1]
+ load i16, i16* %74, align 2 ; <i16>:75 [#uses=2]
+ getelementptr i16, i16* %72, i32 2 ; <i16*>:76 [#uses=1]
+ load i16, i16* %76, align 2 ; <i16>:77 [#uses=2]
+ getelementptr i16, i16* %72, i32 3 ; <i16*>:78 [#uses=1]
+ load i16, i16* %78, align 2 ; <i16>:79 [#uses=2]
+ getelementptr i16, i16* %72, i32 6 ; <i16*>:80 [#uses=1]
+ load i16, i16* %80, align 2 ; <i16>:81 [#uses=2]
sub i16 %63, %73 ; <i16>:82 [#uses=3]
sub i16 %65, %75 ; <i16>:83 [#uses=3]
sub i16 %67, %77 ; <i16>:84 [#uses=3]
@@ -226,23 +226,23 @@ bb210.preheader: ; preds = %bb206
sub i16 0, %86 ; <i16>:95 [#uses=1]
icmp slt i16 %86, 0 ; <i1>:96 [#uses=1]
%.663 = select i1 %96, i16 %95, i16 %86 ; <i16> [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 0 ; <i8*>:97 [#uses=1]
- load i8* %97, align 1 ; <i8>:98 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 0 ; <i8*>:97 [#uses=1]
+ load i8, i8* %97, align 1 ; <i8>:98 [#uses=1]
zext i8 %98 to i32 ; <i32>:99 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 1 ; <i8*>:100 [#uses=1]
- load i8* %100, align 1 ; <i8>:101 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 1 ; <i8*>:100 [#uses=1]
+ load i8, i8* %100, align 1 ; <i8>:101 [#uses=1]
zext i8 %101 to i32 ; <i32>:102 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:103 [#uses=1]
- load i8* %103, align 1 ; <i8>:104 [#uses=2]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:103 [#uses=1]
+ load i8, i8* %103, align 1 ; <i8>:104 [#uses=2]
zext i8 %104 to i32 ; <i32>:105 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:106 [#uses=1]
- load i8* %106, align 1 ; <i8>:107 [#uses=2]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:106 [#uses=1]
+ load i8, i8* %106, align 1 ; <i8>:107 [#uses=2]
zext i8 %107 to i32 ; <i32>:108 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:109 [#uses=1]
- load i8* %109, align 1 ; <i8>:110 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:109 [#uses=1]
+ load i8, i8* %109, align 1 ; <i8>:110 [#uses=1]
zext i8 %110 to i32 ; <i32>:111 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:112 [#uses=1]
- load i8* %112, align 1 ; <i8>:113 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:112 [#uses=1]
+ load i8, i8* %112, align 1 ; <i8>:113 [#uses=1]
zext i8 %113 to i32 ; <i32>:114 [#uses=1]
lshr i32 %99, 4 ; <i32>:115 [#uses=1]
and i32 %115, 2 ; <i32>:116 [#uses=1]
@@ -322,13 +322,13 @@ labelContinueEdgesLoopV: ; preds = %bb206, %bb205, %bb144
%bfNZ12.2 = phi i32 [ %159, %bb205 ], [ 0, %bb144 ], [ %159, %bb206 ] ; <i32> [#uses=1]
%boundaryStrengthsV.3 = phi i8* [ %158, %bb205 ], [ %boundaryStrengthsV.1771, %bb144 ], [ %158, %bb206 ] ; <i8*> [#uses=3]
or i32 %fMacroblockHasNonZeroBS.6, %fEdgeHasNonZeroBS.0 ; <i32>:152 [#uses=2]
- load i8* %boundaryStrengthsV.3, align 1 ; <i8>:153 [#uses=1]
+ load i8, i8* %boundaryStrengthsV.3, align 1 ; <i8>:153 [#uses=1]
trunc i32 %fEdgeHasNonZeroBS.0 to i8 ; <i8>:154 [#uses=1]
shl i8 %154, 5 ; <i8>:155 [#uses=1]
xor i8 %155, 32 ; <i8>:156 [#uses=1]
or i8 %153, %156 ; <i8>:157 [#uses=1]
store i8 %157, i8* %boundaryStrengthsV.3, align 1
- getelementptr i8* %boundaryStrengthsV.3, i32 4 ; <i8*>:158 [#uses=4]
+ getelementptr i8, i8* %boundaryStrengthsV.3, i32 4 ; <i8*>:158 [#uses=4]
shl i32 %bfNZ12.2, 4 ; <i32>:159 [#uses=4]
add i32 %ixEdge.1, 1 ; <i32>:160 [#uses=6]
icmp ult i32 %160, %numEdgesToTest.2 ; <i1>:161 [#uses=1]
diff --git a/test/CodeGen/PowerPC/2008-07-15-SignExtendInreg.ll b/test/CodeGen/PowerPC/2008-07-15-SignExtendInreg.ll
index 21b0c619e111..53639e7ceb04 100644
--- a/test/CodeGen/PowerPC/2008-07-15-SignExtendInreg.ll
+++ b/test/CodeGen/PowerPC/2008-07-15-SignExtendInreg.ll
@@ -4,7 +4,7 @@ target triple = "powerpc-apple-darwin9"
define signext i16 @t(i16* %dct) nounwind {
entry:
- load i16* null, align 2 ; <i16>:0 [#uses=2]
+ load i16, i16* null, align 2 ; <i16>:0 [#uses=2]
lshr i16 %0, 11 ; <i16>:1 [#uses=0]
trunc i16 %0 to i8 ; <i8>:2 [#uses=1]
sext i8 %2 to i16 ; <i16>:3 [#uses=1]
diff --git a/test/CodeGen/PowerPC/2008-07-24-PPC64-CCBug.ll b/test/CodeGen/PowerPC/2008-07-24-PPC64-CCBug.ll
index c9c05e1cc363..ee3d0f4ea46c 100644
--- a/test/CodeGen/PowerPC/2008-07-24-PPC64-CCBug.ll
+++ b/test/CodeGen/PowerPC/2008-07-24-PPC64-CCBug.ll
@@ -4,7 +4,7 @@
define void @llvm_static_func(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15) nounwind {
entry:
- tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i64 0), i32 %a8 ) nounwind ; <i32>:0 [#uses=0]
+ tail call i32 (i8*, ...) @printf( i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i64 0), i32 %a8 ) nounwind ; <i32>:0 [#uses=0]
ret void
}
diff --git a/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll b/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll
index 97844dd7486a..b107600de135 100644
--- a/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll
+++ b/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll
@@ -10,25 +10,25 @@
define void @lb(%struct.CGLSI* %src, i32 %n, %struct.CGLDI* %dst) nounwind {
entry:
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%1 = icmp sgt i32 %0, 0 ; <i1> [#uses=1]
br i1 %1, label %bb.nph4945, label %return
bb.nph4945: ; preds = %entry
%2 = bitcast [2 x %struct.vv_t]* null to i64* ; <i64*> [#uses=6]
- %3 = getelementptr [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=6]
+ %3 = getelementptr [2 x i64], [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=6]
%4 = bitcast %struct.vv_t* null to i64* ; <i64*> [#uses=5]
- %5 = getelementptr [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=3]
+ %5 = getelementptr [2 x i64], [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=3]
br label %bb2326
bb2217: ; preds = %bb2326
%6 = or i64 0, 0 ; <i64> [#uses=2]
%7 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
%8 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %9 = getelementptr float* null, i32 2 ; <float*> [#uses=1]
- %10 = load float* %9, align 4 ; <float> [#uses=1]
- %11 = getelementptr float* null, i32 3 ; <float*> [#uses=1]
- %12 = load float* %11, align 4 ; <float> [#uses=1]
+ %9 = getelementptr float, float* null, i32 2 ; <float*> [#uses=1]
+ %10 = load float, float* %9, align 4 ; <float> [#uses=1]
+ %11 = getelementptr float, float* null, i32 3 ; <float*> [#uses=1]
+ %12 = load float, float* %11, align 4 ; <float> [#uses=1]
%13 = fmul float %10, 6.553500e+04 ; <float> [#uses=1]
%14 = fadd float %13, 5.000000e-01 ; <float> [#uses=1]
%15 = fmul float %12, 6.553500e+04 ; <float> [#uses=1]
@@ -63,11 +63,11 @@ bb2265: ; preds = %bb2264, %bb2262, %bb2217
%34 = and i64 %33, 281470681743360 ; <i64> [#uses=1]
store i64 %6, i64* %2, align 16
store i64 %31, i64* %3, align 8
- %35 = getelementptr i8* null, i32 0 ; <i8*> [#uses=1]
+ %35 = getelementptr i8, i8* null, i32 0 ; <i8*> [#uses=1]
%36 = bitcast i8* %35 to float* ; <float*> [#uses=4]
- %37 = load float* %36, align 4 ; <float> [#uses=1]
- %38 = getelementptr float* %36, i32 1 ; <float*> [#uses=1]
- %39 = load float* %38, align 4 ; <float> [#uses=1]
+ %37 = load float, float* %36, align 4 ; <float> [#uses=1]
+ %38 = getelementptr float, float* %36, i32 1 ; <float*> [#uses=1]
+ %39 = load float, float* %38, align 4 ; <float> [#uses=1]
%40 = fmul float %37, 6.553500e+04 ; <float> [#uses=1]
%41 = fadd float %40, 5.000000e-01 ; <float> [#uses=1]
%42 = fmul float %39, 6.553500e+04 ; <float> [#uses=1]
@@ -84,10 +84,10 @@ bb2277: ; preds = %bb2274, %bb2265
%f1582.0 = phi float [ 0.000000e+00, %bb2265 ], [ %43, %bb2274 ] ; <float> [#uses=1]
%47 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
%48 = fptosi float %f1582.0 to i32 ; <i32> [#uses=1]
- %49 = getelementptr float* %36, i32 2 ; <float*> [#uses=1]
- %50 = load float* %49, align 4 ; <float> [#uses=1]
- %51 = getelementptr float* %36, i32 3 ; <float*> [#uses=1]
- %52 = load float* %51, align 4 ; <float> [#uses=1]
+ %49 = getelementptr float, float* %36, i32 2 ; <float*> [#uses=1]
+ %50 = load float, float* %49, align 4 ; <float> [#uses=1]
+ %51 = getelementptr float, float* %36, i32 3 ; <float*> [#uses=1]
+ %52 = load float, float* %51, align 4 ; <float> [#uses=1]
%53 = fmul float %50, 6.553500e+04 ; <float> [#uses=1]
%54 = fadd float %53, 5.000000e-01 ; <float> [#uses=1]
%55 = fmul float %52, 6.553500e+04 ; <float> [#uses=1]
@@ -106,11 +106,11 @@ bb2277: ; preds = %bb2274, %bb2265
%68 = or i64 %64, %62 ; <i64> [#uses=1]
%69 = or i64 %68, %66 ; <i64> [#uses=1]
%70 = or i64 %69, %67 ; <i64> [#uses=2]
- %71 = getelementptr i8* null, i32 0 ; <i8*> [#uses=1]
+ %71 = getelementptr i8, i8* null, i32 0 ; <i8*> [#uses=1]
%72 = bitcast i8* %71 to float* ; <float*> [#uses=4]
- %73 = load float* %72, align 4 ; <float> [#uses=1]
- %74 = getelementptr float* %72, i32 1 ; <float*> [#uses=1]
- %75 = load float* %74, align 4 ; <float> [#uses=1]
+ %73 = load float, float* %72, align 4 ; <float> [#uses=1]
+ %74 = getelementptr float, float* %72, i32 1 ; <float*> [#uses=1]
+ %75 = load float, float* %74, align 4 ; <float> [#uses=1]
%76 = fmul float %73, 6.553500e+04 ; <float> [#uses=1]
%77 = fadd float %76, 5.000000e-01 ; <float> [#uses=3]
%78 = fmul float %75, 6.553500e+04 ; <float> [#uses=1]
@@ -130,10 +130,10 @@ bb2295: ; preds = %bb2294, %bb2292, %bb2277
%82 = fcmp olt float %79, 0.000000e+00 ; <i1> [#uses=0]
%83 = fptosi float %f0569.0 to i32 ; <i32> [#uses=1]
%84 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %85 = getelementptr float* %72, i32 2 ; <float*> [#uses=1]
- %86 = load float* %85, align 4 ; <float> [#uses=1]
- %87 = getelementptr float* %72, i32 3 ; <float*> [#uses=1]
- %88 = load float* %87, align 4 ; <float> [#uses=1]
+ %85 = getelementptr float, float* %72, i32 2 ; <float*> [#uses=1]
+ %86 = load float, float* %85, align 4 ; <float> [#uses=1]
+ %87 = getelementptr float, float* %72, i32 3 ; <float*> [#uses=1]
+ %88 = load float, float* %87, align 4 ; <float> [#uses=1]
%89 = fmul float %86, 6.553500e+04 ; <float> [#uses=1]
%90 = fadd float %89, 5.000000e-01 ; <float> [#uses=1]
%91 = fmul float %88, 6.553500e+04 ; <float> [#uses=1]
@@ -168,62 +168,62 @@ bb2315: ; preds = %bb2295
br i1 %114, label %bb2318, label %bb2317
bb2317: ; preds = %bb2315
- %115 = load i64* %2, align 16 ; <i64> [#uses=1]
- %116 = call i32 (...)* @_u16a_cm( i64 %115, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
+ %115 = load i64, i64* %2, align 16 ; <i64> [#uses=1]
+ %116 = call i32 (...) @_u16a_cm( i64 %115, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
%117 = sext i32 %116 to i64 ; <i64> [#uses=1]
store i64 %117, i64* %2, align 16
- %118 = load i64* %3, align 8 ; <i64> [#uses=1]
- %119 = call i32 (...)* @_u16a_cm( i64 %118, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
+ %118 = load i64, i64* %3, align 8 ; <i64> [#uses=1]
+ %119 = call i32 (...) @_u16a_cm( i64 %118, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
%120 = sext i32 %119 to i64 ; <i64> [#uses=1]
store i64 %120, i64* %3, align 8
- %121 = load i64* %4, align 16 ; <i64> [#uses=1]
- %122 = call i32 (...)* @_u16a_cm( i64 %121, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
+ %121 = load i64, i64* %4, align 16 ; <i64> [#uses=1]
+ %122 = call i32 (...) @_u16a_cm( i64 %121, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
%123 = sext i32 %122 to i64 ; <i64> [#uses=1]
store i64 %123, i64* %4, align 16
- %124 = load i64* %5, align 8 ; <i64> [#uses=1]
- %125 = call i32 (...)* @_u16a_cm( i64 %124, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=0]
+ %124 = load i64, i64* %5, align 8 ; <i64> [#uses=1]
+ %125 = call i32 (...) @_u16a_cm( i64 %124, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=0]
unreachable
bb2318: ; preds = %bb2315
- %126 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 8 ; <%struct.vv_t*> [#uses=1]
+ %126 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 8 ; <%struct.vv_t*> [#uses=1]
%127 = bitcast %struct.vv_t* %126 to i64* ; <i64*> [#uses=1]
- %128 = load i64* %127, align 8 ; <i64> [#uses=1]
+ %128 = load i64, i64* %127, align 8 ; <i64> [#uses=1]
%129 = trunc i64 %128 to i32 ; <i32> [#uses=4]
- %130 = load i64* %2, align 16 ; <i64> [#uses=1]
- %131 = call i32 (...)* @_u16_ff( i64 %130, i32 %129 ) nounwind ; <i32> [#uses=1]
+ %130 = load i64, i64* %2, align 16 ; <i64> [#uses=1]
+ %131 = call i32 (...) @_u16_ff( i64 %130, i32 %129 ) nounwind ; <i32> [#uses=1]
%132 = sext i32 %131 to i64 ; <i64> [#uses=1]
store i64 %132, i64* %2, align 16
- %133 = load i64* %3, align 8 ; <i64> [#uses=1]
- %134 = call i32 (...)* @_u16_ff( i64 %133, i32 %129 ) nounwind ; <i32> [#uses=1]
+ %133 = load i64, i64* %3, align 8 ; <i64> [#uses=1]
+ %134 = call i32 (...) @_u16_ff( i64 %133, i32 %129 ) nounwind ; <i32> [#uses=1]
%135 = sext i32 %134 to i64 ; <i64> [#uses=1]
store i64 %135, i64* %3, align 8
- %136 = load i64* %4, align 16 ; <i64> [#uses=1]
- %137 = call i32 (...)* @_u16_ff( i64 %136, i32 %129 ) nounwind ; <i32> [#uses=1]
+ %136 = load i64, i64* %4, align 16 ; <i64> [#uses=1]
+ %137 = call i32 (...) @_u16_ff( i64 %136, i32 %129 ) nounwind ; <i32> [#uses=1]
%138 = sext i32 %137 to i64 ; <i64> [#uses=1]
store i64 %138, i64* %4, align 16
- %139 = load i64* %5, align 8 ; <i64> [#uses=1]
- %140 = call i32 (...)* @_u16_ff( i64 %139, i32 %129 ) nounwind ; <i32> [#uses=0]
+ %139 = load i64, i64* %5, align 8 ; <i64> [#uses=1]
+ %140 = call i32 (...) @_u16_ff( i64 %139, i32 %129 ) nounwind ; <i32> [#uses=0]
unreachable
bb2319: ; preds = %bb2326
- %141 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 2 ; <i8**> [#uses=1]
- %142 = load i8** %141, align 4 ; <i8*> [#uses=4]
- %143 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
- %144 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %143 ) nounwind ; <i32> [#uses=1]
+ %141 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 2 ; <i8**> [#uses=1]
+ %142 = load i8*, i8** %141, align 4 ; <i8*> [#uses=4]
+ %143 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
+ %144 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %143 ) nounwind ; <i32> [#uses=1]
%145 = sext i32 %144 to i64 ; <i64> [#uses=2]
- %146 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
- %147 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %146 ) nounwind ; <i32> [#uses=1]
+ %146 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
+ %147 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %146 ) nounwind ; <i32> [#uses=1]
%148 = sext i32 %147 to i64 ; <i64> [#uses=2]
%149 = shl i64 %145, 48 ; <i64> [#uses=0]
%150 = shl i64 %148, 32 ; <i64> [#uses=1]
%151 = and i64 %150, 281470681743360 ; <i64> [#uses=0]
store i64 %145, i64* %2, align 16
store i64 %148, i64* %3, align 8
- %152 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
- %153 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %152 ) nounwind ; <i32> [#uses=1]
+ %152 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
+ %153 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %152 ) nounwind ; <i32> [#uses=1]
%154 = sext i32 %153 to i64 ; <i64> [#uses=0]
- %155 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
- %156 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %155 ) nounwind ; <i32> [#uses=0]
+ %155 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
+ %156 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %155 ) nounwind ; <i32> [#uses=0]
unreachable
bb2325: ; preds = %bb2326, %bb2295
@@ -233,10 +233,10 @@ bb2325: ; preds = %bb2326, %bb2295
bb2326: ; preds = %bb2325, %bb.nph4945
%indvar5021 = phi i32 [ 0, %bb.nph4945 ], [ %indvar.next5145, %bb2325 ] ; <i32> [#uses=6]
%157 = icmp slt i32 %indvar5021, %n ; <i1> [#uses=0]
- %158 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 10 ; <%struct.xx_t**> [#uses=1]
- %159 = load %struct.xx_t** %158, align 4 ; <%struct.xx_t*> [#uses=5]
- %160 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 1 ; <i32*> [#uses=1]
- %161 = load i32* %160, align 4 ; <i32> [#uses=1]
+ %158 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 10 ; <%struct.xx_t**> [#uses=1]
+ %159 = load %struct.xx_t*, %struct.xx_t** %158, align 4 ; <%struct.xx_t*> [#uses=5]
+ %160 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 1 ; <i32*> [#uses=1]
+ %161 = load i32, i32* %160, align 4 ; <i32> [#uses=1]
%162 = and i32 %161, 255 ; <i32> [#uses=1]
switch i32 %162, label %bb2325 [
i32 59, label %bb2217
diff --git a/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll b/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll
index f474a6d7cc22..2372b2f0b9dd 100644
--- a/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll
+++ b/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll
@@ -2,7 +2,7 @@
define void @__divtc3({ ppc_fp128, ppc_fp128 }* noalias sret %agg.result, ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind {
entry:
- %imag59 = load ppc_fp128* null, align 8 ; <ppc_fp128> [#uses=1]
+ %imag59 = load ppc_fp128, ppc_fp128* null, align 8 ; <ppc_fp128> [#uses=1]
%0 = fmul ppc_fp128 0xM00000000000000000000000000000000, %imag59 ; <ppc_fp128> [#uses=1]
%1 = fmul ppc_fp128 0xM00000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
%2 = fadd ppc_fp128 %0, %1 ; <ppc_fp128> [#uses=1]
diff --git a/test/CodeGen/PowerPC/2008-10-31-PPCF128Libcalls.ll b/test/CodeGen/PowerPC/2008-10-31-PPCF128Libcalls.ll
index 8322a8430815..fbe1287776f1 100644
--- a/test/CodeGen/PowerPC/2008-10-31-PPCF128Libcalls.ll
+++ b/test/CodeGen/PowerPC/2008-10-31-PPCF128Libcalls.ll
@@ -9,17 +9,17 @@ target triple = "powerpc-apple-darwin10.0"
define void @foo() nounwind {
entry:
- %0 = load ppc_fp128* @a, align 16 ; <ppc_fp128> [#uses=1]
+ %0 = load ppc_fp128, ppc_fp128* @a, align 16 ; <ppc_fp128> [#uses=1]
%1 = call ppc_fp128 @llvm.sqrt.ppcf128(ppc_fp128 %0) ; <ppc_fp128> [#uses=1]
store ppc_fp128 %1, ppc_fp128* @a, align 16
- %2 = load ppc_fp128* @b, align 16 ; <ppc_fp128> [#uses=1]
+ %2 = load ppc_fp128, ppc_fp128* @b, align 16 ; <ppc_fp128> [#uses=1]
%3 = call ppc_fp128 @"\01_sinl$LDBL128"(ppc_fp128 %2) nounwind readonly ; <ppc_fp128> [#uses=1]
store ppc_fp128 %3, ppc_fp128* @b, align 16
- %4 = load ppc_fp128* @c, align 16 ; <ppc_fp128> [#uses=1]
+ %4 = load ppc_fp128, ppc_fp128* @c, align 16 ; <ppc_fp128> [#uses=1]
%5 = call ppc_fp128 @"\01_cosl$LDBL128"(ppc_fp128 %4) nounwind readonly ; <ppc_fp128> [#uses=1]
store ppc_fp128 %5, ppc_fp128* @c, align 16
- %6 = load ppc_fp128* @d, align 16 ; <ppc_fp128> [#uses=1]
- %7 = load ppc_fp128* @c, align 16 ; <ppc_fp128> [#uses=1]
+ %6 = load ppc_fp128, ppc_fp128* @d, align 16 ; <ppc_fp128> [#uses=1]
+ %7 = load ppc_fp128, ppc_fp128* @c, align 16 ; <ppc_fp128> [#uses=1]
%8 = call ppc_fp128 @llvm.pow.ppcf128(ppc_fp128 %6, ppc_fp128 %7) ; <ppc_fp128> [#uses=1]
store ppc_fp128 %8, ppc_fp128* @d, align 16
br label %return
diff --git a/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll b/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll
index ce8e72df2616..74356d38f8a2 100644
--- a/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll
+++ b/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll
@@ -5,7 +5,7 @@
define i32 @main() nounwind {
entry:
- %0 = call i8* @fopen(i8* getelementptr ([13 x i8]* @"\01LC", i32 0, i32 0), i8* null) nounwind ; <i8*> [#uses=0]
+ %0 = call i8* @fopen(i8* getelementptr ([13 x i8], [13 x i8]* @"\01LC", i32 0, i32 0), i8* null) nounwind ; <i8*> [#uses=0]
unreachable
}
diff --git a/test/CodeGen/PowerPC/2009-03-17-LSRBug.ll b/test/CodeGen/PowerPC/2009-03-17-LSRBug.ll
index 172531e5db49..f5a7bf8de4c3 100644
--- a/test/CodeGen/PowerPC/2009-03-17-LSRBug.ll
+++ b/test/CodeGen/PowerPC/2009-03-17-LSRBug.ll
@@ -9,7 +9,7 @@ bb20.loopexit: ; preds = %entry
ret void
bb21: ; preds = %entry
- %0 = getelementptr i8* %a, i32 0 ; <i8*> [#uses=2]
+ %0 = getelementptr i8, i8* %a, i32 0 ; <i8*> [#uses=2]
br label %bb35
bb29: ; preds = %bb35
@@ -17,7 +17,7 @@ bb29: ; preds = %bb35
bb7.i252: ; preds = %bb7.i252, %bb29
%pj.0.rec.i247 = phi i32 [ %indvar.next488, %bb7.i252 ], [ 0, %bb29 ] ; <i32> [#uses=2]
- %pi.0.i248 = getelementptr i8* %pa.1, i32 %pj.0.rec.i247 ; <i8*> [#uses=0]
+ %pi.0.i248 = getelementptr i8, i8* %pa.1, i32 %pj.0.rec.i247 ; <i8*> [#uses=0]
%indvar.next488 = add i32 %pj.0.rec.i247, 1 ; <i32> [#uses=1]
br i1 false, label %bb34, label %bb7.i252
@@ -45,7 +45,7 @@ bb7.i161: ; preds = %bb7.i161, %bb50
%pj.0.rec.i156 = phi i32 [ %indvar.next394, %bb7.i161 ], [ 0, %bb50 ] ; <i32> [#uses=2]
%.sum279 = sub i32 %pj.0.rec.i156, %min ; <i32> [#uses=1]
%pb.0.sum542 = add i32 %pb.0.rec, %.sum279 ; <i32> [#uses=1]
- %pj.0.i158 = getelementptr i8* %0, i32 %pb.0.sum542 ; <i8*> [#uses=0]
+ %pj.0.i158 = getelementptr i8, i8* %0, i32 %pb.0.sum542 ; <i8*> [#uses=0]
%indvar.next394 = add i32 %pj.0.rec.i156, 1 ; <i32> [#uses=1]
br label %bb7.i161
}
diff --git a/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll b/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll
index 91253daae396..289e09b2dae2 100644
--- a/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll
+++ b/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll
@@ -15,8 +15,8 @@ entry:
%y_addr = alloca i32 ; <i32*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i32 %y, i32* %y_addr
- %0 = load i32* %y_addr, align 4 ; <i32> [#uses=1]
- %1 = getelementptr inbounds [0 x i32]* @x, i32 0, i32 %0 ; <i32*> [#uses=1]
+ %0 = load i32, i32* %y_addr, align 4 ; <i32> [#uses=1]
+ %1 = getelementptr inbounds [0 x i32], [0 x i32]* @x, i32 0, i32 %0 ; <i32*> [#uses=1]
call void asm sideeffect "isync\0A\09eieio\0A\09stw $1, $0", "=*o,r,~{memory}"(i32* %1, i32 0) nounwind
br label %return
diff --git a/test/CodeGen/PowerPC/2009-11-15-ProcImpDefsBug.ll b/test/CodeGen/PowerPC/2009-11-15-ProcImpDefsBug.ll
index 2d9d16ae6d83..61a9a4fbd4c0 100644
--- a/test/CodeGen/PowerPC/2009-11-15-ProcImpDefsBug.ll
+++ b/test/CodeGen/PowerPC/2009-11-15-ProcImpDefsBug.ll
@@ -100,6 +100,6 @@ bb48.3: ; preds = %bb49.2
br label %bb49.3
bb48.4: ; preds = %bb49.3
- %0 = getelementptr inbounds [5 x i64*]* undef, i32 0, i32 %c_ix.0.3 ; <i64**> [#uses=0]
+ %0 = getelementptr inbounds [5 x i64*], [5 x i64*]* undef, i32 0, i32 %c_ix.0.3 ; <i64**> [#uses=0]
br label %bb51
}
diff --git a/test/CodeGen/PowerPC/2010-02-12-saveCR.ll b/test/CodeGen/PowerPC/2010-02-12-saveCR.ll
index b0c37b80ed2f..5932b6d75c0a 100644
--- a/test/CodeGen/PowerPC/2010-02-12-saveCR.ll
+++ b/test/CodeGen/PowerPC/2010-02-12-saveCR.ll
@@ -9,13 +9,13 @@ entry:
; CHECK: mfcr [[T1:r[0-9]+]] ; cr2
; CHECK: lis [[T2:r[0-9]+]], 1
; CHECK: addi r3, r1, 72
-; CHECK: rlwinm [[T1]], [[T1]], 8, 0, 31
+; CHECK: rotlwi [[T1]], [[T1]], 8
; CHECK: ori [[T2]], [[T2]], 34540
; CHECK: stwx [[T1]], r1, [[T2]]
; CHECK: lis [[T3:r[0-9]+]], 1
; CHECK: mfcr [[T4:r[0-9]+]] ; cr3
; CHECK: ori [[T3]], [[T3]], 34536
-; CHECK: rlwinm [[T4]], [[T4]], 12, 0, 31
+; CHECK: rotlwi [[T4]], [[T4]], 12
; CHECK: stwx [[T4]], r1, [[T3]]
%x = alloca [100000 x i8] ; <[100000 x i8]*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
@@ -28,12 +28,12 @@ return: ; preds = %entry
; CHECK: lis [[T1:r[0-9]+]], 1
; CHECK: ori [[T1]], [[T1]], 34536
; CHECK: lwzx [[T1]], r1, [[T1]]
-; CHECK: rlwinm [[T1]], [[T1]], 20, 0, 31
+; CHECK: rotlwi [[T1]], [[T1]], 20
; CHECK: mtcrf 16, [[T1]]
; CHECK: lis [[T1]], 1
; CHECK: ori [[T1]], [[T1]], 34540
; CHECK: lwzx [[T1]], r1, [[T1]]
-; CHECK: rlwinm [[T1]], [[T1]], 24, 0, 31
+; CHECK: rotlwi [[T1]], [[T1]], 24
; CHECK: mtcrf 32, [[T1]]
ret void
}
diff --git a/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll b/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll
index b95ac6880758..0599b74a69f5 100644
--- a/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll
+++ b/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll
@@ -11,8 +11,8 @@ define void @foo() nounwind ssp {
entry:
; CHECK: mtctr r12
; CHECK: bctrl
- %0 = load void (...)** @p, align 4 ; <void (...)*> [#uses=1]
- call void (...)* %0() nounwind
+ %0 = load void (...)*, void (...)** @p, align 4 ; <void (...)*> [#uses=1]
+ call void (...) %0() nounwind
br label %return
return: ; preds = %entry
diff --git a/test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll b/test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll
index a25ce07e83bf..1f320a84a4e6 100644
--- a/test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll
+++ b/test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll
@@ -12,11 +12,11 @@ entry:
%0 = alloca i32
%"alloca point" = bitcast i32 0 to i32
store i32 0, i32* %0, align 4
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
store i32 %1, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval
+ %retval1 = load i32, i32* %retval
ret i32 %retval1
}
diff --git a/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll b/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll
index e7bc5bfa37ec..e5920911ee2f 100644
--- a/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll
+++ b/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll
@@ -28,7 +28,7 @@ declare void @check(i32 %name) nounwind
define i32 @s122(i32 %n1, i32 %n3) nounwind {
entry:
- %call = tail call i32 @init(i8* getelementptr inbounds ([6 x i8]* @.str11, i64 0, i64 0))
+ %call = tail call i32 @init(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str11, i64 0, i64 0))
%call1 = tail call i64 @clock() nounwind
%sub = add nsw i32 %n1, -1
%cmp316 = icmp slt i32 %sub, 32000
@@ -46,10 +46,10 @@ for.body4.us: ; preds = %for.body4.lr.ph.us,
%sub5.us = sub i64 31999, %indvars.iv20
%sext = shl i64 %sub5.us, 32
%idxprom.us = ashr exact i64 %sext, 32
- %arrayidx.us = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us
- %2 = load float* %arrayidx.us, align 4
- %arrayidx7.us = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv
- %3 = load float* %arrayidx7.us, align 4
+ %arrayidx.us = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us
+ %2 = load float, float* %arrayidx.us, align 4
+ %arrayidx7.us = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv
+ %3 = load float, float* %arrayidx7.us, align 4
%add8.us = fadd float %3, %2
store float %add8.us, float* %arrayidx7.us, align 4
%indvars.iv.next = add i64 %indvars.iv, %1
@@ -66,12 +66,12 @@ for.end12: ; preds = %for.end.7, %for.end
%sub14 = sub nsw i64 %call13, %call1
%conv = sitofp i64 %sub14 to double
%div = fdiv double %conv, 1.000000e+06
- %call15 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([14 x i8]* @.str152, i64 0, i64 0), double %div) nounwind
+ %call15 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str152, i64 0, i64 0), double %div) nounwind
tail call void @check(i32 1)
ret i32 0
for.body4.lr.ph.us.1: ; preds = %for.body4.us
- %call10.us = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.us = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
br label %for.body4.us.1
for.body4.us.1: ; preds = %for.body4.us.1, %for.body4.lr.ph.us.1
@@ -81,10 +81,10 @@ for.body4.us.1: ; preds = %for.body4.us.1, %fo
%sub5.us.1 = sub i64 31999, %indvars.iv20.1
%sext23 = shl i64 %sub5.us.1, 32
%idxprom.us.1 = ashr exact i64 %sext23, 32
- %arrayidx.us.1 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.1
- %5 = load float* %arrayidx.us.1, align 4
- %arrayidx7.us.1 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.1
- %6 = load float* %arrayidx7.us.1, align 4
+ %arrayidx.us.1 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.1
+ %5 = load float, float* %arrayidx.us.1, align 4
+ %arrayidx7.us.1 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.1
+ %6 = load float, float* %arrayidx7.us.1, align 4
%add8.us.1 = fadd float %6, %5
store float %add8.us.1, float* %arrayidx7.us.1, align 4
%indvars.iv.next.1 = add i64 %indvars.iv.1, %1
@@ -93,7 +93,7 @@ for.body4.us.1: ; preds = %for.body4.us.1, %fo
br i1 %cmp3.us.1, label %for.body4.us.1, label %for.body4.lr.ph.us.2
for.body4.lr.ph.us.2: ; preds = %for.body4.us.1
- %call10.us.1 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.us.1 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
br label %for.body4.us.2
for.body4.us.2: ; preds = %for.body4.us.2, %for.body4.lr.ph.us.2
@@ -103,10 +103,10 @@ for.body4.us.2: ; preds = %for.body4.us.2, %fo
%sub5.us.2 = sub i64 31999, %indvars.iv20.2
%sext24 = shl i64 %sub5.us.2, 32
%idxprom.us.2 = ashr exact i64 %sext24, 32
- %arrayidx.us.2 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.2
- %8 = load float* %arrayidx.us.2, align 4
- %arrayidx7.us.2 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.2
- %9 = load float* %arrayidx7.us.2, align 4
+ %arrayidx.us.2 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.2
+ %8 = load float, float* %arrayidx.us.2, align 4
+ %arrayidx7.us.2 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.2
+ %9 = load float, float* %arrayidx7.us.2, align 4
%add8.us.2 = fadd float %9, %8
store float %add8.us.2, float* %arrayidx7.us.2, align 4
%indvars.iv.next.2 = add i64 %indvars.iv.2, %1
@@ -115,7 +115,7 @@ for.body4.us.2: ; preds = %for.body4.us.2, %fo
br i1 %cmp3.us.2, label %for.body4.us.2, label %for.body4.lr.ph.us.3
for.body4.lr.ph.us.3: ; preds = %for.body4.us.2
- %call10.us.2 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.us.2 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
br label %for.body4.us.3
for.body4.us.3: ; preds = %for.body4.us.3, %for.body4.lr.ph.us.3
@@ -125,10 +125,10 @@ for.body4.us.3: ; preds = %for.body4.us.3, %fo
%sub5.us.3 = sub i64 31999, %indvars.iv20.3
%sext25 = shl i64 %sub5.us.3, 32
%idxprom.us.3 = ashr exact i64 %sext25, 32
- %arrayidx.us.3 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.3
- %11 = load float* %arrayidx.us.3, align 4
- %arrayidx7.us.3 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.3
- %12 = load float* %arrayidx7.us.3, align 4
+ %arrayidx.us.3 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.3
+ %11 = load float, float* %arrayidx.us.3, align 4
+ %arrayidx7.us.3 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.3
+ %12 = load float, float* %arrayidx7.us.3, align 4
%add8.us.3 = fadd float %12, %11
store float %add8.us.3, float* %arrayidx7.us.3, align 4
%indvars.iv.next.3 = add i64 %indvars.iv.3, %1
@@ -137,7 +137,7 @@ for.body4.us.3: ; preds = %for.body4.us.3, %fo
br i1 %cmp3.us.3, label %for.body4.us.3, label %for.body4.lr.ph.us.4
for.body4.lr.ph.us.4: ; preds = %for.body4.us.3
- %call10.us.3 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.us.3 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
br label %for.body4.us.4
for.body4.us.4: ; preds = %for.body4.us.4, %for.body4.lr.ph.us.4
@@ -147,10 +147,10 @@ for.body4.us.4: ; preds = %for.body4.us.4, %fo
%sub5.us.4 = sub i64 31999, %indvars.iv20.4
%sext26 = shl i64 %sub5.us.4, 32
%idxprom.us.4 = ashr exact i64 %sext26, 32
- %arrayidx.us.4 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.4
- %14 = load float* %arrayidx.us.4, align 4
- %arrayidx7.us.4 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.4
- %15 = load float* %arrayidx7.us.4, align 4
+ %arrayidx.us.4 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.4
+ %14 = load float, float* %arrayidx.us.4, align 4
+ %arrayidx7.us.4 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.4
+ %15 = load float, float* %arrayidx7.us.4, align 4
%add8.us.4 = fadd float %15, %14
store float %add8.us.4, float* %arrayidx7.us.4, align 4
%indvars.iv.next.4 = add i64 %indvars.iv.4, %1
@@ -159,21 +159,21 @@ for.body4.us.4: ; preds = %for.body4.us.4, %fo
br i1 %cmp3.us.4, label %for.body4.us.4, label %for.end.us.4
for.end.us.4: ; preds = %for.body4.us.4
- %call10.us.4 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.us.4 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
%inc.us.4 = add nsw i32 %nl.019.us, 5
%exitcond.4 = icmp eq i32 %inc.us.4, 200000
br i1 %exitcond.4, label %for.end12, label %for.body4.lr.ph.us
for.end.7: ; preds = %entry, %for.end.7
%nl.019 = phi i32 [ %inc.7, %for.end.7 ], [ 0, %entry ]
- %call10 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.1 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.2 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.3 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.4 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.5 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.6 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.7 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.1 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.2 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.3 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.4 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.5 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.6 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.7 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
%inc.7 = add nsw i32 %nl.019, 8
%exitcond.7 = icmp eq i32 %inc.7, 200000
br i1 %exitcond.7, label %for.end12, label %for.end.7
diff --git a/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll b/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll
index a6223d41cc3f..93476827949f 100644
--- a/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll
+++ b/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll
@@ -29,13 +29,13 @@ declare void @check(i32 %name) nounwind
define i32 @s3110() nounwind {
entry:
- %call = tail call i32 @init(i8* getelementptr inbounds ([6 x i8]* @.str81, i64 0, i64 0))
+ %call = tail call i32 @init(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str81, i64 0, i64 0))
%call1 = tail call i64 @clock() nounwind
br label %for.body
for.body: ; preds = %for.end17, %entry
%nl.041 = phi i32 [ 0, %entry ], [ %inc22, %for.end17 ]
- %0 = load float* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0, i64 0), align 16
+ %0 = load float, float* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0, i64 0), align 16
br label %for.cond5.preheader
for.cond5.preheader: ; preds = %for.inc15, %for.body
@@ -50,8 +50,8 @@ for.body7: ; preds = %for.body7, %for.con
%max.235 = phi float [ %max.139, %for.cond5.preheader ], [ %max.3.15, %for.body7 ]
%xindex.234 = phi i32 [ %xindex.138, %for.cond5.preheader ], [ %xindex.3.15, %for.body7 ]
%yindex.233 = phi i32 [ %yindex.137, %for.cond5.preheader ], [ %yindex.3.15, %for.body7 ]
- %arrayidx9 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv
- %1 = load float* %arrayidx9, align 16
+ %arrayidx9 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv
+ %1 = load float, float* %arrayidx9, align 16
%cmp10 = fcmp ogt float %1, %max.235
%2 = trunc i64 %indvars.iv to i32
%yindex.3 = select i1 %cmp10, i32 %2, i32 %yindex.233
@@ -59,120 +59,120 @@ for.body7: ; preds = %for.body7, %for.con
%xindex.3 = select i1 %cmp10, i32 %3, i32 %xindex.234
%max.3 = select i1 %cmp10, float %1, float %max.235
%indvars.iv.next45 = or i64 %indvars.iv, 1
- %arrayidx9.1 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next45
- %4 = load float* %arrayidx9.1, align 4
+ %arrayidx9.1 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next45
+ %4 = load float, float* %arrayidx9.1, align 4
%cmp10.1 = fcmp ogt float %4, %max.3
%5 = trunc i64 %indvars.iv.next45 to i32
%yindex.3.1 = select i1 %cmp10.1, i32 %5, i32 %yindex.3
%xindex.3.1 = select i1 %cmp10.1, i32 %3, i32 %xindex.3
%max.3.1 = select i1 %cmp10.1, float %4, float %max.3
%indvars.iv.next.146 = or i64 %indvars.iv, 2
- %arrayidx9.2 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.146
- %6 = load float* %arrayidx9.2, align 8
+ %arrayidx9.2 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.146
+ %6 = load float, float* %arrayidx9.2, align 8
%cmp10.2 = fcmp ogt float %6, %max.3.1
%7 = trunc i64 %indvars.iv.next.146 to i32
%yindex.3.2 = select i1 %cmp10.2, i32 %7, i32 %yindex.3.1
%xindex.3.2 = select i1 %cmp10.2, i32 %3, i32 %xindex.3.1
%max.3.2 = select i1 %cmp10.2, float %6, float %max.3.1
%indvars.iv.next.247 = or i64 %indvars.iv, 3
- %arrayidx9.3 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.247
- %8 = load float* %arrayidx9.3, align 4
+ %arrayidx9.3 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.247
+ %8 = load float, float* %arrayidx9.3, align 4
%cmp10.3 = fcmp ogt float %8, %max.3.2
%9 = trunc i64 %indvars.iv.next.247 to i32
%yindex.3.3 = select i1 %cmp10.3, i32 %9, i32 %yindex.3.2
%xindex.3.3 = select i1 %cmp10.3, i32 %3, i32 %xindex.3.2
%max.3.3 = select i1 %cmp10.3, float %8, float %max.3.2
%indvars.iv.next.348 = or i64 %indvars.iv, 4
- %arrayidx9.4 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.348
- %10 = load float* %arrayidx9.4, align 16
+ %arrayidx9.4 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.348
+ %10 = load float, float* %arrayidx9.4, align 16
%cmp10.4 = fcmp ogt float %10, %max.3.3
%11 = trunc i64 %indvars.iv.next.348 to i32
%yindex.3.4 = select i1 %cmp10.4, i32 %11, i32 %yindex.3.3
%xindex.3.4 = select i1 %cmp10.4, i32 %3, i32 %xindex.3.3
%max.3.4 = select i1 %cmp10.4, float %10, float %max.3.3
%indvars.iv.next.449 = or i64 %indvars.iv, 5
- %arrayidx9.5 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.449
- %12 = load float* %arrayidx9.5, align 4
+ %arrayidx9.5 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.449
+ %12 = load float, float* %arrayidx9.5, align 4
%cmp10.5 = fcmp ogt float %12, %max.3.4
%13 = trunc i64 %indvars.iv.next.449 to i32
%yindex.3.5 = select i1 %cmp10.5, i32 %13, i32 %yindex.3.4
%xindex.3.5 = select i1 %cmp10.5, i32 %3, i32 %xindex.3.4
%max.3.5 = select i1 %cmp10.5, float %12, float %max.3.4
%indvars.iv.next.550 = or i64 %indvars.iv, 6
- %arrayidx9.6 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.550
- %14 = load float* %arrayidx9.6, align 8
+ %arrayidx9.6 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.550
+ %14 = load float, float* %arrayidx9.6, align 8
%cmp10.6 = fcmp ogt float %14, %max.3.5
%15 = trunc i64 %indvars.iv.next.550 to i32
%yindex.3.6 = select i1 %cmp10.6, i32 %15, i32 %yindex.3.5
%xindex.3.6 = select i1 %cmp10.6, i32 %3, i32 %xindex.3.5
%max.3.6 = select i1 %cmp10.6, float %14, float %max.3.5
%indvars.iv.next.651 = or i64 %indvars.iv, 7
- %arrayidx9.7 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.651
- %16 = load float* %arrayidx9.7, align 4
+ %arrayidx9.7 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.651
+ %16 = load float, float* %arrayidx9.7, align 4
%cmp10.7 = fcmp ogt float %16, %max.3.6
%17 = trunc i64 %indvars.iv.next.651 to i32
%yindex.3.7 = select i1 %cmp10.7, i32 %17, i32 %yindex.3.6
%xindex.3.7 = select i1 %cmp10.7, i32 %3, i32 %xindex.3.6
%max.3.7 = select i1 %cmp10.7, float %16, float %max.3.6
%indvars.iv.next.752 = or i64 %indvars.iv, 8
- %arrayidx9.8 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.752
- %18 = load float* %arrayidx9.8, align 16
+ %arrayidx9.8 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.752
+ %18 = load float, float* %arrayidx9.8, align 16
%cmp10.8 = fcmp ogt float %18, %max.3.7
%19 = trunc i64 %indvars.iv.next.752 to i32
%yindex.3.8 = select i1 %cmp10.8, i32 %19, i32 %yindex.3.7
%xindex.3.8 = select i1 %cmp10.8, i32 %3, i32 %xindex.3.7
%max.3.8 = select i1 %cmp10.8, float %18, float %max.3.7
%indvars.iv.next.853 = or i64 %indvars.iv, 9
- %arrayidx9.9 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.853
- %20 = load float* %arrayidx9.9, align 4
+ %arrayidx9.9 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.853
+ %20 = load float, float* %arrayidx9.9, align 4
%cmp10.9 = fcmp ogt float %20, %max.3.8
%21 = trunc i64 %indvars.iv.next.853 to i32
%yindex.3.9 = select i1 %cmp10.9, i32 %21, i32 %yindex.3.8
%xindex.3.9 = select i1 %cmp10.9, i32 %3, i32 %xindex.3.8
%max.3.9 = select i1 %cmp10.9, float %20, float %max.3.8
%indvars.iv.next.954 = or i64 %indvars.iv, 10
- %arrayidx9.10 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.954
- %22 = load float* %arrayidx9.10, align 8
+ %arrayidx9.10 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.954
+ %22 = load float, float* %arrayidx9.10, align 8
%cmp10.10 = fcmp ogt float %22, %max.3.9
%23 = trunc i64 %indvars.iv.next.954 to i32
%yindex.3.10 = select i1 %cmp10.10, i32 %23, i32 %yindex.3.9
%xindex.3.10 = select i1 %cmp10.10, i32 %3, i32 %xindex.3.9
%max.3.10 = select i1 %cmp10.10, float %22, float %max.3.9
%indvars.iv.next.1055 = or i64 %indvars.iv, 11
- %arrayidx9.11 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1055
- %24 = load float* %arrayidx9.11, align 4
+ %arrayidx9.11 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1055
+ %24 = load float, float* %arrayidx9.11, align 4
%cmp10.11 = fcmp ogt float %24, %max.3.10
%25 = trunc i64 %indvars.iv.next.1055 to i32
%yindex.3.11 = select i1 %cmp10.11, i32 %25, i32 %yindex.3.10
%xindex.3.11 = select i1 %cmp10.11, i32 %3, i32 %xindex.3.10
%max.3.11 = select i1 %cmp10.11, float %24, float %max.3.10
%indvars.iv.next.1156 = or i64 %indvars.iv, 12
- %arrayidx9.12 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1156
- %26 = load float* %arrayidx9.12, align 16
+ %arrayidx9.12 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1156
+ %26 = load float, float* %arrayidx9.12, align 16
%cmp10.12 = fcmp ogt float %26, %max.3.11
%27 = trunc i64 %indvars.iv.next.1156 to i32
%yindex.3.12 = select i1 %cmp10.12, i32 %27, i32 %yindex.3.11
%xindex.3.12 = select i1 %cmp10.12, i32 %3, i32 %xindex.3.11
%max.3.12 = select i1 %cmp10.12, float %26, float %max.3.11
%indvars.iv.next.1257 = or i64 %indvars.iv, 13
- %arrayidx9.13 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1257
- %28 = load float* %arrayidx9.13, align 4
+ %arrayidx9.13 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1257
+ %28 = load float, float* %arrayidx9.13, align 4
%cmp10.13 = fcmp ogt float %28, %max.3.12
%29 = trunc i64 %indvars.iv.next.1257 to i32
%yindex.3.13 = select i1 %cmp10.13, i32 %29, i32 %yindex.3.12
%xindex.3.13 = select i1 %cmp10.13, i32 %3, i32 %xindex.3.12
%max.3.13 = select i1 %cmp10.13, float %28, float %max.3.12
%indvars.iv.next.1358 = or i64 %indvars.iv, 14
- %arrayidx9.14 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1358
- %30 = load float* %arrayidx9.14, align 8
+ %arrayidx9.14 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1358
+ %30 = load float, float* %arrayidx9.14, align 8
%cmp10.14 = fcmp ogt float %30, %max.3.13
%31 = trunc i64 %indvars.iv.next.1358 to i32
%yindex.3.14 = select i1 %cmp10.14, i32 %31, i32 %yindex.3.13
%xindex.3.14 = select i1 %cmp10.14, i32 %3, i32 %xindex.3.13
%max.3.14 = select i1 %cmp10.14, float %30, float %max.3.13
%indvars.iv.next.1459 = or i64 %indvars.iv, 15
- %arrayidx9.15 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1459
- %32 = load float* %arrayidx9.15, align 4
+ %arrayidx9.15 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1459
+ %32 = load float, float* %arrayidx9.15, align 4
%cmp10.15 = fcmp ogt float %32, %max.3.14
%33 = trunc i64 %indvars.iv.next.1459 to i32
%yindex.3.15 = select i1 %cmp10.15, i32 %33, i32 %yindex.3.14
@@ -194,7 +194,7 @@ for.end17: ; preds = %for.inc15
%add = fadd float %max.3.15, %conv
%conv18 = sitofp i32 %yindex.3.15 to float
%add19 = fadd float %add, %conv18
- %call20 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]]* @cc, i64 0, i64 0), float %add19) nounwind
+ %call20 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float %add19) nounwind
%inc22 = add nsw i32 %nl.041, 1
%exitcond44 = icmp eq i32 %inc22, 78100
br i1 %exitcond44, label %for.end23, label %for.body
@@ -204,7 +204,7 @@ for.end23: ; preds = %for.end17
%sub = sub nsw i64 %call24, %call1
%conv25 = sitofp i64 %sub to double
%div = fdiv double %conv25, 1.000000e+06
- %call26 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([15 x i8]* @.str235, i64 0, i64 0), double %div) nounwind
+ %call26 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str235, i64 0, i64 0), double %div) nounwind
%add29 = fadd float %add, 1.000000e+00
%add31 = fadd float %add29, %conv18
%add32 = fadd float %add31, 1.000000e+00
diff --git a/test/CodeGen/PowerPC/2011-12-08-DemandedBitsMiscompile.ll b/test/CodeGen/PowerPC/2011-12-08-DemandedBitsMiscompile.ll
index b1cbb36fe041..05390cf8b92e 100644
--- a/test/CodeGen/PowerPC/2011-12-08-DemandedBitsMiscompile.ll
+++ b/test/CodeGen/PowerPC/2011-12-08-DemandedBitsMiscompile.ll
@@ -2,7 +2,7 @@
define void @test(i32* nocapture %x, i64* %xx, i32* %yp) nounwind uwtable ssp {
entry:
- %yy = load i32* %yp
+ %yy = load i32, i32* %yp
%y = add i32 %yy, 1
%z = zext i32 %y to i64
%z2 = shl i64 %z, 32
diff --git a/test/CodeGen/PowerPC/2012-09-16-TOC-entry-check.ll b/test/CodeGen/PowerPC/2012-09-16-TOC-entry-check.ll
index 5bff58f2bbf5..1d45c2e73455 100644
--- a/test/CodeGen/PowerPC/2012-09-16-TOC-entry-check.ll
+++ b/test/CodeGen/PowerPC/2012-09-16-TOC-entry-check.ll
@@ -19,9 +19,9 @@ define i32 @foo(double %X, double %Y) nounwind readnone {
}
; Check the creation of 2 .tc entries for both double constants. They
-; should be .LC1 and .LC3 to avoid name clash with global constants
-; .LC0 and .LC2
-; CHECK: .LC{{[13]}}:
+; avoid name clash with global constants .LC0 and .LC2
+; CHECK: .section .toc,"aw",@progbits
+; CHECK: .LC{{.*}}:
; CHECK-NEXT: .tc {{[\._a-zA-Z0-9]+}}[TC],{{[\._a-zA-Z0-9]+}}
-; CHECK: .LC{{[13]}}:
+; CHECK: .LC{{.*}}:
; CHECK-NEXT: .tc {{[\._a-zA-Z0-9]+}}[TC],{{[\._a-zA-Z0-9]+}}
diff --git a/test/CodeGen/PowerPC/2013-05-15-preinc-fold.ll b/test/CodeGen/PowerPC/2013-05-15-preinc-fold.ll
index 542a766300ef..9fe88a85d15f 100644
--- a/test/CodeGen/PowerPC/2013-05-15-preinc-fold.ll
+++ b/test/CodeGen/PowerPC/2013-05-15-preinc-fold.ll
@@ -5,9 +5,9 @@ target triple = "powerpc64-unknown-linux-gnu"
define i8* @test(i8* %base, i8 %val) {
entry:
- %arrayidx = getelementptr inbounds i8* %base, i32 -1
+ %arrayidx = getelementptr inbounds i8, i8* %base, i32 -1
store i8 %val, i8* %arrayidx, align 1
- %arrayidx2 = getelementptr inbounds i8* %base, i32 1
+ %arrayidx2 = getelementptr inbounds i8, i8* %base, i32 1
store i8 %val, i8* %arrayidx2, align 1
ret i8* %arrayidx
}
@@ -19,9 +19,9 @@ entry:
define i64* @test64(i64* %base, i64 %val) {
entry:
- %arrayidx = getelementptr inbounds i64* %base, i32 -1
+ %arrayidx = getelementptr inbounds i64, i64* %base, i32 -1
store i64 %val, i64* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds i64* %base, i32 1
+ %arrayidx2 = getelementptr inbounds i64, i64* %base, i32 1
store i64 %val, i64* %arrayidx2, align 8
ret i64* %arrayidx
}
diff --git a/test/CodeGen/PowerPC/2013-07-01-PHIElimBug.ll b/test/CodeGen/PowerPC/2013-07-01-PHIElimBug.ll
index 9bf25c8ffe49..3c6f3ff32454 100644
--- a/test/CodeGen/PowerPC/2013-07-01-PHIElimBug.ll
+++ b/test/CodeGen/PowerPC/2013-07-01-PHIElimBug.ll
@@ -10,17 +10,17 @@ target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
define fastcc void @func_7() #0 {
entry:
- %arrayidx638 = getelementptr inbounds [3 x [1 x i32]]* undef, i64 0, i64 1, i64 0
+ %arrayidx638 = getelementptr inbounds [3 x [1 x i32]], [3 x [1 x i32]]* undef, i64 0, i64 1, i64 0
br i1 undef, label %for.cond940, label %if.end1018
for.cond940: ; preds = %for.cond940, %if.else876
- %l_655.1 = phi i32* [ getelementptr inbounds ([8 x i32]* @g_51, i64 0, i64 6), %entry ], [ %l_654.0, %for.cond940 ]
+ %l_655.1 = phi i32* [ getelementptr inbounds ([8 x i32], [8 x i32]* @g_51, i64 0, i64 6), %entry ], [ %l_654.0, %for.cond940 ]
%l_654.0 = phi i32* [ null, %entry ], [ %arrayidx638, %for.cond940 ]
%exitcond = icmp eq i32 undef, 20
br i1 %exitcond, label %if.end1018, label %for.cond940
if.end1018: ; preds = %for.end957, %for.end834
- %l_655.3.ph33 = phi i32* [ %l_655.1, %for.cond940 ], [ getelementptr inbounds ([8 x i32]* @g_51, i64 0, i64 6), %entry ]
+ %l_655.3.ph33 = phi i32* [ %l_655.1, %for.cond940 ], [ getelementptr inbounds ([8 x i32], [8 x i32]* @g_51, i64 0, i64 6), %entry ]
store i32 0, i32* %l_655.3.ph33, align 4
ret void
}
diff --git a/test/CodeGen/PowerPC/Atomics-64.ll b/test/CodeGen/PowerPC/Atomics-64.ll
index 122b54e080ac..77066de25e78 100644
--- a/test/CodeGen/PowerPC/Atomics-64.ll
+++ b/test/CodeGen/PowerPC/Atomics-64.ll
@@ -254,272 +254,272 @@ return: ; preds = %entry
define void @test_op_and_fetch() nounwind {
entry:
- %0 = load i8* @uc, align 1
+ %0 = load i8, i8* @uc, align 1
%1 = atomicrmw add i8* @sc, i8 %0 monotonic
%2 = add i8 %1, %0
store i8 %2, i8* @sc, align 1
- %3 = load i8* @uc, align 1
+ %3 = load i8, i8* @uc, align 1
%4 = atomicrmw add i8* @uc, i8 %3 monotonic
%5 = add i8 %4, %3
store i8 %5, i8* @uc, align 1
- %6 = load i8* @uc, align 1
+ %6 = load i8, i8* @uc, align 1
%7 = zext i8 %6 to i16
%8 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%9 = atomicrmw add i16* %8, i16 %7 monotonic
%10 = add i16 %9, %7
store i16 %10, i16* @ss, align 2
- %11 = load i8* @uc, align 1
+ %11 = load i8, i8* @uc, align 1
%12 = zext i8 %11 to i16
%13 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%14 = atomicrmw add i16* %13, i16 %12 monotonic
%15 = add i16 %14, %12
store i16 %15, i16* @us, align 2
- %16 = load i8* @uc, align 1
+ %16 = load i8, i8* @uc, align 1
%17 = zext i8 %16 to i32
%18 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%19 = atomicrmw add i32* %18, i32 %17 monotonic
%20 = add i32 %19, %17
store i32 %20, i32* @si, align 4
- %21 = load i8* @uc, align 1
+ %21 = load i8, i8* @uc, align 1
%22 = zext i8 %21 to i32
%23 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%24 = atomicrmw add i32* %23, i32 %22 monotonic
%25 = add i32 %24, %22
store i32 %25, i32* @ui, align 4
- %26 = load i8* @uc, align 1
+ %26 = load i8, i8* @uc, align 1
%27 = zext i8 %26 to i64
%28 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%29 = atomicrmw add i64* %28, i64 %27 monotonic
%30 = add i64 %29, %27
store i64 %30, i64* @sl, align 8
- %31 = load i8* @uc, align 1
+ %31 = load i8, i8* @uc, align 1
%32 = zext i8 %31 to i64
%33 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%34 = atomicrmw add i64* %33, i64 %32 monotonic
%35 = add i64 %34, %32
store i64 %35, i64* @ul, align 8
- %36 = load i8* @uc, align 1
+ %36 = load i8, i8* @uc, align 1
%37 = atomicrmw sub i8* @sc, i8 %36 monotonic
%38 = sub i8 %37, %36
store i8 %38, i8* @sc, align 1
- %39 = load i8* @uc, align 1
+ %39 = load i8, i8* @uc, align 1
%40 = atomicrmw sub i8* @uc, i8 %39 monotonic
%41 = sub i8 %40, %39
store i8 %41, i8* @uc, align 1
- %42 = load i8* @uc, align 1
+ %42 = load i8, i8* @uc, align 1
%43 = zext i8 %42 to i16
%44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%45 = atomicrmw sub i16* %44, i16 %43 monotonic
%46 = sub i16 %45, %43
store i16 %46, i16* @ss, align 2
- %47 = load i8* @uc, align 1
+ %47 = load i8, i8* @uc, align 1
%48 = zext i8 %47 to i16
%49 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%50 = atomicrmw sub i16* %49, i16 %48 monotonic
%51 = sub i16 %50, %48
store i16 %51, i16* @us, align 2
- %52 = load i8* @uc, align 1
+ %52 = load i8, i8* @uc, align 1
%53 = zext i8 %52 to i32
%54 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%55 = atomicrmw sub i32* %54, i32 %53 monotonic
%56 = sub i32 %55, %53
store i32 %56, i32* @si, align 4
- %57 = load i8* @uc, align 1
+ %57 = load i8, i8* @uc, align 1
%58 = zext i8 %57 to i32
%59 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%60 = atomicrmw sub i32* %59, i32 %58 monotonic
%61 = sub i32 %60, %58
store i32 %61, i32* @ui, align 4
- %62 = load i8* @uc, align 1
+ %62 = load i8, i8* @uc, align 1
%63 = zext i8 %62 to i64
%64 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%65 = atomicrmw sub i64* %64, i64 %63 monotonic
%66 = sub i64 %65, %63
store i64 %66, i64* @sl, align 8
- %67 = load i8* @uc, align 1
+ %67 = load i8, i8* @uc, align 1
%68 = zext i8 %67 to i64
%69 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%70 = atomicrmw sub i64* %69, i64 %68 monotonic
%71 = sub i64 %70, %68
store i64 %71, i64* @ul, align 8
- %72 = load i8* @uc, align 1
+ %72 = load i8, i8* @uc, align 1
%73 = atomicrmw or i8* @sc, i8 %72 monotonic
%74 = or i8 %73, %72
store i8 %74, i8* @sc, align 1
- %75 = load i8* @uc, align 1
+ %75 = load i8, i8* @uc, align 1
%76 = atomicrmw or i8* @uc, i8 %75 monotonic
%77 = or i8 %76, %75
store i8 %77, i8* @uc, align 1
- %78 = load i8* @uc, align 1
+ %78 = load i8, i8* @uc, align 1
%79 = zext i8 %78 to i16
%80 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%81 = atomicrmw or i16* %80, i16 %79 monotonic
%82 = or i16 %81, %79
store i16 %82, i16* @ss, align 2
- %83 = load i8* @uc, align 1
+ %83 = load i8, i8* @uc, align 1
%84 = zext i8 %83 to i16
%85 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%86 = atomicrmw or i16* %85, i16 %84 monotonic
%87 = or i16 %86, %84
store i16 %87, i16* @us, align 2
- %88 = load i8* @uc, align 1
+ %88 = load i8, i8* @uc, align 1
%89 = zext i8 %88 to i32
%90 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%91 = atomicrmw or i32* %90, i32 %89 monotonic
%92 = or i32 %91, %89
store i32 %92, i32* @si, align 4
- %93 = load i8* @uc, align 1
+ %93 = load i8, i8* @uc, align 1
%94 = zext i8 %93 to i32
%95 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%96 = atomicrmw or i32* %95, i32 %94 monotonic
%97 = or i32 %96, %94
store i32 %97, i32* @ui, align 4
- %98 = load i8* @uc, align 1
+ %98 = load i8, i8* @uc, align 1
%99 = zext i8 %98 to i64
%100 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%101 = atomicrmw or i64* %100, i64 %99 monotonic
%102 = or i64 %101, %99
store i64 %102, i64* @sl, align 8
- %103 = load i8* @uc, align 1
+ %103 = load i8, i8* @uc, align 1
%104 = zext i8 %103 to i64
%105 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%106 = atomicrmw or i64* %105, i64 %104 monotonic
%107 = or i64 %106, %104
store i64 %107, i64* @ul, align 8
- %108 = load i8* @uc, align 1
+ %108 = load i8, i8* @uc, align 1
%109 = atomicrmw xor i8* @sc, i8 %108 monotonic
%110 = xor i8 %109, %108
store i8 %110, i8* @sc, align 1
- %111 = load i8* @uc, align 1
+ %111 = load i8, i8* @uc, align 1
%112 = atomicrmw xor i8* @uc, i8 %111 monotonic
%113 = xor i8 %112, %111
store i8 %113, i8* @uc, align 1
- %114 = load i8* @uc, align 1
+ %114 = load i8, i8* @uc, align 1
%115 = zext i8 %114 to i16
%116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%117 = atomicrmw xor i16* %116, i16 %115 monotonic
%118 = xor i16 %117, %115
store i16 %118, i16* @ss, align 2
- %119 = load i8* @uc, align 1
+ %119 = load i8, i8* @uc, align 1
%120 = zext i8 %119 to i16
%121 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%122 = atomicrmw xor i16* %121, i16 %120 monotonic
%123 = xor i16 %122, %120
store i16 %123, i16* @us, align 2
- %124 = load i8* @uc, align 1
+ %124 = load i8, i8* @uc, align 1
%125 = zext i8 %124 to i32
%126 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%127 = atomicrmw xor i32* %126, i32 %125 monotonic
%128 = xor i32 %127, %125
store i32 %128, i32* @si, align 4
- %129 = load i8* @uc, align 1
+ %129 = load i8, i8* @uc, align 1
%130 = zext i8 %129 to i32
%131 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%132 = atomicrmw xor i32* %131, i32 %130 monotonic
%133 = xor i32 %132, %130
store i32 %133, i32* @ui, align 4
- %134 = load i8* @uc, align 1
+ %134 = load i8, i8* @uc, align 1
%135 = zext i8 %134 to i64
%136 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%137 = atomicrmw xor i64* %136, i64 %135 monotonic
%138 = xor i64 %137, %135
store i64 %138, i64* @sl, align 8
- %139 = load i8* @uc, align 1
+ %139 = load i8, i8* @uc, align 1
%140 = zext i8 %139 to i64
%141 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%142 = atomicrmw xor i64* %141, i64 %140 monotonic
%143 = xor i64 %142, %140
store i64 %143, i64* @ul, align 8
- %144 = load i8* @uc, align 1
+ %144 = load i8, i8* @uc, align 1
%145 = atomicrmw and i8* @sc, i8 %144 monotonic
%146 = and i8 %145, %144
store i8 %146, i8* @sc, align 1
- %147 = load i8* @uc, align 1
+ %147 = load i8, i8* @uc, align 1
%148 = atomicrmw and i8* @uc, i8 %147 monotonic
%149 = and i8 %148, %147
store i8 %149, i8* @uc, align 1
- %150 = load i8* @uc, align 1
+ %150 = load i8, i8* @uc, align 1
%151 = zext i8 %150 to i16
%152 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%153 = atomicrmw and i16* %152, i16 %151 monotonic
%154 = and i16 %153, %151
store i16 %154, i16* @ss, align 2
- %155 = load i8* @uc, align 1
+ %155 = load i8, i8* @uc, align 1
%156 = zext i8 %155 to i16
%157 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%158 = atomicrmw and i16* %157, i16 %156 monotonic
%159 = and i16 %158, %156
store i16 %159, i16* @us, align 2
- %160 = load i8* @uc, align 1
+ %160 = load i8, i8* @uc, align 1
%161 = zext i8 %160 to i32
%162 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%163 = atomicrmw and i32* %162, i32 %161 monotonic
%164 = and i32 %163, %161
store i32 %164, i32* @si, align 4
- %165 = load i8* @uc, align 1
+ %165 = load i8, i8* @uc, align 1
%166 = zext i8 %165 to i32
%167 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%168 = atomicrmw and i32* %167, i32 %166 monotonic
%169 = and i32 %168, %166
store i32 %169, i32* @ui, align 4
- %170 = load i8* @uc, align 1
+ %170 = load i8, i8* @uc, align 1
%171 = zext i8 %170 to i64
%172 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%173 = atomicrmw and i64* %172, i64 %171 monotonic
%174 = and i64 %173, %171
store i64 %174, i64* @sl, align 8
- %175 = load i8* @uc, align 1
+ %175 = load i8, i8* @uc, align 1
%176 = zext i8 %175 to i64
%177 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%178 = atomicrmw and i64* %177, i64 %176 monotonic
%179 = and i64 %178, %176
store i64 %179, i64* @ul, align 8
- %180 = load i8* @uc, align 1
+ %180 = load i8, i8* @uc, align 1
%181 = atomicrmw nand i8* @sc, i8 %180 monotonic
%182 = xor i8 %181, -1
%183 = and i8 %182, %180
store i8 %183, i8* @sc, align 1
- %184 = load i8* @uc, align 1
+ %184 = load i8, i8* @uc, align 1
%185 = atomicrmw nand i8* @uc, i8 %184 monotonic
%186 = xor i8 %185, -1
%187 = and i8 %186, %184
store i8 %187, i8* @uc, align 1
- %188 = load i8* @uc, align 1
+ %188 = load i8, i8* @uc, align 1
%189 = zext i8 %188 to i16
%190 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%191 = atomicrmw nand i16* %190, i16 %189 monotonic
%192 = xor i16 %191, -1
%193 = and i16 %192, %189
store i16 %193, i16* @ss, align 2
- %194 = load i8* @uc, align 1
+ %194 = load i8, i8* @uc, align 1
%195 = zext i8 %194 to i16
%196 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%197 = atomicrmw nand i16* %196, i16 %195 monotonic
%198 = xor i16 %197, -1
%199 = and i16 %198, %195
store i16 %199, i16* @us, align 2
- %200 = load i8* @uc, align 1
+ %200 = load i8, i8* @uc, align 1
%201 = zext i8 %200 to i32
%202 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%203 = atomicrmw nand i32* %202, i32 %201 monotonic
%204 = xor i32 %203, -1
%205 = and i32 %204, %201
store i32 %205, i32* @si, align 4
- %206 = load i8* @uc, align 1
+ %206 = load i8, i8* @uc, align 1
%207 = zext i8 %206 to i32
%208 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%209 = atomicrmw nand i32* %208, i32 %207 monotonic
%210 = xor i32 %209, -1
%211 = and i32 %210, %207
store i32 %211, i32* @ui, align 4
- %212 = load i8* @uc, align 1
+ %212 = load i8, i8* @uc, align 1
%213 = zext i8 %212 to i64
%214 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%215 = atomicrmw nand i64* %214, i64 %213 monotonic
%216 = xor i64 %215, -1
%217 = and i64 %216, %213
store i64 %217, i64* @sl, align 8
- %218 = load i8* @uc, align 1
+ %218 = load i8, i8* @uc, align 1
%219 = zext i8 %218 to i64
%220 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%221 = atomicrmw nand i64* %220, i64 %219 monotonic
@@ -534,73 +534,73 @@ return: ; preds = %entry
define void @test_compare_and_swap() nounwind {
entry:
- %0 = load i8* @uc, align 1
- %1 = load i8* @sc, align 1
+ %0 = load i8, i8* @uc, align 1
+ %1 = load i8, i8* @sc, align 1
%2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic monotonic
store i8 %2, i8* @sc, align 1
- %3 = load i8* @uc, align 1
- %4 = load i8* @sc, align 1
+ %3 = load i8, i8* @uc, align 1
+ %4 = load i8, i8* @sc, align 1
%5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic monotonic
store i8 %5, i8* @uc, align 1
- %6 = load i8* @uc, align 1
+ %6 = load i8, i8* @uc, align 1
%7 = zext i8 %6 to i16
- %8 = load i8* @sc, align 1
+ %8 = load i8, i8* @sc, align 1
%9 = sext i8 %8 to i16
%10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic monotonic
store i16 %11, i16* @ss, align 2
- %12 = load i8* @uc, align 1
+ %12 = load i8, i8* @uc, align 1
%13 = zext i8 %12 to i16
- %14 = load i8* @sc, align 1
+ %14 = load i8, i8* @sc, align 1
%15 = sext i8 %14 to i16
%16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic monotonic
store i16 %17, i16* @us, align 2
- %18 = load i8* @uc, align 1
+ %18 = load i8, i8* @uc, align 1
%19 = zext i8 %18 to i32
- %20 = load i8* @sc, align 1
+ %20 = load i8, i8* @sc, align 1
%21 = sext i8 %20 to i32
%22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic monotonic
store i32 %23, i32* @si, align 4
- %24 = load i8* @uc, align 1
+ %24 = load i8, i8* @uc, align 1
%25 = zext i8 %24 to i32
- %26 = load i8* @sc, align 1
+ %26 = load i8, i8* @sc, align 1
%27 = sext i8 %26 to i32
%28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic monotonic
store i32 %29, i32* @ui, align 4
- %30 = load i8* @uc, align 1
+ %30 = load i8, i8* @uc, align 1
%31 = zext i8 %30 to i64
- %32 = load i8* @sc, align 1
+ %32 = load i8, i8* @sc, align 1
%33 = sext i8 %32 to i64
%34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%35 = cmpxchg i64* %34, i64 %31, i64 %33 monotonic monotonic
store i64 %35, i64* @sl, align 8
- %36 = load i8* @uc, align 1
+ %36 = load i8, i8* @uc, align 1
%37 = zext i8 %36 to i64
- %38 = load i8* @sc, align 1
+ %38 = load i8, i8* @sc, align 1
%39 = sext i8 %38 to i64
%40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%41 = cmpxchg i64* %40, i64 %37, i64 %39 monotonic monotonic
store i64 %41, i64* @ul, align 8
- %42 = load i8* @uc, align 1
- %43 = load i8* @sc, align 1
+ %42 = load i8, i8* @uc, align 1
+ %43 = load i8, i8* @sc, align 1
%44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic monotonic
%45 = icmp eq i8 %44, %42
%46 = zext i1 %45 to i8
%47 = zext i8 %46 to i32
store i32 %47, i32* @ui, align 4
- %48 = load i8* @uc, align 1
- %49 = load i8* @sc, align 1
+ %48 = load i8, i8* @uc, align 1
+ %49 = load i8, i8* @sc, align 1
%50 = cmpxchg i8* @uc, i8 %48, i8 %49 monotonic monotonic
%51 = icmp eq i8 %50, %48
%52 = zext i1 %51 to i8
%53 = zext i8 %52 to i32
store i32 %53, i32* @ui, align 4
- %54 = load i8* @uc, align 1
+ %54 = load i8, i8* @uc, align 1
%55 = zext i8 %54 to i16
- %56 = load i8* @sc, align 1
+ %56 = load i8, i8* @sc, align 1
%57 = sext i8 %56 to i16
%58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%59 = cmpxchg i16* %58, i16 %55, i16 %57 monotonic monotonic
@@ -608,9 +608,9 @@ entry:
%61 = zext i1 %60 to i8
%62 = zext i8 %61 to i32
store i32 %62, i32* @ui, align 4
- %63 = load i8* @uc, align 1
+ %63 = load i8, i8* @uc, align 1
%64 = zext i8 %63 to i16
- %65 = load i8* @sc, align 1
+ %65 = load i8, i8* @sc, align 1
%66 = sext i8 %65 to i16
%67 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%68 = cmpxchg i16* %67, i16 %64, i16 %66 monotonic monotonic
@@ -618,9 +618,9 @@ entry:
%70 = zext i1 %69 to i8
%71 = zext i8 %70 to i32
store i32 %71, i32* @ui, align 4
- %72 = load i8* @uc, align 1
+ %72 = load i8, i8* @uc, align 1
%73 = zext i8 %72 to i32
- %74 = load i8* @sc, align 1
+ %74 = load i8, i8* @sc, align 1
%75 = sext i8 %74 to i32
%76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%77 = cmpxchg i32* %76, i32 %73, i32 %75 monotonic monotonic
@@ -628,9 +628,9 @@ entry:
%79 = zext i1 %78 to i8
%80 = zext i8 %79 to i32
store i32 %80, i32* @ui, align 4
- %81 = load i8* @uc, align 1
+ %81 = load i8, i8* @uc, align 1
%82 = zext i8 %81 to i32
- %83 = load i8* @sc, align 1
+ %83 = load i8, i8* @sc, align 1
%84 = sext i8 %83 to i32
%85 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%86 = cmpxchg i32* %85, i32 %82, i32 %84 monotonic monotonic
@@ -638,9 +638,9 @@ entry:
%88 = zext i1 %87 to i8
%89 = zext i8 %88 to i32
store i32 %89, i32* @ui, align 4
- %90 = load i8* @uc, align 1
+ %90 = load i8, i8* @uc, align 1
%91 = zext i8 %90 to i64
- %92 = load i8* @sc, align 1
+ %92 = load i8, i8* @sc, align 1
%93 = sext i8 %92 to i64
%94 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%95 = cmpxchg i64* %94, i64 %91, i64 %93 monotonic monotonic
@@ -648,9 +648,9 @@ entry:
%97 = zext i1 %96 to i8
%98 = zext i8 %97 to i32
store i32 %98, i32* @ui, align 4
- %99 = load i8* @uc, align 1
+ %99 = load i8, i8* @uc, align 1
%100 = zext i8 %99 to i64
- %101 = load i8* @sc, align 1
+ %101 = load i8, i8* @sc, align 1
%102 = sext i8 %101 to i64
%103 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%104 = cmpxchg i64* %103, i64 %100, i64 %102 monotonic monotonic
diff --git a/test/CodeGen/PowerPC/MergeConsecutiveStores.ll b/test/CodeGen/PowerPC/MergeConsecutiveStores.ll
new file mode 100644
index 000000000000..977b3b701cce
--- /dev/null
+++ b/test/CodeGen/PowerPC/MergeConsecutiveStores.ll
@@ -0,0 +1,68 @@
+; RUN: llc -march=ppc32 -mtriple=powerpc-unknown-linux-gnu -mattr=+altivec < %s | FileCheck %s
+
+;; This test ensures that MergeConsecutiveStores does not attempt to
+;; merge stores or loads when doing so would result in unaligned
+;; memory operations (unless the target supports those, e.g. X86).
+
+;; This issue happens in other situations for other targets, but PPC
+;; with Altivec extensions was chosen for the test because it does not
+;; support unaligned access with AltiVec instructions. If the 4
+;; load/stores get merged into a v4i32 vector type, severely bad code
+;; gets generated: it painstakingly copies the values to a temporary
+;; location on the stack, with vector ops, in order to then use
+;; integer ops to load from the temporary stack location and store to
+;; the final location. Yuck!
+
+%struct.X = type { i32, i32, i32, i32 }
+
+@fx = common global %struct.X zeroinitializer, align 4
+@fy = common global %struct.X zeroinitializer, align 4
+
+;; In this test case, lvx and stvx instructions should NOT be
+;; generated, as the alignment is not sufficient for it to be
+;; worthwhile.
+
+;; CHECK-LABEL: f:
+;; CHECK: lwzu
+;; CHECK-NEXT: lwz
+;; CHECK-NEXT: lwz
+;; CHECK-NEXT: lwz
+;; CHECK-NEXT: stwu
+;; CHECK-NEXT: stw
+;; CHECK-NEXT: stw
+;; CHECK-NEXT: stw
+;; CHECK-NEXT: blr
+define void @f() {
+entry:
+ %0 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 0), align 4
+ %1 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 1), align 4
+ %2 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 2), align 4
+ %3 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 3), align 4
+ store i32 %0, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 0), align 4
+ store i32 %1, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 1), align 4
+ store i32 %2, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 2), align 4
+ store i32 %3, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 3), align 4
+ ret void
+}
+
+@gx = common global %struct.X zeroinitializer, align 16
+@gy = common global %struct.X zeroinitializer, align 16
+
+;; In this test, lvx and stvx instructions SHOULD be generated, as
+;; the 16-byte alignment of the new load/store is acceptable.
+;; CHECK-LABEL: g:
+;; CHECK: lvx
+;; CHECK: stvx
+;; CHECK: blr
+define void @g() {
+entry:
+ %0 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 0), align 16
+ %1 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 1), align 4
+ %2 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 2), align 4
+ %3 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 3), align 4
+ store i32 %0, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 0), align 16
+ store i32 %1, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 1), align 4
+ store i32 %2, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 2), align 4
+ store i32 %3, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 3), align 4
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/a2-fp-basic.ll b/test/CodeGen/PowerPC/a2-fp-basic.ll
index de3aa7c31766..0324e38e1691 100644
--- a/test/CodeGen/PowerPC/a2-fp-basic.ll
+++ b/test/CodeGen/PowerPC/a2-fp-basic.ll
@@ -4,28 +4,28 @@
define void @maybe_an_fma(%0* sret %agg.result, %0* byval %a, %0* byval %b, %0* byval %c) nounwind {
entry:
- %a.realp = getelementptr inbounds %0* %a, i32 0, i32 0
- %a.real = load double* %a.realp
- %a.imagp = getelementptr inbounds %0* %a, i32 0, i32 1
- %a.imag = load double* %a.imagp
- %b.realp = getelementptr inbounds %0* %b, i32 0, i32 0
- %b.real = load double* %b.realp
- %b.imagp = getelementptr inbounds %0* %b, i32 0, i32 1
- %b.imag = load double* %b.imagp
+ %a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0
+ %a.real = load double, double* %a.realp
+ %a.imagp = getelementptr inbounds %0, %0* %a, i32 0, i32 1
+ %a.imag = load double, double* %a.imagp
+ %b.realp = getelementptr inbounds %0, %0* %b, i32 0, i32 0
+ %b.real = load double, double* %b.realp
+ %b.imagp = getelementptr inbounds %0, %0* %b, i32 0, i32 1
+ %b.imag = load double, double* %b.imagp
%mul.rl = fmul double %a.real, %b.real
%mul.rr = fmul double %a.imag, %b.imag
%mul.r = fsub double %mul.rl, %mul.rr
%mul.il = fmul double %a.imag, %b.real
%mul.ir = fmul double %a.real, %b.imag
%mul.i = fadd double %mul.il, %mul.ir
- %c.realp = getelementptr inbounds %0* %c, i32 0, i32 0
- %c.real = load double* %c.realp
- %c.imagp = getelementptr inbounds %0* %c, i32 0, i32 1
- %c.imag = load double* %c.imagp
+ %c.realp = getelementptr inbounds %0, %0* %c, i32 0, i32 0
+ %c.real = load double, double* %c.realp
+ %c.imagp = getelementptr inbounds %0, %0* %c, i32 0, i32 1
+ %c.imag = load double, double* %c.imagp
%add.r = fadd double %mul.r, %c.real
%add.i = fadd double %mul.i, %c.imag
- %real = getelementptr inbounds %0* %agg.result, i32 0, i32 0
- %imag = getelementptr inbounds %0* %agg.result, i32 0, i32 1
+ %real = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 0
+ %imag = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 1
store double %add.r, double* %real
store double %add.i, double* %imag
ret void
diff --git a/test/CodeGen/PowerPC/add-fi.ll b/test/CodeGen/PowerPC/add-fi.ll
index 18892c8cdf5e..010602e516a4 100644
--- a/test/CodeGen/PowerPC/add-fi.ll
+++ b/test/CodeGen/PowerPC/add-fi.ll
@@ -4,7 +4,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define i32* @test1() {
%X = alloca { i32, i32 }
- %Y = getelementptr {i32,i32}* %X, i32 0, i32 1
+ %Y = getelementptr {i32,i32}, {i32,i32}* %X, i32 0, i32 1
ret i32* %Y
; CHECK-LABEL: @test1
@@ -14,7 +14,7 @@ define i32* @test1() {
define i32* @test2() {
%X = alloca { i32, i32, i32, i32 }
- %Y = getelementptr {i32,i32,i32,i32}* %X, i32 0, i32 3
+ %Y = getelementptr {i32,i32,i32,i32}, {i32,i32,i32,i32}* %X, i32 0, i32 3
ret i32* %Y
; CHECK-LABEL: @test2
diff --git a/test/CodeGen/PowerPC/addi-licm.ll b/test/CodeGen/PowerPC/addi-licm.ll
index b52cb678a969..b6cfeec0e6f7 100644
--- a/test/CodeGen/PowerPC/addi-licm.ll
+++ b/test/CodeGen/PowerPC/addi-licm.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -disable-ppc-preinc-prep < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s -check-prefix=PIP
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -21,14 +22,22 @@ entry:
; CHECK-DAG: lfsx {{[0-9]+}}, [[REG2]],
; CHECK: blr
+; PIP-LABEL: @foo
+; PIP: addi [[REG1:[0-9]+]], 1,
+; PIP: addi [[REG2:[0-9]+]], 1,
+; PIP: %for.body.i
+; PIP-DAG: lfsu {{[0-9]+}}, 4([[REG1]])
+; PIP-DAG: lfsu {{[0-9]+}}, 4([[REG2]])
+; PIP: blr
+
for.body.i: ; preds = %for.body.i.preheader, %for.body.i
%accumulator.09.i = phi double [ %add.i, %for.body.i ], [ 0.000000e+00, %entry ]
%i.08.i = phi i64 [ %inc.i, %for.body.i ], [ 0, %entry ]
- %arrayidx.i = getelementptr inbounds [2048 x float]* %x, i64 0, i64 %i.08.i
- %v14 = load float* %arrayidx.i, align 4
+ %arrayidx.i = getelementptr inbounds [2048 x float], [2048 x float]* %x, i64 0, i64 %i.08.i
+ %v14 = load float, float* %arrayidx.i, align 4
%conv.i = fpext float %v14 to double
- %arrayidx1.i = getelementptr inbounds [2048 x float]* %y, i64 0, i64 %i.08.i
- %v15 = load float* %arrayidx1.i, align 4
+ %arrayidx1.i = getelementptr inbounds [2048 x float], [2048 x float]* %y, i64 0, i64 %i.08.i
+ %v15 = load float, float* %arrayidx1.i, align 4
%conv2.i = fpext float %v15 to double
%mul.i = fmul double %conv.i, %conv2.i
%add.i = fadd double %accumulator.09.i, %mul.i
diff --git a/test/CodeGen/PowerPC/addi-reassoc.ll b/test/CodeGen/PowerPC/addi-reassoc.ll
index 2b71ce65f6bc..3624ce638c7d 100644
--- a/test/CodeGen/PowerPC/addi-reassoc.ll
+++ b/test/CodeGen/PowerPC/addi-reassoc.ll
@@ -4,15 +4,15 @@
define i32 @test1([4 x i32]* %P, i32 %i) {
%tmp.2 = add i32 %i, 2 ; <i32> [#uses=1]
- %tmp.4 = getelementptr [4 x i32]* %P, i32 %tmp.2, i32 1 ; <i32*> [#uses=1]
- %tmp.5 = load i32* %tmp.4 ; <i32> [#uses=1]
+ %tmp.4 = getelementptr [4 x i32], [4 x i32]* %P, i32 %tmp.2, i32 1 ; <i32*> [#uses=1]
+ %tmp.5 = load i32, i32* %tmp.4 ; <i32> [#uses=1]
ret i32 %tmp.5
}
define i32 @test2(%struct.X* %P, i32 %i) {
%tmp.2 = add i32 %i, 2 ; <i32> [#uses=1]
- %tmp.5 = getelementptr %struct.X* %P, i32 %tmp.2, i32 0, i32 1 ; <i8*> [#uses=1]
- %tmp.6 = load i8* %tmp.5 ; <i8> [#uses=1]
+ %tmp.5 = getelementptr %struct.X, %struct.X* %P, i32 %tmp.2, i32 0, i32 1 ; <i8*> [#uses=1]
+ %tmp.6 = load i8, i8* %tmp.5 ; <i8> [#uses=1]
%tmp.7 = sext i8 %tmp.6 to i32 ; <i32> [#uses=1]
ret i32 %tmp.7
}
diff --git a/test/CodeGen/PowerPC/alias.ll b/test/CodeGen/PowerPC/alias.ll
index 86e41148a0d7..524abd5da3ef 100644
--- a/test/CodeGen/PowerPC/alias.ll
+++ b/test/CodeGen/PowerPC/alias.ll
@@ -10,8 +10,8 @@
; CHECK-LABEL: bar:
define i32 @bar() {
; MEDIUM: addis 3, 2, fooa@toc@ha
-; LARGE: addis 3, 2, .LC1@toc@ha
- %a = load i32* @fooa
+; LARGE: addis 3, 2, .L[[L0:.*]]@toc@ha
+ %a = load i32, i32* @fooa
ret i32 %a
}
@@ -19,13 +19,13 @@ define i32 @bar() {
define i64 @bar2() {
; MEDIUM: addis 3, 2, foo2a@toc@ha
; MEDIUM: addi 3, 3, foo2a@toc@l
-; LARGE: addis 3, 2, .LC3@toc@ha
- %a = load i64* @foo2a
+; LARGE: addis 3, 2, .L[[L1:.*]]@toc@ha
+ %a = load i64, i64* @foo2a
ret i64 %a
}
-; LARGE: .LC1:
+; LARGE: .L[[L0]]:
; LARGE-NEXT: .tc fooa[TC],fooa
-; LARGE: .LC3:
+; LARGE: .L[[L1]]:
; LARGE-NEXT: .tc foo2a[TC],foo2a
diff --git a/test/CodeGen/PowerPC/and-branch.ll b/test/CodeGen/PowerPC/and-branch.ll
index 0484f882ec72..1543205f1a58 100644
--- a/test/CodeGen/PowerPC/and-branch.ll
+++ b/test/CodeGen/PowerPC/and-branch.ll
@@ -7,7 +7,7 @@ entry:
%tmp4 = and i1 %tmp3, %tmp ; <i1> [#uses=1]
br i1 %tmp4, label %cond_true, label %UnifiedReturnBlock
cond_true: ; preds = %entry
- %tmp5 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ %tmp5 = tail call i32 (...) @bar( ) ; <i32> [#uses=0]
ret void
UnifiedReturnBlock: ; preds = %entry
ret void
diff --git a/test/CodeGen/PowerPC/and-elim.ll b/test/CodeGen/PowerPC/and-elim.ll
index a1ec29b16f14..f1738b2c1517 100644
--- a/test/CodeGen/PowerPC/and-elim.ll
+++ b/test/CodeGen/PowerPC/and-elim.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=ppc32 | not grep rlwin
define void @test(i8* %P) {
- %W = load i8* %P
+ %W = load i8, i8* %P
%X = shl i8 %W, 1
%Y = add i8 %X, 2
%Z = and i8 %Y, 254 ; dead and
diff --git a/test/CodeGen/PowerPC/anon_aggr.ll b/test/CodeGen/PowerPC/anon_aggr.ll
index 6c4f140de127..9f9eed019c15 100644
--- a/test/CodeGen/PowerPC/anon_aggr.ll
+++ b/test/CodeGen/PowerPC/anon_aggr.ll
@@ -21,7 +21,7 @@ unequal:
}
; CHECK-LABEL: func1:
-; CHECK: cmpld {{[0-9]+}}, 4, 5
+; CHECK: cmpld {{([0-9]+,)?}}4, 5
; CHECK-DAG: std 4, -[[OFFSET1:[0-9]+]]
; CHECK-DAG: std 5, -[[OFFSET2:[0-9]+]]
; CHECK: ld 3, -[[OFFSET1]](1)
@@ -31,7 +31,7 @@ unequal:
; DARWIN32: mr
; DARWIN32: mr r[[REG1:[0-9]+]], r[[REGA:[0-9]+]]
; DARWIN32: mr r[[REG2:[0-9]+]], r[[REGB:[0-9]+]]
-; DARWIN32: cmplw cr{{[0-9]+}}, r[[REGA]], r[[REGB]]
+; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REGA]], r[[REGB]]
; DARWIN32: stw r[[REG1]], -[[OFFSET1:[0-9]+]]
; DARWIN32: stw r[[REG2]], -[[OFFSET2:[0-9]+]]
; DARWIN32: lwz r3, -[[OFFSET1]]
@@ -41,7 +41,7 @@ unequal:
; DARWIN64: mr
; DARWIN64: mr r[[REG1:[0-9]+]], r[[REGA:[0-9]+]]
; DARWIN64: mr r[[REG2:[0-9]+]], r[[REGB:[0-9]+]]
-; DARWIN64: cmpld cr{{[0-9]+}}, r[[REGA]], r[[REGB]]
+; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REGB]]
; DARWIN64: std r[[REG1]], -[[OFFSET1:[0-9]+]]
; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
@@ -51,8 +51,8 @@ unequal:
define i8* @func2({ i64, i8* } %array1, %tarray* byval %array2) {
entry:
%array1_ptr = extractvalue {i64, i8* } %array1, 1
- %tmp = getelementptr inbounds %tarray* %array2, i32 0, i32 1
- %array2_ptr = load i8** %tmp
+ %tmp = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
+ %array2_ptr = load i8*, i8** %tmp
%cond = icmp eq i8* %array1_ptr, %array2_ptr
br i1 %cond, label %equal, label %unequal
equal:
@@ -63,7 +63,7 @@ unequal:
; CHECK-LABEL: func2:
; CHECK: ld [[REG2:[0-9]+]], 72(1)
-; CHECK: cmpld {{[0-9]+}}, 4, [[REG2]]
+; CHECK: cmpld {{([0-9]+,)?}}4, [[REG2]]
; CHECK-DAG: std [[REG2]], -[[OFFSET1:[0-9]+]]
; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]]
; CHECK: ld 3, -[[OFFSET2]](1)
@@ -74,7 +74,7 @@ unequal:
; DARWIN32: lwz r[[REG2:[0-9]+]], 44(r[[REGSP]])
; DARWIN32: mr
; DARWIN32: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]]
-; DARWIN32: cmplw cr{{[0-9]+}}, r[[REGA]], r[[REG2]]
+; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]]
; DARWIN32: stw r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN32: stw r[[REG2]], -[[OFFSET2:[0-9]+]]
; DARWIN32: lwz r3, -[[OFFSET1]]
@@ -84,7 +84,7 @@ unequal:
; DARWIN64: ld r[[REG2:[0-9]+]], 72(r1)
; DARWIN64: mr
; DARWIN64: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]]
-; DARWIN64: cmpld cr{{[0-9]+}}, r[[REGA]], r[[REG2]]
+; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]]
; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
@@ -93,10 +93,10 @@ unequal:
define i8* @func3({ i64, i8* }* byval %array1, %tarray* byval %array2) {
entry:
- %tmp1 = getelementptr inbounds { i64, i8* }* %array1, i32 0, i32 1
- %array1_ptr = load i8** %tmp1
- %tmp2 = getelementptr inbounds %tarray* %array2, i32 0, i32 1
- %array2_ptr = load i8** %tmp2
+ %tmp1 = getelementptr inbounds { i64, i8* }, { i64, i8* }* %array1, i32 0, i32 1
+ %array1_ptr = load i8*, i8** %tmp1
+ %tmp2 = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
+ %array2_ptr = load i8*, i8** %tmp2
%cond = icmp eq i8* %array1_ptr, %array2_ptr
br i1 %cond, label %equal, label %unequal
equal:
@@ -108,7 +108,7 @@ unequal:
; CHECK-LABEL: func3:
; CHECK: ld [[REG3:[0-9]+]], 72(1)
; CHECK: ld [[REG4:[0-9]+]], 56(1)
-; CHECK: cmpld {{[0-9]+}}, [[REG4]], [[REG3]]
+; CHECK: cmpld {{([0-9]+,)?}}[[REG4]], [[REG3]]
; CHECK: std [[REG3]], -[[OFFSET1:[0-9]+]](1)
; CHECK: std [[REG4]], -[[OFFSET2:[0-9]+]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
@@ -119,7 +119,7 @@ unequal:
; DARWIN32: addi r[[REG2:[0-9]+]], r[[REGSP]], 24
; DARWIN32: lwz r[[REG3:[0-9]+]], 44(r[[REGSP]])
; DARWIN32: lwz r[[REG4:[0-9]+]], 32(r[[REGSP]])
-; DARWIN32: cmplw cr{{[0-9]+}}, r[[REG4]], r[[REG3]]
+; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]]
; DARWIN32: stw r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN32: stw r[[REG4]], -[[OFFSET2:[0-9]+]]
; DARWIN32: lwz r3, -[[OFFSET2]]
@@ -128,7 +128,7 @@ unequal:
; DARWIN64: _func3:
; DARWIN64: ld r[[REG3:[0-9]+]], 72(r1)
; DARWIN64: ld r[[REG4:[0-9]+]], 56(r1)
-; DARWIN64: cmpld cr{{[0-9]+}}, r[[REG4]], r[[REG3]]
+; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]]
; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN64: std r[[REG4]], -[[OFFSET2:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET2]]
@@ -140,8 +140,8 @@ define i8* @func4(i64 %p1, i64 %p2, i64 %p3, i64 %p4,
{ i64, i8* } %array1, %tarray* byval %array2) {
entry:
%array1_ptr = extractvalue {i64, i8* } %array1, 1
- %tmp = getelementptr inbounds %tarray* %array2, i32 0, i32 1
- %array2_ptr = load i8** %tmp
+ %tmp = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
+ %array2_ptr = load i8*, i8** %tmp
%cond = icmp eq i8* %array1_ptr, %array2_ptr
br i1 %cond, label %equal, label %unequal
equal:
@@ -153,7 +153,7 @@ unequal:
; CHECK-LABEL: func4:
; CHECK: ld [[REG3:[0-9]+]], 136(1)
; CHECK: ld [[REG2:[0-9]+]], 120(1)
-; CHECK: cmpld {{[0-9]+}}, [[REG2]], [[REG3]]
+; CHECK: cmpld {{([0-9]+,)?}}[[REG2]], [[REG3]]
; CHECK: std [[REG3]], -[[OFFSET2:[0-9]+]](1)
; CHECK: std [[REG2]], -[[OFFSET1:[0-9]+]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
@@ -164,8 +164,8 @@ unequal:
; DARWIN32: addi r[[REG1:[0-9]+]], r1, 100
; DARWIN32: lwz r[[REG3:[0-9]+]], 108(r1)
; DARWIN32: mr r[[REG2:[0-9]+]], r[[REG4]]
-; DARWIN32: cmplw cr{{[0-9]+}}, r[[REG4]], r[[REG3]]
-; DARWIN32: stw r[[REG4]], -[[OFFSET1:[0-9]+]]
+; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]]
+; DARWIN32: stw r[[REG2]], -[[OFFSET1:[0-9]+]]
; DARWIN32: stw r[[REG3]], -[[OFFSET2:[0-9]+]]
; DARWIN32: lwz r[[REG1]], -[[OFFSET1]]
; DARWIN32: lwz r[[REG1]], -[[OFFSET2]]
@@ -174,7 +174,7 @@ unequal:
; DARWIN64: ld r[[REG2:[0-9]+]], 120(r1)
; DARWIN64: ld r[[REG3:[0-9]+]], 136(r1)
; DARWIN64: mr r[[REG4:[0-9]+]], r[[REG2]]
-; DARWIN64: cmpld cr{{[0-9]+}}, r[[REG2]], r[[REG3]]
+; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REG2]], r[[REG3]]
; DARWIN64: std r[[REG4]], -[[OFFSET1:[0-9]+]]
; DARWIN64: std r[[REG3]], -[[OFFSET2:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
diff --git a/test/CodeGen/PowerPC/asm-constraints.ll b/test/CodeGen/PowerPC/asm-constraints.ll
index 9bf8b75e0ace..2d9b0eb591d3 100644
--- a/test/CodeGen/PowerPC/asm-constraints.ll
+++ b/test/CodeGen/PowerPC/asm-constraints.ll
@@ -23,7 +23,7 @@ entry:
%addr.addr = alloca i8*, align 8
store i32 %result, i32* %result.addr, align 4
store i8* %addr, i8** %addr.addr, align 8
- %0 = load i8** %addr.addr, align 8
+ %0 = load i8*, i8** %addr.addr, align 8
%1 = call i32 asm sideeffect "ld${1:U}${1:X} $0,$1\0Acmpw $0,$0\0Abne- 1f\0A1: isync\0A", "=r,*m,~{memory},~{cr0}"(i8* %0) #1, !srcloc !1
store i32 %1, i32* %result.addr, align 4
ret void
@@ -31,7 +31,7 @@ entry:
; CHECK-LABEL: @foo
; CHECK: ld [[REG:[0-9]+]], 0(4)
-; CHECK: cmpw 0, [[REG]], [[REG]]
+; CHECK: cmpw [[REG]], [[REG]]
; CHECK: bne- 0, .Ltmp[[TMP:[0-9]+]]
; CHECK: .Ltmp[[TMP]]:
; CHECK: isync
diff --git a/test/CodeGen/PowerPC/atomic-2.ll b/test/CodeGen/PowerPC/atomic-2.ll
index 9cb0fa5be5c9..1857d5d697e6 100644
--- a/test/CodeGen/PowerPC/atomic-2.ll
+++ b/test/CodeGen/PowerPC/atomic-2.ll
@@ -1,4 +1,6 @@
; RUN: llc < %s -march=ppc64 | FileCheck %s
+; RUN: llc < %s -march=ppc64 -mcpu=pwr7 | FileCheck %s
+; RUN: llc < %s -march=ppc64 -mcpu=pwr8 | FileCheck %s -check-prefix=CHECK-P8U
define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
; CHECK-LABEL: exchange_and_add:
@@ -8,6 +10,22 @@ define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
ret i64 %tmp
}
+define i8 @exchange_and_add8(i8* %mem, i8 %val) nounwind {
+; CHECK-LABEL: exchange_and_add8:
+; CHECK-P8U: lbarx
+ %tmp = atomicrmw add i8* %mem, i8 %val monotonic
+; CHECK-P8U: stbcx.
+ ret i8 %tmp
+}
+
+define i16 @exchange_and_add16(i16* %mem, i16 %val) nounwind {
+; CHECK-LABEL: exchange_and_add16:
+; CHECK-P8U: lharx
+ %tmp = atomicrmw add i16* %mem, i16 %val monotonic
+; CHECK-P8U: sthcx.
+ ret i16 %tmp
+}
+
define i64 @exchange_and_cmp(i64* %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp:
; CHECK: ldarx
@@ -18,6 +36,26 @@ define i64 @exchange_and_cmp(i64* %mem) nounwind {
ret i64 %tmp
}
+define i8 @exchange_and_cmp8(i8* %mem) nounwind {
+; CHECK-LABEL: exchange_and_cmp8:
+; CHECK-P8U: lbarx
+ %tmppair = cmpxchg i8* %mem, i8 0, i8 1 monotonic monotonic
+ %tmp = extractvalue { i8, i1 } %tmppair, 0
+; CHECK-P8U: stbcx.
+; CHECK-P8U: stbcx.
+ ret i8 %tmp
+}
+
+define i16 @exchange_and_cmp16(i16* %mem) nounwind {
+; CHECK-LABEL: exchange_and_cmp16:
+; CHECK-P8U: lharx
+ %tmppair = cmpxchg i16* %mem, i16 0, i16 1 monotonic monotonic
+ %tmp = extractvalue { i16, i1 } %tmppair, 0
+; CHECK-P8U: sthcx.
+; CHECK-P8U: sthcx.
+ ret i16 %tmp
+}
+
define i64 @exchange(i64* %mem, i64 %val) nounwind {
; CHECK-LABEL: exchange:
; CHECK: ldarx
@@ -26,11 +64,27 @@ define i64 @exchange(i64* %mem, i64 %val) nounwind {
ret i64 %tmp
}
+define i8 @exchange8(i8* %mem, i8 %val) nounwind {
+; CHECK-LABEL: exchange8:
+; CHECK-P8U: lbarx
+ %tmp = atomicrmw xchg i8* %mem, i8 1 monotonic
+; CHECK-P8U: stbcx.
+ ret i8 %tmp
+}
+
+define i16 @exchange16(i16* %mem, i16 %val) nounwind {
+; CHECK-LABEL: exchange16:
+; CHECK-P8U: lharx
+ %tmp = atomicrmw xchg i16* %mem, i16 1 monotonic
+; CHECK-P8U: sthcx.
+ ret i16 %tmp
+}
+
define void @atomic_store(i64* %mem, i64 %val) nounwind {
entry:
; CHECK: @atomic_store
store atomic i64 %val, i64* %mem release, align 64
-; CHECK: sync 1
+; CHECK: lwsync
; CHECK-NOT: stdcx
; CHECK: std
ret void
@@ -39,10 +93,10 @@ entry:
define i64 @atomic_load(i64* %mem) nounwind {
entry:
; CHECK: @atomic_load
- %tmp = load atomic i64* %mem acquire, align 64
+ %tmp = load atomic i64, i64* %mem acquire, align 64
; CHECK-NOT: ldarx
; CHECK: ld
-; CHECK: sync 1
+; CHECK: lwsync
ret i64 %tmp
}
diff --git a/test/CodeGen/PowerPC/atomics-fences.ll b/test/CodeGen/PowerPC/atomics-fences.ll
index 862bd173fdaf..c015fa6eefb0 100644
--- a/test/CodeGen/PowerPC/atomics-fences.ll
+++ b/test/CodeGen/PowerPC/atomics-fences.ll
@@ -5,24 +5,23 @@
; Fences
define void @fence_acquire() {
; CHECK-LABEL: fence_acquire
-; CHECK: sync 1
-; PPC440-NOT: sync 1
+; CHECK: lwsync
+; PPC440-NOT: lwsync
; PPC440: msync
fence acquire
ret void
}
define void @fence_release() {
; CHECK-LABEL: fence_release
-; CHECK: sync 1
-; PPC440-NOT: sync 1
+; CHECK: lwsync
+; PPC440-NOT: lwsync
; PPC440: msync
fence release
ret void
}
define void @fence_seq_cst() {
; CHECK-LABEL: fence_seq_cst
-; CHECK: sync 0
-; PPC440-NOT: sync 0
+; CHECK: sync
; PPC440: msync
fence seq_cst
ret void
diff --git a/test/CodeGen/PowerPC/atomics-indexed.ll b/test/CodeGen/PowerPC/atomics-indexed.ll
index bb9ca0401966..7a0dde034d68 100644
--- a/test/CodeGen/PowerPC/atomics-indexed.ll
+++ b/test/CodeGen/PowerPC/atomics-indexed.ll
@@ -9,27 +9,27 @@
; Indexed version of loads
define i8 @load_x_i8_seq_cst([100000 x i8]* %mem) {
; CHECK-LABEL: load_x_i8_seq_cst
-; CHECK: sync 0
+; CHECK: sync
; CHECK: lbzx
-; CHECK: sync 1
- %ptr = getelementptr inbounds [100000 x i8]* %mem, i64 0, i64 90000
- %val = load atomic i8* %ptr seq_cst, align 1
+; CHECK: lwsync
+ %ptr = getelementptr inbounds [100000 x i8], [100000 x i8]* %mem, i64 0, i64 90000
+ %val = load atomic i8, i8* %ptr seq_cst, align 1
ret i8 %val
}
define i16 @load_x_i16_acquire([100000 x i16]* %mem) {
; CHECK-LABEL: load_x_i16_acquire
; CHECK: lhzx
-; CHECK: sync 1
- %ptr = getelementptr inbounds [100000 x i16]* %mem, i64 0, i64 90000
- %val = load atomic i16* %ptr acquire, align 2
+; CHECK: lwsync
+ %ptr = getelementptr inbounds [100000 x i16], [100000 x i16]* %mem, i64 0, i64 90000
+ %val = load atomic i16, i16* %ptr acquire, align 2
ret i16 %val
}
define i32 @load_x_i32_monotonic([100000 x i32]* %mem) {
; CHECK-LABEL: load_x_i32_monotonic
; CHECK: lwzx
; CHECK-NOT: sync
- %ptr = getelementptr inbounds [100000 x i32]* %mem, i64 0, i64 90000
- %val = load atomic i32* %ptr monotonic, align 4
+ %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %mem, i64 0, i64 90000
+ %val = load atomic i32, i32* %ptr monotonic, align 4
ret i32 %val
}
define i64 @load_x_i64_unordered([100000 x i64]* %mem) {
@@ -38,25 +38,25 @@ define i64 @load_x_i64_unordered([100000 x i64]* %mem) {
; PPC64-NOT: __sync_
; PPC64: ldx
; CHECK-NOT: sync
- %ptr = getelementptr inbounds [100000 x i64]* %mem, i64 0, i64 90000
- %val = load atomic i64* %ptr unordered, align 8
+ %ptr = getelementptr inbounds [100000 x i64], [100000 x i64]* %mem, i64 0, i64 90000
+ %val = load atomic i64, i64* %ptr unordered, align 8
ret i64 %val
}
; Indexed version of stores
define void @store_x_i8_seq_cst([100000 x i8]* %mem) {
; CHECK-LABEL: store_x_i8_seq_cst
-; CHECK: sync 0
+; CHECK: sync
; CHECK: stbx
- %ptr = getelementptr inbounds [100000 x i8]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i8], [100000 x i8]* %mem, i64 0, i64 90000
store atomic i8 42, i8* %ptr seq_cst, align 1
ret void
}
define void @store_x_i16_release([100000 x i16]* %mem) {
; CHECK-LABEL: store_x_i16_release
-; CHECK: sync 1
+; CHECK: lwsync
; CHECK: sthx
- %ptr = getelementptr inbounds [100000 x i16]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i16], [100000 x i16]* %mem, i64 0, i64 90000
store atomic i16 42, i16* %ptr release, align 2
ret void
}
@@ -64,18 +64,17 @@ define void @store_x_i32_monotonic([100000 x i32]* %mem) {
; CHECK-LABEL: store_x_i32_monotonic
; CHECK-NOT: sync
; CHECK: stwx
- %ptr = getelementptr inbounds [100000 x i32]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %mem, i64 0, i64 90000
store atomic i32 42, i32* %ptr monotonic, align 4
ret void
}
define void @store_x_i64_unordered([100000 x i64]* %mem) {
; CHECK-LABEL: store_x_i64_unordered
-; CHECK-NOT: sync 0
-; CHECK-NOT: sync 1
+; CHECK-NOT: sync
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: stdx
- %ptr = getelementptr inbounds [100000 x i64]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i64], [100000 x i64]* %mem, i64 0, i64 90000
store atomic i64 42, i64* %ptr unordered, align 8
ret void
}
diff --git a/test/CodeGen/PowerPC/atomics.ll b/test/CodeGen/PowerPC/atomics.ll
index 5f6a6a4dcdf1..2e1eff0f634d 100644
--- a/test/CodeGen/PowerPC/atomics.ll
+++ b/test/CodeGen/PowerPC/atomics.ll
@@ -13,31 +13,31 @@ define i8 @load_i8_unordered(i8* %mem) {
; CHECK-LABEL: load_i8_unordered
; CHECK: lbz
; CHECK-NOT: sync
- %val = load atomic i8* %mem unordered, align 1
+ %val = load atomic i8, i8* %mem unordered, align 1
ret i8 %val
}
define i16 @load_i16_monotonic(i16* %mem) {
; CHECK-LABEL: load_i16_monotonic
; CHECK: lhz
; CHECK-NOT: sync
- %val = load atomic i16* %mem monotonic, align 2
+ %val = load atomic i16, i16* %mem monotonic, align 2
ret i16 %val
}
define i32 @load_i32_acquire(i32* %mem) {
; CHECK-LABEL: load_i32_acquire
; CHECK: lwz
- %val = load atomic i32* %mem acquire, align 4
-; CHECK: sync 1
+ %val = load atomic i32, i32* %mem acquire, align 4
+; CHECK: lwsync
ret i32 %val
}
define i64 @load_i64_seq_cst(i64* %mem) {
; CHECK-LABEL: load_i64_seq_cst
-; CHECK: sync 0
+; CHECK: sync
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: ld
- %val = load atomic i64* %mem seq_cst, align 8
-; CHECK: sync 1
+ %val = load atomic i64, i64* %mem seq_cst, align 8
+; CHECK: lwsync
ret i64 %val
}
@@ -58,14 +58,14 @@ define void @store_i16_monotonic(i16* %mem) {
}
define void @store_i32_release(i32* %mem) {
; CHECK-LABEL: store_i32_release
-; CHECK: sync 1
+; CHECK: lwsync
; CHECK: stw
store atomic i32 42, i32* %mem release, align 4
ret void
}
define void @store_i64_seq_cst(i64* %mem) {
; CHECK-LABEL: store_i64_seq_cst
-; CHECK: sync 0
+; CHECK: sync
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: std
@@ -76,9 +76,9 @@ define void @store_i64_seq_cst(i64* %mem) {
; Atomic CmpXchg
define i8 @cas_strong_i8_sc_sc(i8* %mem) {
; CHECK-LABEL: cas_strong_i8_sc_sc
-; CHECK: sync 0
+; CHECK: sync
%val = cmpxchg i8* %mem, i8 0, i8 1 seq_cst seq_cst
-; CHECK: sync 1
+; CHECK: lwsync
%loaded = extractvalue { i8, i1} %val, 0
ret i8 %loaded
}
@@ -86,21 +86,21 @@ define i16 @cas_weak_i16_acquire_acquire(i16* %mem) {
; CHECK-LABEL: cas_weak_i16_acquire_acquire
;CHECK-NOT: sync
%val = cmpxchg weak i16* %mem, i16 0, i16 1 acquire acquire
-; CHECK: sync 1
+; CHECK: lwsync
%loaded = extractvalue { i16, i1} %val, 0
ret i16 %loaded
}
define i32 @cas_strong_i32_acqrel_acquire(i32* %mem) {
; CHECK-LABEL: cas_strong_i32_acqrel_acquire
-; CHECK: sync 1
+; CHECK: lwsync
%val = cmpxchg i32* %mem, i32 0, i32 1 acq_rel acquire
-; CHECK: sync 1
+; CHECK: lwsync
%loaded = extractvalue { i32, i1} %val, 0
ret i32 %loaded
}
define i64 @cas_weak_i64_release_monotonic(i64* %mem) {
; CHECK-LABEL: cas_weak_i64_release_monotonic
-; CHECK: sync 1
+; CHECK: lwsync
%val = cmpxchg weak i64* %mem, i64 0, i64 1 release monotonic
; CHECK-NOT: [sync ]
%loaded = extractvalue { i64, i1} %val, 0
@@ -116,21 +116,21 @@ define i8 @add_i8_monotonic(i8* %mem, i8 %operand) {
}
define i16 @xor_i16_seq_cst(i16* %mem, i16 %operand) {
; CHECK-LABEL: xor_i16_seq_cst
-; CHECK: sync 0
+; CHECK: sync
%val = atomicrmw xor i16* %mem, i16 %operand seq_cst
-; CHECK: sync 1
+; CHECK: lwsync
ret i16 %val
}
define i32 @xchg_i32_acq_rel(i32* %mem, i32 %operand) {
; CHECK-LABEL: xchg_i32_acq_rel
-; CHECK: sync 1
+; CHECK: lwsync
%val = atomicrmw xchg i32* %mem, i32 %operand acq_rel
-; CHECK: sync 1
+; CHECK: lwsync
ret i32 %val
}
define i64 @and_i64_release(i64* %mem, i64 %operand) {
; CHECK-LABEL: and_i64_release
-; CHECK: sync 1
+; CHECK: lwsync
%val = atomicrmw and i64* %mem, i64 %operand release
; CHECK-NOT: [sync ]
ret i64 %val
diff --git a/test/CodeGen/PowerPC/bdzlr.ll b/test/CodeGen/PowerPC/bdzlr.ll
index 29b74c6c8c66..d6506044868f 100644
--- a/test/CodeGen/PowerPC/bdzlr.ll
+++ b/test/CodeGen/PowerPC/bdzlr.ll
@@ -35,8 +35,8 @@ for.body.lr.ph: ; preds = %if.end
for.body: ; preds = %for.body.for.body_crit_edge, %for.body.lr.ph
%0 = phi %struct.lua_TValue.17.692* [ undef, %for.body.lr.ph ], [ %.pre, %for.body.for.body_crit_edge ]
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body.for.body_crit_edge ]
- %tt = getelementptr inbounds %struct.lua_TValue.17.692* %0, i64 %indvars.iv, i32 1
- %1 = load i32* %tt, align 4
+ %tt = getelementptr inbounds %struct.lua_TValue.17.692, %struct.lua_TValue.17.692* %0, i64 %indvars.iv, i32 1
+ %1 = load i32, i32* %tt, align 4
store i32 %1, i32* undef, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -44,7 +44,7 @@ for.body: ; preds = %for.body.for.body_c
br i1 %exitcond, label %for.end, label %for.body.for.body_crit_edge
for.body.for.body_crit_edge: ; preds = %for.body
- %.pre = load %struct.lua_TValue.17.692** undef, align 8
+ %.pre = load %struct.lua_TValue.17.692*, %struct.lua_TValue.17.692** undef, align 8
br label %for.body
for.end: ; preds = %for.body, %if.end, %entry
diff --git a/test/CodeGen/PowerPC/bperm.ll b/test/CodeGen/PowerPC/bperm.ll
index c489c1f90a8f..c5a728859a05 100644
--- a/test/CodeGen/PowerPC/bperm.ll
+++ b/test/CodeGen/PowerPC/bperm.ll
@@ -22,15 +22,15 @@ entry:
ret i64 %0
; CHECK-LABEL: @bs8
-; CHECK-DAG: rldicl [[REG1:[0-9]+]], 3, 16, 0
-; CHECK-DAG: rldicl [[REG2:[0-9]+]], 3, 8, 0
-; CHECK-DAG: rldicl [[REG3:[0-9]+]], 3, 24, 0
+; CHECK-DAG: rotldi [[REG1:[0-9]+]], 3, 16
+; CHECK-DAG: rotldi [[REG2:[0-9]+]], 3, 8
+; CHECK-DAG: rotldi [[REG3:[0-9]+]], 3, 24
; CHECK-DAG: rldimi [[REG2]], [[REG1]], 8, 48
-; CHECK-DAG: rldicl [[REG4:[0-9]+]], 3, 32, 0
+; CHECK-DAG: rotldi [[REG4:[0-9]+]], 3, 32
; CHECK-DAG: rldimi [[REG2]], [[REG3]], 16, 40
-; CHECK-DAG: rldicl [[REG5:[0-9]+]], 3, 48, 0
+; CHECK-DAG: rotldi [[REG5:[0-9]+]], 3, 48
; CHECK-DAG: rldimi [[REG2]], [[REG4]], 24, 32
-; CHECK-DAG: rldicl [[REG6:[0-9]+]], 3, 56, 0
+; CHECK-DAG: rotldi [[REG6:[0-9]+]], 3, 56
; CHECK-DAG: rldimi [[REG2]], [[REG5]], 40, 16
; CHECK-DAG: rldimi [[REG2]], [[REG6]], 48, 8
; CHECK-DAG: rldimi [[REG2]], 3, 56, 0
@@ -46,7 +46,7 @@ entry:
; CHECK-LABEL: @test1
; CHECK-DAG: li [[REG1:[0-9]+]], 11375
-; CHECK-DAG: rldicl [[REG3:[0-9]+]], 4, 56, 0
+; CHECK-DAG: rotldi [[REG3:[0-9]+]], 4, 56
; CHECK-DAG: sldi [[REG2:[0-9]+]], [[REG1]], 19
; CHECK: and 3, [[REG3]], [[REG2]]
; CHECK: blr
@@ -60,7 +60,7 @@ entry:
; CHECK-LABEL: @test2
; CHECK-DAG: lis [[REG1:[0-9]+]], 474
-; CHECK-DAG: rldicl [[REG5:[0-9]+]], 4, 58, 0
+; CHECK-DAG: rotldi [[REG5:[0-9]+]], 4, 58
; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 3648
; CHECK-DAG: sldi [[REG3:[0-9]+]], [[REG2]], 32
; CHECK-DAG: oris [[REG4:[0-9]+]], [[REG3]], 25464
@@ -76,7 +76,7 @@ entry:
; CHECK-LABEL: @test3
; CHECK-DAG: lis [[REG1:[0-9]+]], 170
-; CHECK-DAG: rldicl [[REG4:[0-9]+]], 3, 34, 0
+; CHECK-DAG: rotldi [[REG4:[0-9]+]], 3, 34
; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 22861
; CHECK-DAG: sldi [[REG3:[0-9]+]], [[REG2]], 34
; CHECK: and 3, [[REG4]], [[REG3]]
@@ -90,7 +90,7 @@ entry:
ret i64 %and
; CHECK-LABEL: @test4
-; CHECK: rldicl [[REG1:[0-9]+]], 4, 49, 0
+; CHECK: rotldi [[REG1:[0-9]+]], 4, 49
; CHECK: andis. 3, [[REG1]], 888
; CHECK: blr
}
@@ -103,7 +103,7 @@ entry:
; CHECK-LABEL: @test5
; CHECK-DAG: lis [[REG1:[0-9]+]], 3703
-; CHECK-DAG: rldicl [[REG4:[0-9]+]], 4, 12, 0
+; CHECK-DAG: rotldi [[REG4:[0-9]+]], 4, 12
; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 35951
; CHECK-DAG: sldi [[REG3:[0-9]+]], [[REG2]], 19
; CHECK: and 3, [[REG4]], [[REG3]]
@@ -148,7 +148,7 @@ entry:
; CHECK-LABEL: @test8
; CHECK-DAG: lis [[REG1:[0-9]+]], 4
-; CHECK-DAG: rldicl [[REG4:[0-9]+]], 3, 63, 0
+; CHECK-DAG: rotldi [[REG4:[0-9]+]], 3, 63
; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 60527
; CHECK-DAG: sldi [[REG3:[0-9]+]], [[REG2]], 19
; CHECK: and 3, [[REG4]], [[REG3]]
@@ -166,8 +166,8 @@ entry:
; CHECK-LABEL: @test9
; CHECK-DAG: lis [[REG1:[0-9]+]], 1440
-; CHECK-DAG: rldicl [[REG5:[0-9]+]], 4, 62, 0
-; CHECK-DAG: rldicl [[REG6:[0-9]+]], 4, 50, 0
+; CHECK-DAG: rotldi [[REG5:[0-9]+]], 4, 62
+; CHECK-DAG: rotldi [[REG6:[0-9]+]], 4, 50
; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 4
; CHECK-DAG: rldimi [[REG6]], [[REG5]], 53, 0
; CHECK-DAG: sldi [[REG3:[0-9]+]], [[REG2]], 32
@@ -187,8 +187,8 @@ entry:
; CHECK-LABEL: @test10
; CHECK-DAG: lis [[REG1:[0-9]+]], 1
-; CHECK-DAG: rldicl [[REG6:[0-9]+]], 3, 25, 0
-; CHECK-DAG: rldicl [[REG7:[0-9]+]], 3, 37, 0
+; CHECK-DAG: rotldi [[REG6:[0-9]+]], 3, 25
+; CHECK-DAG: rotldi [[REG7:[0-9]+]], 3, 37
; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 8183
; CHECK-DAG: ori [[REG3:[0-9]+]], [[REG1]], 50017
; CHECK-DAG: sldi [[REG4:[0-9]+]], [[REG2]], 25
diff --git a/test/CodeGen/PowerPC/branch-opt.ll b/test/CodeGen/PowerPC/branch-opt.ll
index dda1538f1cdf..d6928dde2a7d 100644
--- a/test/CodeGen/PowerPC/branch-opt.ll
+++ b/test/CodeGen/PowerPC/branch-opt.ll
@@ -11,7 +11,7 @@ entry:
br i1 %tmp1.upgrd.1, label %cond_false, label %bb5
bb: ; preds = %bb5, %bb
%indvar77 = phi i32 [ %indvar.next78, %bb ], [ 0, %bb5 ] ; <i32> [#uses=1]
- %tmp2 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ %tmp2 = tail call i32 (...) @bar( ) ; <i32> [#uses=0]
%indvar.next78 = add i32 %indvar77, 1 ; <i32> [#uses=2]
%exitcond79 = icmp eq i32 %indvar.next78, %X ; <i1> [#uses=1]
br i1 %exitcond79, label %cond_next48, label %bb
@@ -24,7 +24,7 @@ cond_false: ; preds = %entry
br i1 %tmp10.upgrd.2, label %cond_false20, label %bb16
bb12: ; preds = %bb16, %bb12
%indvar72 = phi i32 [ %indvar.next73, %bb12 ], [ 0, %bb16 ] ; <i32> [#uses=1]
- %tmp13 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ %tmp13 = tail call i32 (...) @bar( ) ; <i32> [#uses=0]
%indvar.next73 = add i32 %indvar72, 1 ; <i32> [#uses=2]
%exitcond74 = icmp eq i32 %indvar.next73, %Y ; <i1> [#uses=1]
br i1 %exitcond74, label %cond_next48, label %bb12
@@ -37,7 +37,7 @@ cond_false20: ; preds = %cond_false
br i1 %tmp23.upgrd.3, label %cond_false33, label %bb29
bb25: ; preds = %bb29, %bb25
%indvar67 = phi i32 [ %indvar.next68, %bb25 ], [ 0, %bb29 ] ; <i32> [#uses=1]
- %tmp26 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ %tmp26 = tail call i32 (...) @bar( ) ; <i32> [#uses=0]
%indvar.next68 = add i32 %indvar67, 1 ; <i32> [#uses=2]
%exitcond69 = icmp eq i32 %indvar.next68, %Z ; <i1> [#uses=1]
br i1 %exitcond69, label %cond_next48, label %bb25
@@ -49,7 +49,7 @@ cond_false33: ; preds = %cond_false20
%tmp36.upgrd.4 = icmp eq i32 %tmp36, 0 ; <i1> [#uses=1]
br i1 %tmp36.upgrd.4, label %cond_next48, label %bb42
bb38: ; preds = %bb42
- %tmp39 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ %tmp39 = tail call i32 (...) @bar( ) ; <i32> [#uses=0]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
br label %bb42
bb42: ; preds = %bb38, %cond_false33
@@ -62,7 +62,7 @@ cond_next48: ; preds = %bb42, %cond_false33, %bb29, %bb25, %bb16, %bb12, %bb5,
%tmp50 = icmp eq i32 %W_addr.1, 0 ; <i1> [#uses=1]
br i1 %tmp50, label %UnifiedReturnBlock, label %cond_true51
cond_true51: ; preds = %cond_next48
- %tmp52 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ %tmp52 = tail call i32 (...) @bar( ) ; <i32> [#uses=0]
ret void
UnifiedReturnBlock: ; preds = %cond_next48
ret void
diff --git a/test/CodeGen/PowerPC/bswap-load-store.ll b/test/CodeGen/PowerPC/bswap-load-store.ll
index 53bbc52167c4..cee1f0cdaa99 100644
--- a/test/CodeGen/PowerPC/bswap-load-store.ll
+++ b/test/CodeGen/PowerPC/bswap-load-store.ll
@@ -5,7 +5,7 @@
define void @STWBRX(i32 %i, i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
%tmp1.upgrd.1 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
%tmp13 = tail call i32 @llvm.bswap.i32( i32 %i ) ; <i32> [#uses=1]
store i32 %tmp13, i32* %tmp1.upgrd.1
@@ -13,15 +13,15 @@ define void @STWBRX(i32 %i, i8* %ptr, i32 %off) {
}
define i32 @LWBRX(i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
%tmp1.upgrd.2 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
- %tmp = load i32* %tmp1.upgrd.2 ; <i32> [#uses=1]
+ %tmp = load i32, i32* %tmp1.upgrd.2 ; <i32> [#uses=1]
%tmp14 = tail call i32 @llvm.bswap.i32( i32 %tmp ) ; <i32> [#uses=1]
ret i32 %tmp14
}
define void @STHBRX(i16 %s, i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
%tmp1.upgrd.3 = bitcast i8* %tmp1 to i16* ; <i16*> [#uses=1]
%tmp5 = call i16 @llvm.bswap.i16( i16 %s ) ; <i16> [#uses=1]
store i16 %tmp5, i16* %tmp1.upgrd.3
@@ -29,15 +29,15 @@ define void @STHBRX(i16 %s, i8* %ptr, i32 %off) {
}
define i16 @LHBRX(i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
%tmp1.upgrd.4 = bitcast i8* %tmp1 to i16* ; <i16*> [#uses=1]
- %tmp = load i16* %tmp1.upgrd.4 ; <i16> [#uses=1]
+ %tmp = load i16, i16* %tmp1.upgrd.4 ; <i16> [#uses=1]
%tmp6 = call i16 @llvm.bswap.i16( i16 %tmp ) ; <i16> [#uses=1]
ret i16 %tmp6
}
define void @STDBRX(i64 %i, i8* %ptr, i64 %off) {
- %tmp1 = getelementptr i8* %ptr, i64 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i64 %off ; <i8*> [#uses=1]
%tmp1.upgrd.1 = bitcast i8* %tmp1 to i64* ; <i64*> [#uses=1]
%tmp13 = tail call i64 @llvm.bswap.i64( i64 %i ) ; <i64> [#uses=1]
store i64 %tmp13, i64* %tmp1.upgrd.1
@@ -45,9 +45,9 @@ define void @STDBRX(i64 %i, i8* %ptr, i64 %off) {
}
define i64 @LDBRX(i8* %ptr, i64 %off) {
- %tmp1 = getelementptr i8* %ptr, i64 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i64 %off ; <i8*> [#uses=1]
%tmp1.upgrd.2 = bitcast i8* %tmp1 to i64* ; <i64*> [#uses=1]
- %tmp = load i64* %tmp1.upgrd.2 ; <i64> [#uses=1]
+ %tmp = load i64, i64* %tmp1.upgrd.2 ; <i64> [#uses=1]
%tmp14 = tail call i64 @llvm.bswap.i64( i64 %tmp ) ; <i64> [#uses=1]
ret i64 %tmp14
}
diff --git a/test/CodeGen/PowerPC/buildvec_canonicalize.ll b/test/CodeGen/PowerPC/buildvec_canonicalize.ll
index b70671bfd5cb..6c591912d9b2 100644
--- a/test/CodeGen/PowerPC/buildvec_canonicalize.ll
+++ b/test/CodeGen/PowerPC/buildvec_canonicalize.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=+altivec --enable-unsafe-fp-math | FileCheck %s
define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
- %tmp = load <4 x float>* %P3 ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
+ %tmp = load <4 x float>, <4 x float>* %P3 ; <<4 x float>> [#uses=1]
+ %tmp3 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
%tmp4 = fmul <4 x float> %tmp, %tmp3 ; <<4 x float>> [#uses=1]
store <4 x float> %tmp4, <4 x float>* %P3
store <4 x float> zeroinitializer, <4 x float>* %P1
diff --git a/test/CodeGen/PowerPC/byval-aliased.ll b/test/CodeGen/PowerPC/byval-aliased.ll
index 9ef2f02f036a..8668e64d4d51 100644
--- a/test/CodeGen/PowerPC/byval-aliased.ll
+++ b/test/CodeGen/PowerPC/byval-aliased.ll
@@ -7,8 +7,8 @@ target triple = "powerpc-apple-macosx10.5.0"
; Function Attrs: nounwind ssp
define void @foo(%struct.sm* byval %s) #0 {
entry:
- %a = getelementptr inbounds %struct.sm* %s, i32 0, i32 0
- %0 = load i8* %a, align 1
+ %a = getelementptr inbounds %struct.sm, %struct.sm* %s, i32 0, i32 0
+ %0 = load i8, i8* %a, align 1
%conv2 = zext i8 %0 to i32
%add = add nuw nsw i32 %conv2, 3
%conv1 = trunc i32 %add to i8
diff --git a/test/CodeGen/PowerPC/cmpb-ppc32.ll b/test/CodeGen/PowerPC/cmpb-ppc32.ll
index 639ed887b978..b5cb0935d886 100644
--- a/test/CodeGen/PowerPC/cmpb-ppc32.ll
+++ b/test/CodeGen/PowerPC/cmpb-ppc32.ll
@@ -17,7 +17,7 @@ entry:
; CHECK-LABEL: @test16
; CHECK: cmpb [[REG1:[0-9]+]], 4, 3
-; CHECK: rlwinm 3, [[REG1]], 0, 16, 31
+; CHECK: clrlwi 3, [[REG1]], 16
; CHECK: blr
}
diff --git a/test/CodeGen/PowerPC/cmpb.ll b/test/CodeGen/PowerPC/cmpb.ll
index 7d0c0ab3316b..d1c951df962e 100644
--- a/test/CodeGen/PowerPC/cmpb.ll
+++ b/test/CodeGen/PowerPC/cmpb.ll
@@ -17,7 +17,7 @@ entry:
; CHECK-LABEL: @test16
; CHECK: cmpb [[REG1:[0-9]+]], 4, 3
-; CHECK: rldicl 3, [[REG1]], 0, 48
+; CHECK: clrldi 3, [[REG1]], 48
; CHECK: blr
}
@@ -73,7 +73,7 @@ entry:
; CHECK-LABEL: @test16p3
; CHECK: cmpb [[REG1:[0-9]+]], 4, 3
-; CHECK: rldicl [[REG2:[0-9]+]], [[REG1]], 0, 55
+; CHECK: clrldi [[REG2:[0-9]+]], [[REG1]], 55
; CHECK: xori 3, [[REG2]], 1280
; CHECK: blr
}
@@ -99,7 +99,7 @@ entry:
; CHECK-LABEL: @test32
; CHECK: cmpb [[REG1:[0-9]+]], 4, 3
-; CHECK: rldicl 3, [[REG1]], 0, 32
+; CHECK: clrldi 3, [[REG1]], 32
; CHECK: blr
}
diff --git a/test/CodeGen/PowerPC/code-align.ll b/test/CodeGen/PowerPC/code-align.ll
index 306230be5005..19d1b236ce0d 100644
--- a/test/CodeGen/PowerPC/code-align.ll
+++ b/test/CodeGen/PowerPC/code-align.ll
@@ -44,6 +44,9 @@ entry:
; GENERIC-NOT: .align
; BASIC: .align 4
; PWR: .align 4
+; GENERIC: lwzu
+; BASIC: lwzu
+; PWR: lwzu
; GENERIC: bdnz
; BASIC: bdnz
; PWR: bdnz
@@ -51,17 +54,19 @@ entry:
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%induction45 = or i64 %index, 1
- %0 = getelementptr inbounds i32* %a, i64 %index
- %1 = getelementptr inbounds i32* %a, i64 %induction45
- %2 = load i32* %0, align 4
- %3 = load i32* %1, align 4
+ %0 = getelementptr inbounds i32, i32* %a, i64 %index
+ %1 = getelementptr inbounds i32, i32* %a, i64 %induction45
+ %2 = load i32, i32* %0, align 4
+ %3 = load i32, i32* %1, align 4
%4 = add nsw i32 %2, 4
%5 = add nsw i32 %3, 4
- store i32 %4, i32* %0, align 4
- store i32 %5, i32* %1, align 4
+ %6 = mul nsw i32 %4, 3
+ %7 = mul nsw i32 %5, 3
+ store i32 %6, i32* %0, align 4
+ store i32 %7, i32* %1, align 4
%index.next = add i64 %index, 2
- %6 = icmp eq i64 %index.next, 2048
- br i1 %6, label %for.end, label %vector.body
+ %8 = icmp eq i64 %index.next, 2048
+ br i1 %8, label %for.end, label %vector.body
for.end: ; preds = %vector.body
ret void
@@ -87,10 +92,11 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, 4
- store i32 %add, i32* %arrayidx, align 4
+ %mul = mul nsw i32 %add, 3
+ store i32 %mul, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 2048
br i1 %exitcond, label %for.end, label %for.body
diff --git a/test/CodeGen/PowerPC/compare-simm.ll b/test/CodeGen/PowerPC/compare-simm.ll
index 94c5c0290f58..12eff7bb1813 100644
--- a/test/CodeGen/PowerPC/compare-simm.ll
+++ b/test/CodeGen/PowerPC/compare-simm.ll
@@ -1,7 +1,9 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 | \
-; RUN: grep "cmpwi cr0, r3, -1"
+; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 | FileCheck %s
define i32 @test(i32 %x) nounwind {
+; CHECK-LABEL: @test
+; CHECK: cmpwi r3, -1
+
%c = icmp eq i32 %x, -1
br i1 %c, label %T, label %F
T:
diff --git a/test/CodeGen/PowerPC/complex-return.ll b/test/CodeGen/PowerPC/complex-return.ll
index 9d25e619d2e5..e419f0799522 100644
--- a/test/CodeGen/PowerPC/complex-return.ll
+++ b/test/CodeGen/PowerPC/complex-return.ll
@@ -7,19 +7,19 @@ define { ppc_fp128, ppc_fp128 } @foo() nounwind {
entry:
%retval = alloca { ppc_fp128, ppc_fp128 }, align 16
%x = alloca { ppc_fp128, ppc_fp128 }, align 16
- %real = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0
- %imag = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1
+ %real = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0
+ %imag = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1
store ppc_fp128 0xM400C0000000000000000000000000000, ppc_fp128* %real
store ppc_fp128 0xMC00547AE147AE1483CA47AE147AE147A, ppc_fp128* %imag
- %x.realp = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0
- %x.real = load ppc_fp128* %x.realp
- %x.imagp = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1
- %x.imag = load ppc_fp128* %x.imagp
- %real1 = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 0
- %imag2 = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 1
+ %x.realp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0
+ %x.real = load ppc_fp128, ppc_fp128* %x.realp
+ %x.imagp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1
+ %x.imag = load ppc_fp128, ppc_fp128* %x.imagp
+ %real1 = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 0
+ %imag2 = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 1
store ppc_fp128 %x.real, ppc_fp128* %real1
store ppc_fp128 %x.imag, ppc_fp128* %imag2
- %0 = load { ppc_fp128, ppc_fp128 }* %retval
+ %0 = load { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval
ret { ppc_fp128, ppc_fp128 } %0
}
@@ -33,19 +33,19 @@ define { float, float } @oof() nounwind {
entry:
%retval = alloca { float, float }, align 4
%x = alloca { float, float }, align 4
- %real = getelementptr inbounds { float, float }* %x, i32 0, i32 0
- %imag = getelementptr inbounds { float, float }* %x, i32 0, i32 1
+ %real = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 1
store float 3.500000e+00, float* %real
store float 0xC00547AE20000000, float* %imag
- %x.realp = getelementptr inbounds { float, float }* %x, i32 0, i32 0
- %x.real = load float* %x.realp
- %x.imagp = getelementptr inbounds { float, float }* %x, i32 0, i32 1
- %x.imag = load float* %x.imagp
- %real1 = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
- %imag2 = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+ %x.realp = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 0
+ %x.real = load float, float* %x.realp
+ %x.imagp = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 1
+ %x.imag = load float, float* %x.imagp
+ %real1 = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
+ %imag2 = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
store float %x.real, float* %real1
store float %x.imag, float* %imag2
- %0 = load { float, float }* %retval
+ %0 = load { float, float }, { float, float }* %retval
ret { float, float } %0
}
diff --git a/test/CodeGen/PowerPC/cr-spills.ll b/test/CodeGen/PowerPC/cr-spills.ll
index be0dbad6289e..1a903115c0da 100644
--- a/test/CodeGen/PowerPC/cr-spills.ll
+++ b/test/CodeGen/PowerPC/cr-spills.ll
@@ -31,7 +31,7 @@ land.rhs: ; preds = %land.lhs.true, %lan
land.end: ; preds = %land.rhs, %land.lhs.true, %entry
%0 = phi i1 [ %tobool21, %land.rhs ], [ false, %land.lhs.true ], [ false, %entry ]
- %cond = load i32** undef, align 8
+ %cond = load i32*, i32** undef, align 8
br i1 undef, label %if.then95, label %for.body.lr.ph
if.then95: ; preds = %land.end
@@ -52,12 +52,12 @@ for.cond286.preheader: ; preds = %for.body252
br label %for.cond290.preheader
for.cond290.preheader: ; preds = %for.end520, %for.cond286.preheader
- %srcptr.31595 = phi i16* [ getelementptr inbounds ([768 x i16]* @SetupFastFullPelSearch.orig_pels, i64 0, i64 0), %for.cond286.preheader ], [ null, %for.end520 ]
- %1 = load i32* undef, align 4
- %2 = load i32* @weight_luma, align 4
- %3 = load i32* @wp_luma_round, align 4
- %4 = load i32* @luma_log_weight_denom, align 4
- %5 = load i32* @offset_luma, align 4
+ %srcptr.31595 = phi i16* [ getelementptr inbounds ([768 x i16], [768 x i16]* @SetupFastFullPelSearch.orig_pels, i64 0, i64 0), %for.cond286.preheader ], [ null, %for.end520 ]
+ %1 = load i32, i32* undef, align 4
+ %2 = load i32, i32* @weight_luma, align 4
+ %3 = load i32, i32* @wp_luma_round, align 4
+ %4 = load i32, i32* @luma_log_weight_denom, align 4
+ %5 = load i32, i32* @offset_luma, align 4
%incdec.ptr502.sum = add i64 undef, 16
br label %for.body293
@@ -68,7 +68,7 @@ for.body293: ; preds = %for.body293, %for.c
%LineSadBlk1.01587 = phi i32 [ 0, %for.cond290.preheader ], [ %add402, %for.body293 ]
%LineSadBlk3.01586 = phi i32 [ 0, %for.cond290.preheader ], [ %add514, %for.body293 ]
%LineSadBlk2.01585 = phi i32 [ 0, %for.cond290.preheader ], [ %add458, %for.body293 ]
- %6 = load i16* %refptr.11590, align 2
+ %6 = load i16, i16* %refptr.11590, align 2
%conv294 = zext i16 %6 to i32
%mul295 = mul nsw i32 %conv294, %2
%add296 = add nsw i32 %mul295, %3
@@ -78,16 +78,16 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1514 = select i1 %cmp.i.i1513, i32 %add297, i32 0
%cmp.i4.i1515 = icmp slt i32 %cond.i.i1514, %1
%cond.i5.i1516 = select i1 %cmp.i4.i1515, i32 %cond.i.i1514, i32 %1
- %7 = load i16* %srcptr.41591, align 2
+ %7 = load i16, i16* %srcptr.41591, align 2
%conv300 = zext i16 %7 to i32
%sub301 = sub nsw i32 %cond.i5.i1516, %conv300
%idxprom302 = sext i32 %sub301 to i64
- %arrayidx303 = getelementptr inbounds i32* %cond, i64 %idxprom302
- %8 = load i32* %arrayidx303, align 4
+ %arrayidx303 = getelementptr inbounds i32, i32* %cond, i64 %idxprom302
+ %8 = load i32, i32* %arrayidx303, align 4
%add304 = add nsw i32 %8, %LineSadBlk0.01588
- %9 = load i32* undef, align 4
+ %9 = load i32, i32* undef, align 4
%add318 = add nsw i32 %add304, %9
- %10 = load i16* undef, align 2
+ %10 = load i16, i16* undef, align 2
%conv321 = zext i16 %10 to i32
%mul322 = mul nsw i32 %conv321, %2
%add323 = add nsw i32 %mul322, %3
@@ -99,23 +99,23 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i5.i1508 = select i1 %cmp.i4.i1507, i32 %cond.i.i1506, i32 %1
%sub329 = sub nsw i32 %cond.i5.i1508, 0
%idxprom330 = sext i32 %sub329 to i64
- %arrayidx331 = getelementptr inbounds i32* %cond, i64 %idxprom330
- %11 = load i32* %arrayidx331, align 4
+ %arrayidx331 = getelementptr inbounds i32, i32* %cond, i64 %idxprom330
+ %11 = load i32, i32* %arrayidx331, align 4
%add332 = add nsw i32 %add318, %11
%cmp.i.i1501 = icmp sgt i32 undef, 0
%cond.i.i1502 = select i1 %cmp.i.i1501, i32 undef, i32 0
%cmp.i4.i1503 = icmp slt i32 %cond.i.i1502, %1
%cond.i5.i1504 = select i1 %cmp.i4.i1503, i32 %cond.i.i1502, i32 %1
- %incdec.ptr341 = getelementptr inbounds i16* %srcptr.41591, i64 4
- %12 = load i16* null, align 2
+ %incdec.ptr341 = getelementptr inbounds i16, i16* %srcptr.41591, i64 4
+ %12 = load i16, i16* null, align 2
%conv342 = zext i16 %12 to i32
%sub343 = sub nsw i32 %cond.i5.i1504, %conv342
%idxprom344 = sext i32 %sub343 to i64
- %arrayidx345 = getelementptr inbounds i32* %cond, i64 %idxprom344
- %13 = load i32* %arrayidx345, align 4
+ %arrayidx345 = getelementptr inbounds i32, i32* %cond, i64 %idxprom344
+ %13 = load i32, i32* %arrayidx345, align 4
%add346 = add nsw i32 %add332, %13
- %incdec.ptr348 = getelementptr inbounds i16* %refptr.11590, i64 5
- %14 = load i16* null, align 2
+ %incdec.ptr348 = getelementptr inbounds i16, i16* %refptr.11590, i64 5
+ %14 = load i16, i16* null, align 2
%conv349 = zext i16 %14 to i32
%mul350 = mul nsw i32 %conv349, %2
%add351 = add nsw i32 %mul350, %3
@@ -125,16 +125,16 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1498 = select i1 %cmp.i.i1497, i32 %add353, i32 0
%cmp.i4.i1499 = icmp slt i32 %cond.i.i1498, %1
%cond.i5.i1500 = select i1 %cmp.i4.i1499, i32 %cond.i.i1498, i32 %1
- %incdec.ptr355 = getelementptr inbounds i16* %srcptr.41591, i64 5
- %15 = load i16* %incdec.ptr341, align 2
+ %incdec.ptr355 = getelementptr inbounds i16, i16* %srcptr.41591, i64 5
+ %15 = load i16, i16* %incdec.ptr341, align 2
%conv356 = zext i16 %15 to i32
%sub357 = sub nsw i32 %cond.i5.i1500, %conv356
%idxprom358 = sext i32 %sub357 to i64
- %arrayidx359 = getelementptr inbounds i32* %cond, i64 %idxprom358
- %16 = load i32* %arrayidx359, align 4
+ %arrayidx359 = getelementptr inbounds i32, i32* %cond, i64 %idxprom358
+ %16 = load i32, i32* %arrayidx359, align 4
%add360 = add nsw i32 %16, %LineSadBlk1.01587
- %incdec.ptr362 = getelementptr inbounds i16* %refptr.11590, i64 6
- %17 = load i16* %incdec.ptr348, align 2
+ %incdec.ptr362 = getelementptr inbounds i16, i16* %refptr.11590, i64 6
+ %17 = load i16, i16* %incdec.ptr348, align 2
%conv363 = zext i16 %17 to i32
%mul364 = mul nsw i32 %conv363, %2
%add365 = add nsw i32 %mul364, %3
@@ -144,16 +144,16 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1494 = select i1 %cmp.i.i1493, i32 %add367, i32 0
%cmp.i4.i1495 = icmp slt i32 %cond.i.i1494, %1
%cond.i5.i1496 = select i1 %cmp.i4.i1495, i32 %cond.i.i1494, i32 %1
- %incdec.ptr369 = getelementptr inbounds i16* %srcptr.41591, i64 6
- %18 = load i16* %incdec.ptr355, align 2
+ %incdec.ptr369 = getelementptr inbounds i16, i16* %srcptr.41591, i64 6
+ %18 = load i16, i16* %incdec.ptr355, align 2
%conv370 = zext i16 %18 to i32
%sub371 = sub nsw i32 %cond.i5.i1496, %conv370
%idxprom372 = sext i32 %sub371 to i64
- %arrayidx373 = getelementptr inbounds i32* %cond, i64 %idxprom372
- %19 = load i32* %arrayidx373, align 4
+ %arrayidx373 = getelementptr inbounds i32, i32* %cond, i64 %idxprom372
+ %19 = load i32, i32* %arrayidx373, align 4
%add374 = add nsw i32 %add360, %19
- %incdec.ptr376 = getelementptr inbounds i16* %refptr.11590, i64 7
- %20 = load i16* %incdec.ptr362, align 2
+ %incdec.ptr376 = getelementptr inbounds i16, i16* %refptr.11590, i64 7
+ %20 = load i16, i16* %incdec.ptr362, align 2
%conv377 = zext i16 %20 to i32
%mul378 = mul nsw i32 %conv377, %2
%add379 = add nsw i32 %mul378, %3
@@ -163,15 +163,15 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1490 = select i1 %cmp.i.i1489, i32 %add381, i32 0
%cmp.i4.i1491 = icmp slt i32 %cond.i.i1490, %1
%cond.i5.i1492 = select i1 %cmp.i4.i1491, i32 %cond.i.i1490, i32 %1
- %incdec.ptr383 = getelementptr inbounds i16* %srcptr.41591, i64 7
- %21 = load i16* %incdec.ptr369, align 2
+ %incdec.ptr383 = getelementptr inbounds i16, i16* %srcptr.41591, i64 7
+ %21 = load i16, i16* %incdec.ptr369, align 2
%conv384 = zext i16 %21 to i32
%sub385 = sub nsw i32 %cond.i5.i1492, %conv384
%idxprom386 = sext i32 %sub385 to i64
- %arrayidx387 = getelementptr inbounds i32* %cond, i64 %idxprom386
- %22 = load i32* %arrayidx387, align 4
+ %arrayidx387 = getelementptr inbounds i32, i32* %cond, i64 %idxprom386
+ %22 = load i32, i32* %arrayidx387, align 4
%add388 = add nsw i32 %add374, %22
- %23 = load i16* %incdec.ptr376, align 2
+ %23 = load i16, i16* %incdec.ptr376, align 2
%conv391 = zext i16 %23 to i32
%mul392 = mul nsw i32 %conv391, %2
%add395 = add nsw i32 0, %5
@@ -179,26 +179,26 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1486 = select i1 %cmp.i.i1485, i32 %add395, i32 0
%cmp.i4.i1487 = icmp slt i32 %cond.i.i1486, %1
%cond.i5.i1488 = select i1 %cmp.i4.i1487, i32 %cond.i.i1486, i32 %1
- %incdec.ptr397 = getelementptr inbounds i16* %srcptr.41591, i64 8
- %24 = load i16* %incdec.ptr383, align 2
+ %incdec.ptr397 = getelementptr inbounds i16, i16* %srcptr.41591, i64 8
+ %24 = load i16, i16* %incdec.ptr383, align 2
%conv398 = zext i16 %24 to i32
%sub399 = sub nsw i32 %cond.i5.i1488, %conv398
%idxprom400 = sext i32 %sub399 to i64
- %arrayidx401 = getelementptr inbounds i32* %cond, i64 %idxprom400
- %25 = load i32* %arrayidx401, align 4
+ %arrayidx401 = getelementptr inbounds i32, i32* %cond, i64 %idxprom400
+ %25 = load i32, i32* %arrayidx401, align 4
%add402 = add nsw i32 %add388, %25
- %incdec.ptr404 = getelementptr inbounds i16* %refptr.11590, i64 9
+ %incdec.ptr404 = getelementptr inbounds i16, i16* %refptr.11590, i64 9
%cmp.i4.i1483 = icmp slt i32 undef, %1
%cond.i5.i1484 = select i1 %cmp.i4.i1483, i32 undef, i32 %1
- %26 = load i16* %incdec.ptr397, align 2
+ %26 = load i16, i16* %incdec.ptr397, align 2
%conv412 = zext i16 %26 to i32
%sub413 = sub nsw i32 %cond.i5.i1484, %conv412
%idxprom414 = sext i32 %sub413 to i64
- %arrayidx415 = getelementptr inbounds i32* %cond, i64 %idxprom414
- %27 = load i32* %arrayidx415, align 4
+ %arrayidx415 = getelementptr inbounds i32, i32* %cond, i64 %idxprom414
+ %27 = load i32, i32* %arrayidx415, align 4
%add416 = add nsw i32 %27, %LineSadBlk2.01585
- %incdec.ptr418 = getelementptr inbounds i16* %refptr.11590, i64 10
- %28 = load i16* %incdec.ptr404, align 2
+ %incdec.ptr418 = getelementptr inbounds i16, i16* %refptr.11590, i64 10
+ %28 = load i16, i16* %incdec.ptr404, align 2
%conv419 = zext i16 %28 to i32
%mul420 = mul nsw i32 %conv419, %2
%add421 = add nsw i32 %mul420, %3
@@ -208,14 +208,14 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1478 = select i1 %cmp.i.i1477, i32 %add423, i32 0
%cmp.i4.i1479 = icmp slt i32 %cond.i.i1478, %1
%cond.i5.i1480 = select i1 %cmp.i4.i1479, i32 %cond.i.i1478, i32 %1
- %incdec.ptr425 = getelementptr inbounds i16* %srcptr.41591, i64 10
+ %incdec.ptr425 = getelementptr inbounds i16, i16* %srcptr.41591, i64 10
%sub427 = sub nsw i32 %cond.i5.i1480, 0
%idxprom428 = sext i32 %sub427 to i64
- %arrayidx429 = getelementptr inbounds i32* %cond, i64 %idxprom428
- %29 = load i32* %arrayidx429, align 4
+ %arrayidx429 = getelementptr inbounds i32, i32* %cond, i64 %idxprom428
+ %29 = load i32, i32* %arrayidx429, align 4
%add430 = add nsw i32 %add416, %29
- %incdec.ptr432 = getelementptr inbounds i16* %refptr.11590, i64 11
- %30 = load i16* %incdec.ptr418, align 2
+ %incdec.ptr432 = getelementptr inbounds i16, i16* %refptr.11590, i64 11
+ %30 = load i16, i16* %incdec.ptr418, align 2
%conv433 = zext i16 %30 to i32
%mul434 = mul nsw i32 %conv433, %2
%add435 = add nsw i32 %mul434, %3
@@ -225,15 +225,15 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1474 = select i1 %cmp.i.i1473, i32 %add437, i32 0
%cmp.i4.i1475 = icmp slt i32 %cond.i.i1474, %1
%cond.i5.i1476 = select i1 %cmp.i4.i1475, i32 %cond.i.i1474, i32 %1
- %31 = load i16* %incdec.ptr425, align 2
+ %31 = load i16, i16* %incdec.ptr425, align 2
%conv440 = zext i16 %31 to i32
%sub441 = sub nsw i32 %cond.i5.i1476, %conv440
%idxprom442 = sext i32 %sub441 to i64
- %arrayidx443 = getelementptr inbounds i32* %cond, i64 %idxprom442
- %32 = load i32* %arrayidx443, align 4
+ %arrayidx443 = getelementptr inbounds i32, i32* %cond, i64 %idxprom442
+ %32 = load i32, i32* %arrayidx443, align 4
%add444 = add nsw i32 %add430, %32
- %incdec.ptr446 = getelementptr inbounds i16* %refptr.11590, i64 12
- %33 = load i16* %incdec.ptr432, align 2
+ %incdec.ptr446 = getelementptr inbounds i16, i16* %refptr.11590, i64 12
+ %33 = load i16, i16* %incdec.ptr432, align 2
%conv447 = zext i16 %33 to i32
%mul448 = mul nsw i32 %conv447, %2
%add449 = add nsw i32 %mul448, %3
@@ -243,16 +243,16 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1470 = select i1 %cmp.i.i1469, i32 %add451, i32 0
%cmp.i4.i1471 = icmp slt i32 %cond.i.i1470, %1
%cond.i5.i1472 = select i1 %cmp.i4.i1471, i32 %cond.i.i1470, i32 %1
- %incdec.ptr453 = getelementptr inbounds i16* %srcptr.41591, i64 12
- %34 = load i16* undef, align 2
+ %incdec.ptr453 = getelementptr inbounds i16, i16* %srcptr.41591, i64 12
+ %34 = load i16, i16* undef, align 2
%conv454 = zext i16 %34 to i32
%sub455 = sub nsw i32 %cond.i5.i1472, %conv454
%idxprom456 = sext i32 %sub455 to i64
- %arrayidx457 = getelementptr inbounds i32* %cond, i64 %idxprom456
- %35 = load i32* %arrayidx457, align 4
+ %arrayidx457 = getelementptr inbounds i32, i32* %cond, i64 %idxprom456
+ %35 = load i32, i32* %arrayidx457, align 4
%add458 = add nsw i32 %add444, %35
- %incdec.ptr460 = getelementptr inbounds i16* %refptr.11590, i64 13
- %36 = load i16* %incdec.ptr446, align 2
+ %incdec.ptr460 = getelementptr inbounds i16, i16* %refptr.11590, i64 13
+ %36 = load i16, i16* %incdec.ptr446, align 2
%conv461 = zext i16 %36 to i32
%mul462 = mul nsw i32 %conv461, %2
%add463 = add nsw i32 %mul462, %3
@@ -262,15 +262,15 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1466 = select i1 %cmp.i.i1465, i32 %add465, i32 0
%cmp.i4.i1467 = icmp slt i32 %cond.i.i1466, %1
%cond.i5.i1468 = select i1 %cmp.i4.i1467, i32 %cond.i.i1466, i32 %1
- %incdec.ptr467 = getelementptr inbounds i16* %srcptr.41591, i64 13
- %37 = load i16* %incdec.ptr453, align 2
+ %incdec.ptr467 = getelementptr inbounds i16, i16* %srcptr.41591, i64 13
+ %37 = load i16, i16* %incdec.ptr453, align 2
%conv468 = zext i16 %37 to i32
%sub469 = sub nsw i32 %cond.i5.i1468, %conv468
%idxprom470 = sext i32 %sub469 to i64
- %arrayidx471 = getelementptr inbounds i32* %cond, i64 %idxprom470
- %38 = load i32* %arrayidx471, align 4
+ %arrayidx471 = getelementptr inbounds i32, i32* %cond, i64 %idxprom470
+ %38 = load i32, i32* %arrayidx471, align 4
%add472 = add nsw i32 %38, %LineSadBlk3.01586
- %incdec.ptr474 = getelementptr inbounds i16* %refptr.11590, i64 14
+ %incdec.ptr474 = getelementptr inbounds i16, i16* %refptr.11590, i64 14
%add477 = add nsw i32 0, %3
%shr478 = ashr i32 %add477, %4
%add479 = add nsw i32 %shr478, %5
@@ -278,16 +278,16 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1462 = select i1 %cmp.i.i1461, i32 %add479, i32 0
%cmp.i4.i1463 = icmp slt i32 %cond.i.i1462, %1
%cond.i5.i1464 = select i1 %cmp.i4.i1463, i32 %cond.i.i1462, i32 %1
- %incdec.ptr481 = getelementptr inbounds i16* %srcptr.41591, i64 14
- %39 = load i16* %incdec.ptr467, align 2
+ %incdec.ptr481 = getelementptr inbounds i16, i16* %srcptr.41591, i64 14
+ %39 = load i16, i16* %incdec.ptr467, align 2
%conv482 = zext i16 %39 to i32
%sub483 = sub nsw i32 %cond.i5.i1464, %conv482
%idxprom484 = sext i32 %sub483 to i64
- %arrayidx485 = getelementptr inbounds i32* %cond, i64 %idxprom484
- %40 = load i32* %arrayidx485, align 4
+ %arrayidx485 = getelementptr inbounds i32, i32* %cond, i64 %idxprom484
+ %40 = load i32, i32* %arrayidx485, align 4
%add486 = add nsw i32 %add472, %40
- %incdec.ptr488 = getelementptr inbounds i16* %refptr.11590, i64 15
- %41 = load i16* %incdec.ptr474, align 2
+ %incdec.ptr488 = getelementptr inbounds i16, i16* %refptr.11590, i64 15
+ %41 = load i16, i16* %incdec.ptr474, align 2
%conv489 = zext i16 %41 to i32
%mul490 = mul nsw i32 %conv489, %2
%add491 = add nsw i32 %mul490, %3
@@ -297,15 +297,15 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1458 = select i1 %cmp.i.i1457, i32 %add493, i32 0
%cmp.i4.i1459 = icmp slt i32 %cond.i.i1458, %1
%cond.i5.i1460 = select i1 %cmp.i4.i1459, i32 %cond.i.i1458, i32 %1
- %incdec.ptr495 = getelementptr inbounds i16* %srcptr.41591, i64 15
- %42 = load i16* %incdec.ptr481, align 2
+ %incdec.ptr495 = getelementptr inbounds i16, i16* %srcptr.41591, i64 15
+ %42 = load i16, i16* %incdec.ptr481, align 2
%conv496 = zext i16 %42 to i32
%sub497 = sub nsw i32 %cond.i5.i1460, %conv496
%idxprom498 = sext i32 %sub497 to i64
- %arrayidx499 = getelementptr inbounds i32* %cond, i64 %idxprom498
- %43 = load i32* %arrayidx499, align 4
+ %arrayidx499 = getelementptr inbounds i32, i32* %cond, i64 %idxprom498
+ %43 = load i32, i32* %arrayidx499, align 4
%add500 = add nsw i32 %add486, %43
- %44 = load i16* %incdec.ptr488, align 2
+ %44 = load i16, i16* %incdec.ptr488, align 2
%conv503 = zext i16 %44 to i32
%mul504 = mul nsw i32 %conv503, %2
%add505 = add nsw i32 %mul504, %3
@@ -315,14 +315,14 @@ for.body293: ; preds = %for.body293, %for.c
%cond.i.i1454 = select i1 %cmp.i.i1453, i32 %add507, i32 0
%cmp.i4.i1455 = icmp slt i32 %cond.i.i1454, %1
%cond.i5.i1456 = select i1 %cmp.i4.i1455, i32 %cond.i.i1454, i32 %1
- %45 = load i16* %incdec.ptr495, align 2
+ %45 = load i16, i16* %incdec.ptr495, align 2
%conv510 = zext i16 %45 to i32
%sub511 = sub nsw i32 %cond.i5.i1456, %conv510
%idxprom512 = sext i32 %sub511 to i64
- %arrayidx513 = getelementptr inbounds i32* %cond, i64 %idxprom512
- %46 = load i32* %arrayidx513, align 4
+ %arrayidx513 = getelementptr inbounds i32, i32* %cond, i64 %idxprom512
+ %46 = load i32, i32* %arrayidx513, align 4
%add514 = add nsw i32 %add500, %46
- %add.ptr517 = getelementptr inbounds i16* %refptr.11590, i64 %incdec.ptr502.sum
+ %add.ptr517 = getelementptr inbounds i16, i16* %refptr.11590, i64 %incdec.ptr502.sum
%exitcond1692 = icmp eq i32 undef, 4
br i1 %exitcond1692, label %for.end520, label %for.body293
diff --git a/test/CodeGen/PowerPC/cr1eq-no-extra-moves.ll b/test/CodeGen/PowerPC/cr1eq-no-extra-moves.ll
index afa1ea8e75a1..2b3ab9bcceaa 100644
--- a/test/CodeGen/PowerPC/cr1eq-no-extra-moves.ll
+++ b/test/CodeGen/PowerPC/cr1eq-no-extra-moves.ll
@@ -7,14 +7,14 @@ target triple = "powerpc-unknown-linux"
define void @test(i32 %count) nounwind {
entry:
; CHECK: crxor 6, 6, 6
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 1) nounwind
+ %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 1) nounwind
%cmp2 = icmp sgt i32 %count, 0
br i1 %cmp2, label %for.body, label %for.end
for.body: ; preds = %entry, %for.body
%i.03 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
; CHECK: crxor 6, 6, 6
- %call1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 1) nounwind
+ %call1 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 1) nounwind
%inc = add nsw i32 %i.03, 1
%exitcond = icmp eq i32 %inc, %count
br i1 %exitcond, label %for.end, label %for.body
diff --git a/test/CodeGen/PowerPC/cr1eq.ll b/test/CodeGen/PowerPC/cr1eq.ll
index fb9c9695d176..43cd4544424c 100644
--- a/test/CodeGen/PowerPC/cr1eq.ll
+++ b/test/CodeGen/PowerPC/cr1eq.ll
@@ -9,9 +9,9 @@ target triple = "powerpc-unknown-freebsd"
define void @foo() nounwind {
entry:
; CHECK: crxor 6, 6, 6
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 1)
+ %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 1)
; CHECK: creqv 6, 6, 6
- %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str1, i32 0, i32 0), double 1.100000e+00)
+ %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str1, i32 0, i32 0), double 1.100000e+00)
ret void
}
diff --git a/test/CodeGen/PowerPC/cr_spilling.ll b/test/CodeGen/PowerPC/cr_spilling.ll
index 8bd809fe5948..8ac4e7271ac6 100644
--- a/test/CodeGen/PowerPC/cr_spilling.ll
+++ b/test/CodeGen/PowerPC/cr_spilling.ll
@@ -10,7 +10,7 @@ entry:
br i1 false, label %cond_true94, label %cond_next99
cond_true94: ; preds = %entry
- %tmp98 = call i32 (i8*, ...)* @printf(i8* getelementptr ([3 x i8]* @.str242, i32 0, i32 0), i8* null) ; <i32> [#uses=0]
+ %tmp98 = call i32 (i8*, ...) @printf(i8* getelementptr ([3 x i8], [3 x i8]* @.str242, i32 0, i32 0), i8* null) ; <i32> [#uses=0]
%tmp20971 = icmp sgt i32 %tmp86, 0 ; <i1> [#uses=1]
br i1 %tmp20971, label %bb101, label %bb212
diff --git a/test/CodeGen/PowerPC/crbit-asm.ll b/test/CodeGen/PowerPC/crbit-asm.ll
index 373e334f02bd..36de3435a081 100644
--- a/test/CodeGen/PowerPC/crbit-asm.ll
+++ b/test/CodeGen/PowerPC/crbit-asm.ll
@@ -12,7 +12,7 @@ entry:
; CHECK-LABEL: @testi1
; CHECK-DAG: andi. {{[0-9]+}}, 3, 1
; CHECK-DAG: li [[REG1:[0-9]+]], 0
-; CHECK-DAG: cror [[REG2:[0-9]+]], 1, 1
+; CHECK-DAG: crmove [[REG2:[0-9]+]], 1
; CHECK-DAG: andi. {{[0-9]+}}, 4, 1
; CHECK-DAG: crand [[REG3:[0-9]+]], [[REG2]], 1
; CHECK-DAG: li [[REG4:[0-9]+]], 1
@@ -31,7 +31,7 @@ entry:
; CHECK-LABEL: @testi32
; CHECK-DAG: andi. {{[0-9]+}}, 3, 1
; CHECK-DAG: li [[REG1:[0-9]+]], 0
-; CHECK-DAG: cror [[REG2:[0-9]+]], 1, 1
+; CHECK-DAG: crmove [[REG2:[0-9]+]], 1
; CHECK-DAG: andi. {{[0-9]+}}, 4, 1
; CHECK-DAG: crand [[REG3:[0-9]+]], [[REG2]], 1
; CHECK-DAG: li [[REG4:[0-9]+]], -1
@@ -47,7 +47,7 @@ entry:
; CHECK-LABEL: @testi8
; CHECK-DAG: andi. {{[0-9]+}}, 3, 1
; CHECK-DAG: li [[REG1:[0-9]+]], 0
-; CHECK-DAG: cror [[REG2:[0-9]+]], 1, 1
+; CHECK-DAG: crmove [[REG2:[0-9]+]], 1
; CHECK-DAG: andi. {{[0-9]+}}, 4, 1
; CHECK-DAG: crand [[REG3:[0-9]+]], [[REG2]], 1
; CHECK-DAG: li [[REG4:[0-9]+]], 1
diff --git a/test/CodeGen/PowerPC/crbits.ll b/test/CodeGen/PowerPC/crbits.ll
index 06e90019db76..ab8655c5c8c6 100644
--- a/test/CodeGen/PowerPC/crbits.ll
+++ b/test/CodeGen/PowerPC/crbits.ll
@@ -107,7 +107,7 @@ entry:
; CHECK-LABEL: @test6
; CHECK-DAG: andi. {{[0-9]+}}, 3, 1
; CHECK-DAG: cmpwi {{[0-9]+}}, 5, -2
-; CHECK-DAG: cror [[REG1:[0-9]+]], 1, 1
+; CHECK-DAG: crmove [[REG1:[0-9]+]], 1
; CHECK-DAG: andi. {{[0-9]+}}, 4, 1
; CHECK-DAG: li [[REG2:[0-9]+]], 1
; CHECK-DAG: crorc [[REG4:[0-9]+]], 1,
@@ -145,7 +145,7 @@ entry:
define zeroext i32 @exttest8() #0 {
entry:
- %v0 = load i64* undef, align 8
+ %v0 = load i64, i64* undef, align 8
%sub = sub i64 80, %v0
%div = lshr i64 %sub, 1
%conv13 = trunc i64 %div to i32
diff --git a/test/CodeGen/PowerPC/crsave.ll b/test/CodeGen/PowerPC/crsave.ll
index 602ba94dc094..8121e1b6e639 100644
--- a/test/CodeGen/PowerPC/crsave.ll
+++ b/test/CodeGen/PowerPC/crsave.ll
@@ -9,7 +9,7 @@ entry:
%0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09mfcr $0", "=r,r,r,r,r,~{cr2}"(i32 1, i32 2, i32 3, i32 0) nounwind
store i32 %0, i32* %ret, align 4
call void @foo()
- %1 = load i32* %ret, align 4
+ %1 = load i32, i32* %ret, align 4
ret i32 %1
}
@@ -38,7 +38,7 @@ entry:
%0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09cmpw 3,$2,$2\0A\09cmpw 4,$2,$3\0A\09mfcr $0", "=r,r,r,r,r,~{cr2},~{cr3},~{cr4}"(i32 1, i32 2, i32 3, i32 0) nounwind
store i32 %0, i32* %ret, align 4
call void @foo()
- %1 = load i32* %ret, align 4
+ %1 = load i32, i32* %ret, align 4
ret i32 %1
}
diff --git a/test/CodeGen/PowerPC/crypto_bifs.ll b/test/CodeGen/PowerPC/crypto_bifs.ll
new file mode 100644
index 000000000000..f58935b85b66
--- /dev/null
+++ b/test/CodeGen/PowerPC/crypto_bifs.ll
@@ -0,0 +1,275 @@
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+crypto < %s | FileCheck %s
+; FIXME: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+; FIXME: The original intent was to add a check-next for the blr after every check.
+; However, this currently fails since we don't eliminate stores of the unused
+; locals. These stores are sometimes scheduled after the crypto instruction.
+
+; Function Attrs: nounwind
+define <16 x i8> @test_vpmsumb() #0 {
+entry:
+ %a = alloca <16 x i8>, align 16
+ %b = alloca <16 x i8>, align 16
+ store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
+ store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, <16 x i8>* %a, align 16
+ %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %2 = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %0, <16 x i8> %1)
+ ret <16 x i8> %2
+; CHECK: vpmsumb 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>) #1
+
+; Function Attrs: nounwind
+define <8 x i16> @test_vpmsumh() #0 {
+entry:
+ %a = alloca <8 x i16>, align 16
+ %b = alloca <8 x i16>, align 16
+ store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
+ store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, <8 x i16>* %a, align 16
+ %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %2 = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16> %0, <8 x i16> %1)
+ ret <8 x i16> %2
+; CHECK: vpmsumh 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16>, <8 x i16>) #1
+
+; Function Attrs: nounwind
+define <4 x i32> @test_vpmsumw() #0 {
+entry:
+ %a = alloca <4 x i32>, align 16
+ %b = alloca <4 x i32>, align 16
+ store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
+ store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, <4 x i32>* %a, align 16
+ %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %2 = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32> %0, <4 x i32> %1)
+ ret <4 x i32> %2
+; CHECK: vpmsumw 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32>, <4 x i32>) #1
+
+; Function Attrs: nounwind
+define <2 x i64> @test_vpmsumd() #0 {
+entry:
+ %a = alloca <2 x i64>, align 16
+ %b = alloca <2 x i64>, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64> %0, <2 x i64> %1)
+ ret <2 x i64> %2
+; CHECK: vpmsumd 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64>, <2 x i64>) #1
+
+; Function Attrs: nounwind
+define <2 x i64> @test_vsbox() #0 {
+entry:
+ %a = alloca <2 x i64>, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
+ %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64> %0)
+ ret <2 x i64> %1
+; CHECK: vsbox 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64>) #1
+
+; Function Attrs: nounwind
+define <16 x i8> @test_vpermxorb() #0 {
+entry:
+ %a = alloca <16 x i8>, align 16
+ %b = alloca <16 x i8>, align 16
+ %c = alloca <16 x i8>, align 16
+ store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
+ store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
+ store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %c, align 16
+ %0 = load <16 x i8>, <16 x i8>* %a, align 16
+ %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %2 = load <16 x i8>, <16 x i8>* %c, align 16
+ %3 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ ret <16 x i8> %3
+; CHECK: vpermxor 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8>, <16 x i8>, <16 x i8>) #1
+
+; Function Attrs: nounwind
+define <8 x i16> @test_vpermxorh() #0 {
+entry:
+ %a = alloca <8 x i16>, align 16
+ %b = alloca <8 x i16>, align 16
+ %c = alloca <8 x i16>, align 16
+ store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
+ store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
+ store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %c, align 16
+ %0 = load <8 x i16>, <8 x i16>* %a, align 16
+ %1 = bitcast <8 x i16> %0 to <16 x i8>
+ %2 = load <8 x i16>, <8 x i16>* %b, align 16
+ %3 = bitcast <8 x i16> %2 to <16 x i8>
+ %4 = load <8 x i16>, <8 x i16>* %c, align 16
+ %5 = bitcast <8 x i16> %4 to <16 x i8>
+ %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <8 x i16>
+ ret <8 x i16> %7
+; CHECK: vpermxor 2,
+}
+
+; Function Attrs: nounwind
+define <4 x i32> @test_vpermxorw() #0 {
+entry:
+ %a = alloca <4 x i32>, align 16
+ %b = alloca <4 x i32>, align 16
+ %c = alloca <4 x i32>, align 16
+ store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
+ store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
+ store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %c, align 16
+ %0 = load <4 x i32>, <4 x i32>* %a, align 16
+ %1 = bitcast <4 x i32> %0 to <16 x i8>
+ %2 = load <4 x i32>, <4 x i32>* %b, align 16
+ %3 = bitcast <4 x i32> %2 to <16 x i8>
+ %4 = load <4 x i32>, <4 x i32>* %c, align 16
+ %5 = bitcast <4 x i32> %4 to <16 x i8>
+ %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <4 x i32>
+ ret <4 x i32> %7
+; CHECK: vpermxor 2,
+}
+
+; Function Attrs: nounwind
+define <2 x i64> @test_vpermxord() #0 {
+entry:
+ %a = alloca <2 x i64>, align 16
+ %b = alloca <2 x i64>, align 16
+ %c = alloca <2 x i64>, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %c, align 16
+ %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ %1 = bitcast <2 x i64> %0 to <16 x i8>
+ %2 = load <2 x i64>, <2 x i64>* %b, align 16
+ %3 = bitcast <2 x i64> %2 to <16 x i8>
+ %4 = load <2 x i64>, <2 x i64>* %c, align 16
+ %5 = bitcast <2 x i64> %4 to <16 x i8>
+ %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <2 x i64>
+ ret <2 x i64> %7
+; CHECK: vpermxor 2,
+}
+
+; Function Attrs: nounwind
+define <2 x i64> @test_vcipher() #0 {
+entry:
+ %a = alloca <2 x i64>, align 16
+ %b = alloca <2 x i64>, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64> %0, <2 x i64> %1)
+ ret <2 x i64> %2
+; CHECK: vcipher 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64>, <2 x i64>) #1
+
+; Function Attrs: nounwind
+define <2 x i64> @test_vcipherlast() #0 {
+entry:
+ %a = alloca <2 x i64>, align 16
+ %b = alloca <2 x i64>, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64> %0, <2 x i64> %1)
+ ret <2 x i64> %2
+; CHECK: vcipherlast 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64>, <2 x i64>) #1
+
+; Function Attrs: nounwind
+define <2 x i64> @test_vncipher() #0 {
+entry:
+ %a = alloca <2 x i64>, align 16
+ %b = alloca <2 x i64>, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64> %0, <2 x i64> %1)
+ ret <2 x i64> %2
+; CHECK: vncipher 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64>, <2 x i64>) #1
+
+; Function Attrs: nounwind
+define <2 x i64> @test_vncipherlast() #0 {
+entry:
+ %a = alloca <2 x i64>, align 16
+ %b = alloca <2 x i64>, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64> %0, <2 x i64> %1)
+ ret <2 x i64> %2
+; CHECK: vncipherlast 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64>, <2 x i64>) #1
+
+; Function Attrs: nounwind
+define <4 x i32> @test_vshasigmaw() #0 {
+entry:
+ %a = alloca <4 x i32>, align 16
+ store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
+ %0 = load <4 x i32>, <4 x i32>* %a, align 16
+ %1 = call <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32> %0, i32 1, i32 15)
+ ret <4 x i32> %1
+; CHECK: vshasigmaw 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32>, i32, i32) #1
+
+; Function Attrs: nounwind
+define <2 x i64> @test_vshasigmad() #0 {
+entry:
+ %a = alloca <2 x i64>, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %a, align 16
+ %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64> %0, i32 1, i32 15)
+ ret <2 x i64> %1
+; CHECK: vshasigmad 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64>, i32, i32) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.7.0 (trunk 230949) (llvm/trunk 230946)"}
diff --git a/test/CodeGen/PowerPC/ctrloop-cpsgn.ll b/test/CodeGen/PowerPC/ctrloop-cpsgn.ll
index 2f0440912cc9..fcfcf154ef58 100644
--- a/test/CodeGen/PowerPC/ctrloop-cpsgn.ll
+++ b/test/CodeGen/PowerPC/ctrloop-cpsgn.ll
@@ -10,8 +10,8 @@ entry:
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi ppc_fp128 [ %d, %entry ], [ %conv, %for.body ]
- %arrayidx = getelementptr inbounds ppc_fp128* %n, i32 %i.06
- %0 = load ppc_fp128* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds ppc_fp128, ppc_fp128* %n, i32 %i.06
+ %0 = load ppc_fp128, ppc_fp128* %arrayidx, align 8
%conv = tail call ppc_fp128 @copysignl(ppc_fp128 %x.05, ppc_fp128 %d) nounwind readonly
%inc = add nsw i32 %i.06, 1
%exitcond = icmp eq i32 %inc, 2048
diff --git a/test/CodeGen/PowerPC/ctrloop-fp64.ll b/test/CodeGen/PowerPC/ctrloop-fp64.ll
index 77555ac58de2..6128d7cbdf6f 100644
--- a/test/CodeGen/PowerPC/ctrloop-fp64.ll
+++ b/test/CodeGen/PowerPC/ctrloop-fp64.ll
@@ -10,8 +10,8 @@ entry:
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds double* %n, i32 %i.06
- %0 = load double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, double* %n, i32 %i.06
+ %0 = load double, double* %arrayidx, align 8
%conv = sitofp i64 %x.05 to double
%add = fadd double %conv, %0
%conv1 = fptosi double %add to i64
@@ -31,7 +31,7 @@ for.end: ; preds = %for.body
define i32 @main(i32 %argc, i8** nocapture %argv) {
entry:
- %0 = load double* @init_value, align 8
+ %0 = load double, double* @init_value, align 8
%conv = fptosi double %0 to i64
%broadcast.splatinsert.i = insertelement <2 x i64> undef, i64 %conv, i32 0
%broadcast.splat.i = shufflevector <2 x i64> %broadcast.splatinsert.i, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -39,11 +39,11 @@ entry:
vector.body.i: ; preds = %vector.body.i, %entry
%index.i = phi i32 [ 0, %entry ], [ %index.next.i, %vector.body.i ]
- %next.gep.i = getelementptr [8000 x i64]* @data64, i32 0, i32 %index.i
+ %next.gep.i = getelementptr [8000 x i64], [8000 x i64]* @data64, i32 0, i32 %index.i
%1 = bitcast i64* %next.gep.i to <2 x i64>*
store <2 x i64> %broadcast.splat.i, <2 x i64>* %1, align 8
%next.gep.sum24.i = or i32 %index.i, 2
- %2 = getelementptr [8000 x i64]* @data64, i32 0, i32 %next.gep.sum24.i
+ %2 = getelementptr [8000 x i64], [8000 x i64]* @data64, i32 0, i32 %next.gep.sum24.i
%3 = bitcast i64* %2 to <2 x i64>*
store <2 x i64> %broadcast.splat.i, <2 x i64>* %3, align 8
%index.next.i = add i32 %index.i, 4
diff --git a/test/CodeGen/PowerPC/ctrloop-i64.ll b/test/CodeGen/PowerPC/ctrloop-i64.ll
index 9e01392a458f..5c66a6865ae2 100644
--- a/test/CodeGen/PowerPC/ctrloop-i64.ll
+++ b/test/CodeGen/PowerPC/ctrloop-i64.ll
@@ -10,8 +10,8 @@ entry:
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64* %n, i32 %i.06
- %0 = load i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
+ %0 = load i64, i64* %arrayidx, align 8
%conv = udiv i64 %x.05, %d
%conv1 = add i64 %conv, %0
%inc = add nsw i32 %i.06, 1
@@ -32,8 +32,8 @@ entry:
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64* %n, i32 %i.06
- %0 = load i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
+ %0 = load i64, i64* %arrayidx, align 8
%conv = sdiv i64 %x.05, %d
%conv1 = add i64 %conv, %0
%inc = add nsw i32 %i.06, 1
@@ -54,8 +54,8 @@ entry:
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64* %n, i32 %i.06
- %0 = load i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
+ %0 = load i64, i64* %arrayidx, align 8
%conv = urem i64 %x.05, %d
%conv1 = add i64 %conv, %0
%inc = add nsw i32 %i.06, 1
@@ -76,8 +76,8 @@ entry:
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64* %n, i32 %i.06
- %0 = load i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
+ %0 = load i64, i64* %arrayidx, align 8
%conv = srem i64 %x.05, %d
%conv1 = add i64 %conv, %0
%inc = add nsw i32 %i.06, 1
diff --git a/test/CodeGen/PowerPC/ctrloop-le.ll b/test/CodeGen/PowerPC/ctrloop-le.ll
index 60b0536f9924..bef043703247 100644
--- a/test/CodeGen/PowerPC/ctrloop-le.ll
+++ b/test/CodeGen/PowerPC/ctrloop-le.ll
@@ -18,8 +18,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 28395, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -47,8 +47,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9073, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -76,8 +76,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 21956, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -105,8 +105,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 16782, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -134,8 +134,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 19097, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -163,8 +163,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -192,8 +192,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -221,8 +221,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -250,8 +250,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -279,8 +279,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -309,8 +309,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -339,8 +339,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -369,8 +369,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -399,8 +399,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -429,8 +429,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
diff --git a/test/CodeGen/PowerPC/ctrloop-lt.ll b/test/CodeGen/PowerPC/ctrloop-lt.ll
index a9dc42c1c971..fa910aab4e0d 100644
--- a/test/CodeGen/PowerPC/ctrloop-lt.ll
+++ b/test/CodeGen/PowerPC/ctrloop-lt.ll
@@ -18,8 +18,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 8531, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -48,8 +48,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9152, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -78,8 +78,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 18851, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -107,8 +107,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 25466, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -136,8 +136,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9295, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -165,8 +165,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -194,8 +194,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -223,8 +223,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -252,8 +252,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -281,8 +281,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -310,8 +310,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -339,8 +339,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -368,8 +368,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -397,8 +397,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -426,8 +426,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
diff --git a/test/CodeGen/PowerPC/ctrloop-ne.ll b/test/CodeGen/PowerPC/ctrloop-ne.ll
index 636030a15dd2..13a9909c3d61 100644
--- a/test/CodeGen/PowerPC/ctrloop-ne.ll
+++ b/test/CodeGen/PowerPC/ctrloop-ne.ll
@@ -15,8 +15,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 32623, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -45,8 +45,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 29554, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -75,8 +75,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 15692, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -105,8 +105,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 10449, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -135,8 +135,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 32087, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -164,8 +164,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -194,8 +194,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -224,8 +224,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -254,8 +254,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -284,8 +284,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -313,8 +313,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -343,8 +343,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -373,8 +373,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -403,8 +403,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
@@ -433,8 +433,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
- %0 = load i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
+ %0 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
diff --git a/test/CodeGen/PowerPC/ctrloop-s000.ll b/test/CodeGen/PowerPC/ctrloop-s000.ll
index 4d8ef50501f2..344bbf3b341d 100644
--- a/test/CodeGen/PowerPC/ctrloop-s000.ll
+++ b/test/CodeGen/PowerPC/ctrloop-s000.ll
@@ -35,100 +35,100 @@ for.cond1.preheader: ; preds = %for.end, %entry
for.body3: ; preds = %for.body3, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next.15, %for.body3 ]
- %arrayidx = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv
- %0 = load double* %arrayidx, align 32
+ %arrayidx = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv
+ %0 = load double, double* %arrayidx, align 32
%add = fadd double %0, 1.000000e+00
- %arrayidx5 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv
store double %add, double* %arrayidx5, align 32
%indvars.iv.next11 = or i64 %indvars.iv, 1
- %arrayidx.1 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next11
- %1 = load double* %arrayidx.1, align 8
+ %arrayidx.1 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next11
+ %1 = load double, double* %arrayidx.1, align 8
%add.1 = fadd double %1, 1.000000e+00
- %arrayidx5.1 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next11
+ %arrayidx5.1 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next11
store double %add.1, double* %arrayidx5.1, align 8
%indvars.iv.next.112 = or i64 %indvars.iv, 2
- %arrayidx.2 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.112
- %2 = load double* %arrayidx.2, align 16
+ %arrayidx.2 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.112
+ %2 = load double, double* %arrayidx.2, align 16
%add.2 = fadd double %2, 1.000000e+00
- %arrayidx5.2 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.112
+ %arrayidx5.2 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.112
store double %add.2, double* %arrayidx5.2, align 16
%indvars.iv.next.213 = or i64 %indvars.iv, 3
- %arrayidx.3 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.213
- %3 = load double* %arrayidx.3, align 8
+ %arrayidx.3 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.213
+ %3 = load double, double* %arrayidx.3, align 8
%add.3 = fadd double %3, 1.000000e+00
- %arrayidx5.3 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.213
+ %arrayidx5.3 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.213
store double %add.3, double* %arrayidx5.3, align 8
%indvars.iv.next.314 = or i64 %indvars.iv, 4
- %arrayidx.4 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.314
- %4 = load double* %arrayidx.4, align 32
+ %arrayidx.4 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.314
+ %4 = load double, double* %arrayidx.4, align 32
%add.4 = fadd double %4, 1.000000e+00
- %arrayidx5.4 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.314
+ %arrayidx5.4 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.314
store double %add.4, double* %arrayidx5.4, align 32
%indvars.iv.next.415 = or i64 %indvars.iv, 5
- %arrayidx.5 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.415
- %5 = load double* %arrayidx.5, align 8
+ %arrayidx.5 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.415
+ %5 = load double, double* %arrayidx.5, align 8
%add.5 = fadd double %5, 1.000000e+00
- %arrayidx5.5 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.415
+ %arrayidx5.5 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.415
store double %add.5, double* %arrayidx5.5, align 8
%indvars.iv.next.516 = or i64 %indvars.iv, 6
- %arrayidx.6 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.516
- %6 = load double* %arrayidx.6, align 16
+ %arrayidx.6 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.516
+ %6 = load double, double* %arrayidx.6, align 16
%add.6 = fadd double %6, 1.000000e+00
- %arrayidx5.6 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.516
+ %arrayidx5.6 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.516
store double %add.6, double* %arrayidx5.6, align 16
%indvars.iv.next.617 = or i64 %indvars.iv, 7
- %arrayidx.7 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.617
- %7 = load double* %arrayidx.7, align 8
+ %arrayidx.7 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.617
+ %7 = load double, double* %arrayidx.7, align 8
%add.7 = fadd double %7, 1.000000e+00
- %arrayidx5.7 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.617
+ %arrayidx5.7 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.617
store double %add.7, double* %arrayidx5.7, align 8
%indvars.iv.next.718 = or i64 %indvars.iv, 8
- %arrayidx.8 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.718
- %8 = load double* %arrayidx.8, align 32
+ %arrayidx.8 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.718
+ %8 = load double, double* %arrayidx.8, align 32
%add.8 = fadd double %8, 1.000000e+00
- %arrayidx5.8 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.718
+ %arrayidx5.8 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.718
store double %add.8, double* %arrayidx5.8, align 32
%indvars.iv.next.819 = or i64 %indvars.iv, 9
- %arrayidx.9 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.819
- %9 = load double* %arrayidx.9, align 8
+ %arrayidx.9 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.819
+ %9 = load double, double* %arrayidx.9, align 8
%add.9 = fadd double %9, 1.000000e+00
- %arrayidx5.9 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.819
+ %arrayidx5.9 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.819
store double %add.9, double* %arrayidx5.9, align 8
%indvars.iv.next.920 = or i64 %indvars.iv, 10
- %arrayidx.10 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.920
- %10 = load double* %arrayidx.10, align 16
+ %arrayidx.10 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.920
+ %10 = load double, double* %arrayidx.10, align 16
%add.10 = fadd double %10, 1.000000e+00
- %arrayidx5.10 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.920
+ %arrayidx5.10 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.920
store double %add.10, double* %arrayidx5.10, align 16
%indvars.iv.next.1021 = or i64 %indvars.iv, 11
- %arrayidx.11 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1021
- %11 = load double* %arrayidx.11, align 8
+ %arrayidx.11 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1021
+ %11 = load double, double* %arrayidx.11, align 8
%add.11 = fadd double %11, 1.000000e+00
- %arrayidx5.11 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1021
+ %arrayidx5.11 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1021
store double %add.11, double* %arrayidx5.11, align 8
%indvars.iv.next.1122 = or i64 %indvars.iv, 12
- %arrayidx.12 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1122
- %12 = load double* %arrayidx.12, align 32
+ %arrayidx.12 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1122
+ %12 = load double, double* %arrayidx.12, align 32
%add.12 = fadd double %12, 1.000000e+00
- %arrayidx5.12 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1122
+ %arrayidx5.12 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1122
store double %add.12, double* %arrayidx5.12, align 32
%indvars.iv.next.1223 = or i64 %indvars.iv, 13
- %arrayidx.13 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1223
- %13 = load double* %arrayidx.13, align 8
+ %arrayidx.13 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1223
+ %13 = load double, double* %arrayidx.13, align 8
%add.13 = fadd double %13, 1.000000e+00
- %arrayidx5.13 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1223
+ %arrayidx5.13 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1223
store double %add.13, double* %arrayidx5.13, align 8
%indvars.iv.next.1324 = or i64 %indvars.iv, 14
- %arrayidx.14 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1324
- %14 = load double* %arrayidx.14, align 16
+ %arrayidx.14 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1324
+ %14 = load double, double* %arrayidx.14, align 16
%add.14 = fadd double %14, 1.000000e+00
- %arrayidx5.14 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1324
+ %arrayidx5.14 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1324
store double %add.14, double* %arrayidx5.14, align 16
%indvars.iv.next.1425 = or i64 %indvars.iv, 15
- %arrayidx.15 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1425
- %15 = load double* %arrayidx.15, align 8
+ %arrayidx.15 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1425
+ %15 = load double, double* %arrayidx.15, align 8
%add.15 = fadd double %15, 1.000000e+00
- %arrayidx5.15 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1425
+ %arrayidx5.15 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1425
store double %add.15, double* %arrayidx5.15, align 8
%indvars.iv.next.15 = add i64 %indvars.iv, 16
%lftr.wideiv.15 = trunc i64 %indvars.iv.next.15 to i32
@@ -136,7 +136,7 @@ for.body3: ; preds = %for.body3, %for.con
br i1 %exitcond.15, label %for.end, label %for.body3
for.end: ; preds = %for.body3
- %call = tail call i32 @dummy(double* getelementptr inbounds ([16000 x double]* @X, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @Y, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @Z, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @U, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @V, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]]* @aa, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]]* @bb, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]]* @cc, i64 0, i64 0), double 0.000000e+00) nounwind
+ %call = tail call i32 @dummy(double* getelementptr inbounds ([16000 x double], [16000 x double]* @X, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @Y, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @Z, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @U, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @V, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @aa, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @bb, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @cc, i64 0, i64 0), double 0.000000e+00) nounwind
%inc7 = add nsw i32 %nl.010, 1
%exitcond = icmp eq i32 %inc7, 400000
br i1 %exitcond, label %for.end8, label %for.cond1.preheader
diff --git a/test/CodeGen/PowerPC/ctrloop-sh.ll b/test/CodeGen/PowerPC/ctrloop-sh.ll
index d8e6fc79a665..540f0d6b57e6 100644
--- a/test/CodeGen/PowerPC/ctrloop-sh.ll
+++ b/test/CodeGen/PowerPC/ctrloop-sh.ll
@@ -9,8 +9,8 @@ entry:
for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load i128* %b, align 16
- %1 = load i128* %c, align 16
+ %0 = load i128, i128* %b, align 16
+ %1 = load i128, i128* %c, align 16
%shl = shl i128 %0, %1
store i128 %shl, i128* %a, align 16
%inc = add nsw i32 %i.02, 1
@@ -31,8 +31,8 @@ entry:
for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load i128* %b, align 16
- %1 = load i128* %c, align 16
+ %0 = load i128, i128* %b, align 16
+ %1 = load i128, i128* %c, align 16
%shl = ashr i128 %0, %1
store i128 %shl, i128* %a, align 16
%inc = add nsw i32 %i.02, 1
@@ -53,8 +53,8 @@ entry:
for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load i128* %b, align 16
- %1 = load i128* %c, align 16
+ %0 = load i128, i128* %b, align 16
+ %1 = load i128, i128* %c, align 16
%shl = lshr i128 %0, %1
store i128 %shl, i128* %a, align 16
%inc = add nsw i32 %i.02, 1
diff --git a/test/CodeGen/PowerPC/ctrloop-sums.ll b/test/CodeGen/PowerPC/ctrloop-sums.ll
index d9965f280e72..056ee3448c75 100644
--- a/test/CodeGen/PowerPC/ctrloop-sums.ll
+++ b/test/CodeGen/PowerPC/ctrloop-sums.ll
@@ -23,8 +23,8 @@ for.inc6.us: ; preds = %for.body3.us
for.body3.us: ; preds = %for.body3.us, %for.body3.lr.ph.us
%indvars.iv = phi i64 [ 0, %for.body3.lr.ph.us ], [ %indvars.iv.next, %for.body3.us ]
%Result.111.us = phi i32 [ %Result.014.us, %for.body3.lr.ph.us ], [ %add.us, %for.body3.us ]
- %arrayidx5.us = getelementptr inbounds [100 x i32]* %Array, i64 %indvars.iv16, i64 %indvars.iv
- %0 = load i32* %arrayidx5.us, align 4
+ %arrayidx5.us = getelementptr inbounds [100 x i32], [100 x i32]* %Array, i64 %indvars.iv16, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx5.us, align 4
%add.us = add nsw i32 %0, %Result.111.us
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -59,7 +59,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv33 = phi i64 [ 0, %entry ], [ %indvars.iv.next34, %for.body ]
%0 = trunc i64 %indvars.iv33 to i32
%sub = sub i32 0, %0
- %arrayidx2 = getelementptr inbounds [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv33, i64 %indvars.iv33
+ %arrayidx2 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv33, i64 %indvars.iv33
store i32 %sub, i32* %arrayidx2, align 4
%indvars.iv.next34 = add i64 %indvars.iv33, 1
%lftr.wideiv35 = trunc i64 %indvars.iv.next34 to i32
@@ -79,7 +79,7 @@ for.body8: ; preds = %for.inc14, %for.con
if.then: ; preds = %for.body8
%3 = add i64 %indvars.iv, %indvars.iv29
- %arrayidx13 = getelementptr inbounds [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv29, i64 %indvars.iv
+ %arrayidx13 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv29, i64 %indvars.iv
%4 = trunc i64 %3 to i32
store i32 %4, i32* %arrayidx13, align 4
br label %for.inc14
@@ -105,8 +105,8 @@ for.inc6.us.i: ; preds = %for.body3.us.i
for.body3.us.i: ; preds = %for.body3.lr.ph.us.i, %for.body3.us.i
%indvars.iv.i = phi i64 [ 0, %for.body3.lr.ph.us.i ], [ %indvars.iv.next.i, %for.body3.us.i ]
%Result.111.us.i = phi i32 [ %Result.014.us.i, %for.body3.lr.ph.us.i ], [ %add.us.i, %for.body3.us.i ]
- %arrayidx5.us.i = getelementptr inbounds [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv16.i, i64 %indvars.iv.i
- %5 = load i32* %arrayidx5.us.i, align 4
+ %arrayidx5.us.i = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv16.i, i64 %indvars.iv.i
+ %5 = load i32, i32* %arrayidx5.us.i, align 4
%add.us.i = add nsw i32 %5, %Result.111.us.i
%indvars.iv.next.i = add i64 %indvars.iv.i, 1
%lftr.wideiv = trunc i64 %indvars.iv.next.i to i32
@@ -119,7 +119,7 @@ for.body3.lr.ph.us.i: ; preds = %for.inc17, %for.inc
br label %for.body3.us.i
SumArray.exit: ; preds = %for.inc6.us.i
- %call20 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([23 x i8]* @.str, i64 0, i64 0), i32 100, i32 100, i32 %add.us.i) nounwind
+ %call20 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i64 0, i64 0), i32 100, i32 100, i32 %add.us.i) nounwind
ret i32 0
; CHECK: @main
diff --git a/test/CodeGen/PowerPC/ctrloops.ll b/test/CodeGen/PowerPC/ctrloops.ll
index ccab7cb7a0ba..fff9e20d2626 100644
--- a/test/CodeGen/PowerPC/ctrloops.ll
+++ b/test/CodeGen/PowerPC/ctrloops.ll
@@ -10,7 +10,7 @@ entry:
for.body: ; preds = %for.body, %entry
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load volatile i32* @a, align 4
+ %0 = load volatile i32, i32* @a, align 4
%add = add nsw i32 %0, %c
store volatile i32 %add, i32* @a, align 4
%inc = add nsw i32 %i.01, 1
@@ -34,7 +34,7 @@ entry:
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %0 = load volatile i32* @a, align 4
+ %0 = load volatile i32, i32* @a, align 4
%add = add nsw i32 %0, %c
store volatile i32 %add, i32* @a, align 4
%inc = add nsw i32 %i.02, 1
@@ -58,7 +58,7 @@ entry:
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%mul = mul nsw i32 %i.02, %c
- %0 = load volatile i32* @a, align 4
+ %0 = load volatile i32, i32* @a, align 4
%add = add nsw i32 %0, %mul
store volatile i32 %add, i32* @a, align 4
%inc = add nsw i32 %i.02, 1
diff --git a/test/CodeGen/PowerPC/cttz-ctlz-spec.ll b/test/CodeGen/PowerPC/cttz-ctlz-spec.ll
deleted file mode 100644
index 13b017a746ec..000000000000
--- a/test/CodeGen/PowerPC/cttz-ctlz-spec.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: opt -S -codegenprepare < %s | FileCheck %s
-target datalayout = "E-m:e-i64:64-n32:64"
-target triple = "powerpc64-unknown-linux-gnu"
-
-define i64 @test1(i64 %A) {
-; CHECK-LABEL: @test1(
-; CHECK: [[CTLZ:%[A-Za-z0-9]+]] = call i64 @llvm.ctlz.i64(i64 %A, i1 false)
-; CHECK-NEXT: ret i64 [[CTLZ]]
-entry:
- %tobool = icmp eq i64 %A, 0
- br i1 %tobool, label %cond.end, label %cond.true
-
-cond.true: ; preds = %entry
- %0 = tail call i64 @llvm.ctlz.i64(i64 %A, i1 true)
- br label %cond.end
-
-cond.end: ; preds = %entry, %cond.true
- %cond = phi i64 [ %0, %cond.true ], [ 64, %entry ]
- ret i64 %cond
-}
-
-define i64 @test1b(i64 %A) {
-; CHECK-LABEL: @test1b(
-; CHECK: [[CTTZ:%[A-Za-z0-9]+]] = call i64 @llvm.cttz.i64(i64 %A, i1 false)
-; CHECK-NEXT: ret i64 [[CTTZ]]
-entry:
- %tobool = icmp eq i64 %A, 0
- br i1 %tobool, label %cond.end, label %cond.true
-
-cond.true: ; preds = %entry
- %0 = tail call i64 @llvm.cttz.i64(i64 %A, i1 true)
- br label %cond.end
-
-cond.end: ; preds = %entry, %cond.true
- %cond = phi i64 [ %0, %cond.true ], [ 64, %entry ]
- ret i64 %cond
-}
-
-declare i64 @llvm.ctlz.i64(i64, i1)
-declare i64 @llvm.cttz.i64(i64, i1)
-
diff --git a/test/CodeGen/PowerPC/cttz.ll b/test/CodeGen/PowerPC/cttz.ll
index 3757fa3e2f29..60de982d91a1 100644
--- a/test/CodeGen/PowerPC/cttz.ll
+++ b/test/CodeGen/PowerPC/cttz.ll
@@ -6,7 +6,7 @@ declare i32 @llvm.cttz.i32(i32, i1)
define i32 @bar(i32 %x) {
entry:
; CHECK: @bar
-; CHECK: cntlzw
+; CHECK: cntlz
%tmp.1 = call i32 @llvm.cttz.i32( i32 %x, i1 true ) ; <i32> [#uses=1]
ret i32 %tmp.1
}
diff --git a/test/CodeGen/PowerPC/dbg.ll b/test/CodeGen/PowerPC/dbg.ll
index bd153674eab1..87914025b733 100644
--- a/test/CodeGen/PowerPC/dbg.ll
+++ b/test/CodeGen/PowerPC/dbg.ll
@@ -6,8 +6,8 @@ target triple = "powerpc64-unknown-linux-gnu"
define i32 @main(i32 %argc, i8** nocapture %argv) nounwind readnone {
entry:
- tail call void @llvm.dbg.value(metadata i32 %argc, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !17
- tail call void @llvm.dbg.value(metadata i8** %argv, i64 0, metadata !16, metadata !{!"0x102"}), !dbg !18
+ tail call void @llvm.dbg.value(metadata i32 %argc, i64 0, metadata !15, metadata !DIExpression()), !dbg !17
+ tail call void @llvm.dbg.value(metadata i8** %argv, i64 0, metadata !16, metadata !DIExpression()), !dbg !18
%add = add nsw i32 %argc, 1, !dbg !19
ret i32 %add, !dbg !19
}
@@ -17,23 +17,23 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!22}
-!0 = !{!"0x11\0012\00clang version 3.1\001\00\000\00\000", !21, !1, !1, !3, !1, !""} ; [ DW_TAG_compile_unit ]
+!0 = !DICompileUnit(language: DW_LANG_C99, producer: "clang version 3.1", isOptimized: true, emissionKind: 0, file: !21, enums: !1, retainedTypes: !1, subprograms: !3, globals: !1, imports: !1)
!1 = !{}
!3 = !{!5}
-!5 = !{!"0x2e\00main\00main\00\001\000\001\000\006\00256\001\000", !21, null, !7, null, i32 (i32, i8**)* @main, null, null, !13} ; [ DW_TAG_subprogram ]
-!6 = !{!"0x29", !21} ; [ DW_TAG_file_type ]
-!7 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!5 = !DISubprogram(name: "main", line: 1, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, file: !21, scope: null, type: !7, function: i32 (i32, i8**)* @main, variables: !13)
+!6 = !DIFile(filename: "dbg.c", directory: "/src")
+!7 = !DISubroutineType(types: !8)
!8 = !{!9, !9, !10}
-!9 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ]
-!10 = !{!"0xf\00\000\0064\0064\000\000", null, null, !11} ; [ DW_TAG_pointer_type ]
-!11 = !{!"0xf\00\000\0064\0064\000\000", null, null, !12} ; [ DW_TAG_pointer_type ]
-!12 = !{!"0x24\00char\000\008\008\000\000\008", null, null} ; [ DW_TAG_base_type ]
+!9 = !DIBasicType(tag: DW_TAG_base_type, name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!10 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !11)
+!11 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !12)
+!12 = !DIBasicType(tag: DW_TAG_base_type, name: "char", size: 8, align: 8, encoding: DW_ATE_unsigned_char)
!13 = !{!15, !16}
-!15 = !{!"0x101\00argc\0016777217\000", !5, !6, !9} ; [ DW_TAG_arg_variable ]
-!16 = !{!"0x101\00argv\0033554433\000", !5, !6, !10} ; [ DW_TAG_arg_variable ]
-!17 = !MDLocation(line: 1, column: 14, scope: !5)
-!18 = !MDLocation(line: 1, column: 26, scope: !5)
-!19 = !MDLocation(line: 2, column: 3, scope: !20)
-!20 = !{!"0xb\001\0034\000", !21, !5} ; [ DW_TAG_lexical_block ]
-!21 = !{!"dbg.c", !"/src"}
-!22 = !{i32 1, !"Debug Info Version", i32 2}
+!15 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "argc", line: 1, arg: 1, scope: !5, file: !6, type: !9)
+!16 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "argv", line: 1, arg: 2, scope: !5, file: !6, type: !10)
+!17 = !DILocation(line: 1, column: 14, scope: !5)
+!18 = !DILocation(line: 1, column: 26, scope: !5)
+!19 = !DILocation(line: 2, column: 3, scope: !20)
+!20 = distinct !DILexicalBlock(line: 1, column: 34, file: !21, scope: !5)
+!21 = !DIFile(filename: "dbg.c", directory: "/src")
+!22 = !{i32 1, !"Debug Info Version", i32 3}
diff --git a/test/CodeGen/PowerPC/dcbt-sched.ll b/test/CodeGen/PowerPC/dcbt-sched.ll
index dfa1b75bd7db..51d58b47ee96 100644
--- a/test/CodeGen/PowerPC/dcbt-sched.ll
+++ b/test/CodeGen/PowerPC/dcbt-sched.ll
@@ -4,9 +4,9 @@ target triple = "powerpc64-unknown-linux-gnu"
define i8 @test1(i8* noalias %a, i8* noalias %b, i8* noalias %c) nounwind {
entry:
- %q = load i8* %b
+ %q = load i8, i8* %b
call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 1)
- %r = load i8* %c
+ %r = load i8, i8* %c
%s = add i8 %q, %r
ret i8 %s
}
diff --git a/test/CodeGen/PowerPC/delete-node.ll b/test/CodeGen/PowerPC/delete-node.ll
index a26c21154824..999af54a8a94 100644
--- a/test/CodeGen/PowerPC/delete-node.ll
+++ b/test/CodeGen/PowerPC/delete-node.ll
@@ -9,11 +9,11 @@ entry:
br label %bb1
bb1: ; preds = %bb1, %entry
- %0 = load i16* null, align 2 ; <i16> [#uses=1]
+ %0 = load i16, i16* null, align 2 ; <i16> [#uses=1]
%1 = ashr i16 %0, 4 ; <i16> [#uses=1]
%2 = sext i16 %1 to i32 ; <i32> [#uses=1]
- %3 = getelementptr i8* null, i32 %2 ; <i8*> [#uses=1]
- %4 = load i8* %3, align 1 ; <i8> [#uses=1]
+ %3 = getelementptr i8, i8* null, i32 %2 ; <i8*> [#uses=1]
+ %4 = load i8, i8* %3, align 1 ; <i8> [#uses=1]
%5 = zext i8 %4 to i32 ; <i32> [#uses=1]
%6 = shl i32 %5, 24 ; <i32> [#uses=1]
%7 = or i32 0, %6 ; <i32> [#uses=1]
diff --git a/test/CodeGen/PowerPC/div-e-32.ll b/test/CodeGen/PowerPC/div-e-32.ll
new file mode 100644
index 000000000000..588756bb8dd8
--- /dev/null
+++ b/test/CodeGen/PowerPC/div-e-32.ll
@@ -0,0 +1,31 @@
+; RUN: llc -mtriple=powerpc-unknown-linux-gnu -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+
+; Function Attrs: nounwind
+define signext i32 @test1() #0 {
+entry:
+ %0 = call i32 @llvm.ppc.divwe(i32 32, i32 16)
+ ret i32 %0
+; CHECK: divwe 3, 4, 3
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ppc.divwe(i32, i32) #1
+
+; Function Attrs: nounwind
+define signext i32 @test2() #0 {
+entry:
+ %0 = call i32 @llvm.ppc.divweu(i32 32, i32 16)
+ ret i32 %0
+; CHECK: divweu 3, 4, 3
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ppc.divweu(i32, i32) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.7.0 (trunk 231831) (llvm/trunk 231828:231843M)"}
diff --git a/test/CodeGen/PowerPC/div-e-all.ll b/test/CodeGen/PowerPC/div-e-all.ll
new file mode 100644
index 000000000000..912deeb2b3e0
--- /dev/null
+++ b/test/CodeGen/PowerPC/div-e-all.ll
@@ -0,0 +1,54 @@
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+
+; Function Attrs: nounwind
+define signext i32 @test1() #0 {
+entry:
+ %0 = call i32 @llvm.ppc.divwe(i32 32, i32 16)
+ ret i32 %0
+; CHECK: divwe 3, 4, 3
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ppc.divwe(i32, i32) #1
+
+; Function Attrs: nounwind
+define signext i32 @test2() #0 {
+entry:
+ %0 = call i32 @llvm.ppc.divweu(i32 32, i32 16)
+ ret i32 %0
+; CHECK: divweu 3, 4, 3
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ppc.divweu(i32, i32) #1
+
+; Function Attrs: nounwind
+define i64 @test3() #0 {
+entry:
+ %0 = call i64 @llvm.ppc.divde(i64 32, i64 16)
+ ret i64 %0
+; CHECK: divde 3, 4, 3
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.ppc.divde(i64, i64) #1
+
+; Function Attrs: nounwind
+define i64 @test4() #0 {
+entry:
+ %0 = call i64 @llvm.ppc.divdeu(i64 32, i64 16)
+ ret i64 %0
+; CHECK: divdeu 3, 4, 3
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.ppc.divdeu(i64, i64) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.7.0 (trunk 231831) (llvm/trunk 231828:231843M)"}
diff --git a/test/CodeGen/PowerPC/dyn-alloca-aligned.ll b/test/CodeGen/PowerPC/dyn-alloca-aligned.ll
index a5d45b8e94a0..98b0a175f369 100644
--- a/test/CodeGen/PowerPC/dyn-alloca-aligned.ll
+++ b/test/CodeGen/PowerPC/dyn-alloca-aligned.ll
@@ -11,12 +11,12 @@ entry:
%0 = zext i32 %n to i64
%vla = alloca i32, i64 %0, align 128
%vla1 = alloca i32, i64 %0, align 128
- %a2 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
- %1 = load i32* %a2, align 4
+ %a2 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
+ %1 = load i32, i32* %a2, align 4
store i32 %1, i32* %vla1, align 128
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
- %2 = load i32* %b, align 4
- %arrayidx3 = getelementptr inbounds i32* %vla1, i64 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
+ %2 = load i32, i32* %b, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %vla1, i64 1
store i32 %2, i32* %arrayidx3, align 4
call void @bar(i32* %vla1, i32* %vla) #0
ret void
diff --git a/test/CodeGen/PowerPC/early-ret.ll b/test/CodeGen/PowerPC/early-ret.ll
index 7d3e225a1e29..52cf464b9fd5 100644
--- a/test/CodeGen/PowerPC/early-ret.ll
+++ b/test/CodeGen/PowerPC/early-ret.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -45,4 +45,37 @@ if.end3: ; preds = %if.then, %if.then2,
; CHECK: blr
}
+
+@.str0 = private unnamed_addr constant [2 x i8] c"a\00"
+@.str1 = private unnamed_addr constant [2 x i8] c"b\00"
+@.str2 = private unnamed_addr constant [2 x i8] c"c\00"
+@.str3 = private unnamed_addr constant [2 x i8] c"d\00"
+@.str4 = private unnamed_addr constant [2 x i8] c"e\00"
+define i8* @dont_assert(i32 %x) {
+; LLVM would assert due to moving an early return into the jump table block and
+; removing one of its predecessors despite that block ending with an indirect
+; branch.
+entry:
+ switch i32 %x, label %sw.epilog [
+ i32 1, label %return
+ i32 2, label %sw.bb1
+ i32 3, label %sw.bb2
+ i32 4, label %sw.bb3
+ i32 255, label %sw.bb4
+ ]
+sw.bb1: br label %return
+sw.bb2: br label %return
+sw.bb3: br label %return
+sw.bb4: br label %return
+sw.epilog: br label %return
+return:
+ %retval.0 = phi i8* [ null, %sw.epilog ],
+ [ getelementptr inbounds ([2 x i8], [2 x i8]* @.str4, i64 0, i64 0), %sw.bb4 ],
+ [ getelementptr inbounds ([2 x i8], [2 x i8]* @.str3, i64 0, i64 0), %sw.bb3 ],
+ [ getelementptr inbounds ([2 x i8], [2 x i8]* @.str2, i64 0, i64 0), %sw.bb2 ],
+ [ getelementptr inbounds ([2 x i8], [2 x i8]* @.str1, i64 0, i64 0), %sw.bb1 ],
+ [ getelementptr inbounds ([2 x i8], [2 x i8]* @.str0, i64 0, i64 0), %entry ]
+ ret i8* %retval.0
+}
+
attributes #0 = { nounwind }
diff --git a/test/CodeGen/PowerPC/ec-input.ll b/test/CodeGen/PowerPC/ec-input.ll
new file mode 100644
index 000000000000..a57f69be12da
--- /dev/null
+++ b/test/CodeGen/PowerPC/ec-input.ll
@@ -0,0 +1,155 @@
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
+; This test case used to fail both with and without -verify-machineinstrs
+; (-verify-machineinstrs would catch the problem right after instruction
+; scheduling because the live intervals would not be right for the registers
+; that were both inputs to the inline asm and also early-clobber outputs).
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713 = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker.118.8248.32638.195238.200116.211498.218002.221254.222880.224506.226132.240766.244018.245644.248896.260278.271660.281416.283042.302554.304180.325318.326944.344712*, %struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713*, i32, i32, i64, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
+%struct._IO_marker.118.8248.32638.195238.200116.211498.218002.221254.222880.224506.226132.240766.244018.245644.248896.260278.271660.281416.283042.302554.304180.325318.326944.344712 = type { %struct._IO_marker.118.8248.32638.195238.200116.211498.218002.221254.222880.224506.226132.240766.244018.245644.248896.260278.271660.281416.283042.302554.304180.325318.326944.344712*, %struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713*, i32 }
+
+@.str236 = external unnamed_addr constant [121 x i8], align 1
+@.str294 = external unnamed_addr constant [49 x i8], align 1
+
+; Function Attrs: nounwind
+declare void @fprintf(%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713* nocapture, i8* nocapture readonly, ...) #0
+
+; Function Attrs: inlinehint nounwind
+define void @_ZN4PAMI6Device2MU15ResourceManager46calculatePerCoreMUResourcesBasedOnAvailabilityEv() #1 align 2 {
+; CHECK-LABEL: @_ZN4PAMI6Device2MU15ResourceManager46calculatePerCoreMUResourcesBasedOnAvailabilityEv
+; CHECK: sc
+
+entry:
+ %numFreeResourcesInSubgroup = alloca i32, align 4
+ %0 = ptrtoint i32* %numFreeResourcesInSubgroup to i64
+ br label %for.cond2.preheader
+
+for.cond2.preheader: ; preds = %if.end23.3, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %if.end23.3 ]
+ %group.098 = phi i32 [ 0, %entry ], [ %inc37, %if.end23.3 ]
+ %minFreeBatIdsPerCore.097 = phi i64 [ 32, %entry ], [ %numFreeBatIdsInGroup.0.minFreeBatIdsPerCore.0, %if.end23.3 ]
+ %minFreeRecFifosPerCore.096 = phi i64 [ 16, %entry ], [ %minFreeRecFifosPerCore.1, %if.end23.3 ]
+ %minFreeInjFifosPerCore.095 = phi i64 [ 32, %entry ], [ %numFreeInjFifosInGroup.0.minFreeInjFifosPerCore.0, %if.end23.3 ]
+ %cmp5 = icmp eq i32 undef, 0
+ br i1 %cmp5, label %if.end, label %if.then
+
+if.then: ; preds = %if.end23.2, %if.end23.1, %if.end23, %for.cond2.preheader
+ unreachable
+
+if.end: ; preds = %for.cond2.preheader
+ %1 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %conv = zext i32 %1 to i64
+ %2 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1034, i64 %indvars.iv, i64 %0, i64 undef) #2
+ %cmp10 = icmp eq i32 0, 0
+ br i1 %cmp10, label %if.end14, label %if.then11
+
+if.then11: ; preds = %if.end.3, %if.end.2, %if.end.1, %if.end
+ unreachable
+
+if.end14: ; preds = %if.end
+ %3 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %cmp19 = icmp eq i32 undef, 0
+ br i1 %cmp19, label %if.end23, label %if.then20
+
+if.then20: ; preds = %if.end14.3, %if.end14.2, %if.end14.1, %if.end14
+ %conv4.i65.lcssa = phi i32 [ undef, %if.end14 ], [ 0, %if.end14.1 ], [ %conv4.i65.2, %if.end14.2 ], [ %conv4.i65.3, %if.end14.3 ]
+ call void (%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713*, i8*, ...) @fprintf(%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713* undef, i8* getelementptr inbounds ([121 x i8], [121 x i8]* @.str236, i64 0, i64 0), i32 signext 2503) #3
+ call void (%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713*, i8*, ...) @fprintf(%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713* undef, i8* getelementptr inbounds ([49 x i8], [49 x i8]* @.str294, i64 0, i64 0), i32 signext %conv4.i65.lcssa) #3
+ unreachable
+
+if.end23: ; preds = %if.end14
+ %conv15 = zext i32 %3 to i64
+ %4 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %conv24 = zext i32 %4 to i64
+ %5 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1033, i64 0, i64 %0, i64 undef) #2
+ %cmp5.1 = icmp eq i32 0, 0
+ br i1 %cmp5.1, label %if.end.1, label %if.then
+
+for.end38: ; preds = %if.end23.3
+ ret void
+
+if.end.1: ; preds = %if.end23
+ %6 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %conv.1 = zext i32 %6 to i64
+ %add.1 = add nuw nsw i64 %conv.1, %conv
+ %7 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1034, i64 0, i64 %0, i64 undef) #2
+ %cmp10.1 = icmp eq i32 undef, 0
+ br i1 %cmp10.1, label %if.end14.1, label %if.then11
+
+if.end14.1: ; preds = %if.end.1
+ %8 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %cmp19.1 = icmp eq i32 0, 0
+ br i1 %cmp19.1, label %if.end23.1, label %if.then20
+
+if.end23.1: ; preds = %if.end14.1
+ %conv15.1 = zext i32 %8 to i64
+ %add16.1 = add nuw nsw i64 %conv15.1, %conv15
+ %9 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %conv24.1 = zext i32 %9 to i64
+ %add25.1 = add nuw nsw i64 %conv24.1, %conv24
+ %cmp5.2 = icmp eq i32 undef, 0
+ br i1 %cmp5.2, label %if.end.2, label %if.then
+
+if.end.2: ; preds = %if.end23.1
+ %10 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %conv.2 = zext i32 %10 to i64
+ %add.2 = add nuw nsw i64 %conv.2, %add.1
+ %11 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1034, i64 undef, i64 %0, i64 undef) #2
+ %cmp10.2 = icmp eq i32 0, 0
+ br i1 %cmp10.2, label %if.end14.2, label %if.then11
+
+if.end14.2: ; preds = %if.end.2
+ %12 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %13 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1035, i64 undef, i64 %0, i64 0) #2
+ %asmresult1.i64.2 = extractvalue { i64, i64, i64, i64 } %13, 1
+ %conv4.i65.2 = trunc i64 %asmresult1.i64.2 to i32
+ %cmp19.2 = icmp eq i32 %conv4.i65.2, 0
+ br i1 %cmp19.2, label %if.end23.2, label %if.then20
+
+if.end23.2: ; preds = %if.end14.2
+ %conv15.2 = zext i32 %12 to i64
+ %add16.2 = add nuw nsw i64 %conv15.2, %add16.1
+ %14 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %conv24.2 = zext i32 %14 to i64
+ %add25.2 = add nuw nsw i64 %conv24.2, %add25.1
+ %cmp5.3 = icmp eq i32 0, 0
+ br i1 %cmp5.3, label %if.end.3, label %if.then
+
+if.end.3: ; preds = %if.end23.2
+ %15 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %conv.3 = zext i32 %15 to i64
+ %add.3 = add nuw nsw i64 %conv.3, %add.2
+ %cmp10.3 = icmp eq i32 undef, 0
+ br i1 %cmp10.3, label %if.end14.3, label %if.then11
+
+if.end14.3: ; preds = %if.end.3
+ %16 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %17 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1035, i64 0, i64 %0, i64 0) #2
+ %asmresult1.i64.3 = extractvalue { i64, i64, i64, i64 } %17, 1
+ %conv4.i65.3 = trunc i64 %asmresult1.i64.3 to i32
+ %cmp19.3 = icmp eq i32 %conv4.i65.3, 0
+ br i1 %cmp19.3, label %if.end23.3, label %if.then20
+
+if.end23.3: ; preds = %if.end14.3
+ %conv15.3 = zext i32 %16 to i64
+ %add16.3 = add nuw nsw i64 %conv15.3, %add16.2
+ %add25.3 = add nuw nsw i64 0, %add25.2
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
+ %cmp27 = icmp ult i64 %add.3, %minFreeInjFifosPerCore.095
+ %numFreeInjFifosInGroup.0.minFreeInjFifosPerCore.0 = select i1 %cmp27, i64 %add.3, i64 %minFreeInjFifosPerCore.095
+ %cmp30 = icmp ult i64 %add16.3, %minFreeRecFifosPerCore.096
+ %minFreeRecFifosPerCore.1 = select i1 %cmp30, i64 %add16.3, i64 %minFreeRecFifosPerCore.096
+ %cmp33 = icmp ult i64 %add25.3, %minFreeBatIdsPerCore.097
+ %numFreeBatIdsInGroup.0.minFreeBatIdsPerCore.0 = select i1 %cmp33, i64 %add25.3, i64 %minFreeBatIdsPerCore.097
+ %inc37 = add nuw nsw i32 %group.098, 1
+ %cmp = icmp ult i32 %inc37, 16
+ br i1 %cmp, label %for.cond2.preheader, label %for.end38
+}
+
+attributes #0 = { nounwind "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "target-cpu"="a2q" }
+attributes #1 = { inlinehint nounwind "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "target-cpu"="a2q" }
+attributes #2 = { nounwind }
+attributes #3 = { cold nounwind }
+
diff --git a/test/CodeGen/PowerPC/empty-functions.ll b/test/CodeGen/PowerPC/empty-functions.ll
index e32a8472b835..aa760d82b1c7 100644
--- a/test/CodeGen/PowerPC/empty-functions.ll
+++ b/test/CodeGen/PowerPC/empty-functions.ll
@@ -17,16 +17,18 @@ entry:
; An empty function is perfectly fine on ELF.
; LINUX-NO-FP: func:
+; LINUX-NO-FP-NEXT: {{^}}.L[[BEGIN:.*]]:{{$}}
; LINUX-NO-FP-NEXT: .cfi_startproc
; LINUX-NO-FP-NEXT: {{^}}#
-; LINUX-NO-FP-NEXT: {{^}}.L{{.*}}:{{$}}
-; LINUX-NO-FP-NEXT: .size func, .L{{.*}}-func
+; LINUX-NO-FP-NEXT: {{^}}.L[[END:.*]]:{{$}}
+; LINUX-NO-FP-NEXT: .size func, .L[[END]]-.L[[BEGIN]]
; LINUX-NO-FP-NEXT: .cfi_endproc
; A cfi directive can point to the end of a function. It (and in fact the
; entire body) could be optimized out because of the unreachable, but we
; don't do it right now.
; LINUX-FP: func:
+; LINUX-FP-NEXT: {{^}}.L[[BEGIN:.*]]:{{$}}
; LINUX-FP-NEXT: .cfi_startproc
; LINUX-FP-NEXT: {{^}}#
; LINUX-FP-NEXT: stw 31, -4(1)
@@ -38,6 +40,6 @@ entry:
; LINUX-FP-NEXT: mr 31, 1
; LINUX-FP-NEXT:{{^}}.L{{.*}}:{{$}}
; LINUX-FP-NEXT: .cfi_def_cfa_register r31
-; LINUX-FP-NEXT:{{^}}.L{{.*}}:{{$}}
-; LINUX-FP-NEXT: .size func, .Ltmp3-func
+; LINUX-FP-NEXT: {{^}}.L[[END:.*]]:{{$}}
+; LINUX-FP-NEXT: .size func, .L[[END]]-.L[[BEGIN]]
; LINUX-FP-NEXT: .cfi_endproc
diff --git a/test/CodeGen/PowerPC/emptystruct.ll b/test/CodeGen/PowerPC/emptystruct.ll
index 47cfadd0a7bb..66cada14f9d2 100644
--- a/test/CodeGen/PowerPC/emptystruct.ll
+++ b/test/CodeGen/PowerPC/emptystruct.ll
@@ -18,7 +18,7 @@ define void @callee(%struct.empty* noalias sret %agg.result, %struct.empty* byva
entry:
%a2.addr = alloca %struct.empty*, align 8
store %struct.empty* %a2, %struct.empty** %a2.addr, align 8
- %0 = load %struct.empty** %a2.addr, align 8
+ %0 = load %struct.empty*, %struct.empty** %a2.addr, align 8
%1 = bitcast %struct.empty* %agg.result to i8*
%2 = bitcast %struct.empty* %0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 0, i32 1, i1 false)
diff --git a/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll b/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll
index f99089b3bb02..f90eccb359a8 100644
--- a/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll
+++ b/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll
@@ -69,9 +69,9 @@ define i32 @NAND1(i32 %X, i32 %Y) nounwind {
}
define void @VNOR(<4 x float>* %P, <4 x float>* %Q) nounwind {
- %tmp = load <4 x float>* %P ; <<4 x float>> [#uses=1]
+ %tmp = load <4 x float>, <4 x float>* %P ; <<4 x float>> [#uses=1]
%tmp.upgrd.1 = bitcast <4 x float> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x float>* %Q ; <<4 x float>> [#uses=1]
+ %tmp2 = load <4 x float>, <4 x float>* %Q ; <<4 x float>> [#uses=1]
%tmp2.upgrd.2 = bitcast <4 x float> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp3 = or <4 x i32> %tmp.upgrd.1, %tmp2.upgrd.2 ; <<4 x i32>> [#uses=1]
%tmp4 = xor <4 x i32> %tmp3, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
@@ -81,9 +81,9 @@ define void @VNOR(<4 x float>* %P, <4 x float>* %Q) nounwind {
}
define void @VANDC(<4 x float>* %P, <4 x float>* %Q) nounwind {
- %tmp = load <4 x float>* %P ; <<4 x float>> [#uses=1]
+ %tmp = load <4 x float>, <4 x float>* %P ; <<4 x float>> [#uses=1]
%tmp.upgrd.4 = bitcast <4 x float> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x float>* %Q ; <<4 x float>> [#uses=1]
+ %tmp2 = load <4 x float>, <4 x float>* %Q ; <<4 x float>> [#uses=1]
%tmp2.upgrd.5 = bitcast <4 x float> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4 = xor <4 x i32> %tmp2.upgrd.5, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
%tmp3 = and <4 x i32> %tmp.upgrd.4, %tmp4 ; <<4 x i32>> [#uses=1]
diff --git a/test/CodeGen/PowerPC/extra-toc-reg-deps.ll b/test/CodeGen/PowerPC/extra-toc-reg-deps.ll
new file mode 100644
index 000000000000..1056c5a57aac
--- /dev/null
+++ b/test/CodeGen/PowerPC/extra-toc-reg-deps.ll
@@ -0,0 +1,430 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+%"class.Foam::messageStream.6" = type <{ %"class.Foam::string.5", i32, i32, i32, [4 x i8] }>
+%"class.Foam::string.5" = type { %"class.std::basic_string.4" }
+%"class.std::basic_string.4" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.3" }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.3" = type { i8* }
+%"class.Foam::prefixOSstream.27" = type { %"class.Foam::OSstream.26", i8, %"class.Foam::string.5" }
+%"class.Foam::OSstream.26" = type { %"class.Foam::Ostream.base.9", %"class.Foam::fileName.10", %"class.std::basic_ostream.25"* }
+%"class.Foam::Ostream.base.9" = type <{ %"class.Foam::IOstream.8", i16 }>
+%"class.Foam::IOstream.8" = type { i32 (...)**, i32, [4 x i8], %"class.Foam::IOstream::versionNumber.7", i32, i32, i32, i32 }
+%"class.Foam::IOstream::versionNumber.7" = type <{ double, i32, [4 x i8] }>
+%"class.Foam::fileName.10" = type { %"class.Foam::string.5" }
+%"class.std::basic_ostream.25" = type { i32 (...)**, %"class.std::basic_ios.24" }
+%"class.std::basic_ios.24" = type { %"class.std::ios_base.16", %"class.std::basic_ostream.25"*, i8, i8, %"class.std::basic_streambuf.17"*, %"class.std::ctype.21"*, %"class.std::__gnu_cxx_ldbl128::num_put.22"*, %"class.std::__gnu_cxx_ldbl128::num_get.23"* }
+%"class.std::ios_base.16" = type { i32 (...)**, i64, i64, i32, i32, i32, %"struct.std::ios_base::_Callback_list.11"*, %"struct.std::ios_base::_Words.12", [8 x %"struct.std::ios_base::_Words.12"], i32, %"struct.std::ios_base::_Words.12"*, %"class.std::locale.15" }
+%"struct.std::ios_base::_Callback_list.11" = type { %"struct.std::ios_base::_Callback_list.11"*, void (i32, %"class.std::ios_base.16"*, i32)*, i32, i32 }
+%"struct.std::ios_base::_Words.12" = type { i8*, i64 }
+%"class.std::locale.15" = type { %"class.std::locale::_Impl.14"* }
+%"class.std::locale::_Impl.14" = type { i32, %"class.std::locale::facet.13"**, i64, %"class.std::locale::facet.13"**, i8** }
+%"class.std::locale::facet.13" = type <{ i32 (...)**, i32, [4 x i8] }>
+%"class.std::basic_streambuf.17" = type { i32 (...)**, i8*, i8*, i8*, i8*, i8*, i8*, %"class.std::locale.15" }
+%"class.std::ctype.21" = type <{ %"class.std::locale::facet.base.18", [4 x i8], %struct.__locale_struct.20*, i8, [7 x i8], i32*, i32*, i16*, i8, [256 x i8], [256 x i8], i8, [6 x i8] }>
+%"class.std::locale::facet.base.18" = type <{ i32 (...)**, i32 }>
+%struct.__locale_struct.20 = type { [13 x %struct.__locale_data.19*], i16*, i32*, i32*, [13 x i8*] }
+%struct.__locale_data.19 = type opaque
+%"class.std::__gnu_cxx_ldbl128::num_put.22" = type { %"class.std::locale::facet.base.18", [4 x i8] }
+%"class.std::__gnu_cxx_ldbl128::num_get.23" = type { %"class.std::locale::facet.base.18", [4 x i8] }
+%"class.Foam::primitiveMesh.135" = type { i32 (...)**, i32, i32, i32, i32, i32, i32, i32, i32, i32, %"class.Foam::List.116"*, %"class.Foam::List.0"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.5"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::DynamicList.40", %"class.Foam::HashSet.127", %"class.Foam::Field.131"*, %"class.Foam::Field.131"*, %"class.Foam::Field.11"*, %"class.Foam::Field.131"* }
+%"class.Foam::List.116" = type opaque
+%"class.Foam::List.0" = type { %"class.Foam::UList.119" }
+%"class.Foam::UList.119" = type { i32, %"class.Foam::edge.118"* }
+%"class.Foam::edge.118" = type { %"class.Foam::FixedList.117" }
+%"class.Foam::FixedList.117" = type { [2 x i32] }
+%"class.Foam::List.5" = type { %"class.Foam::UList.6" }
+%"class.Foam::UList.6" = type { i32, %"class.Foam::cell.121"* }
+%"class.Foam::cell.121" = type { %"class.Foam::List.3" }
+%"class.Foam::List.3" = type { %"class.Foam::UList.4" }
+%"class.Foam::UList.4" = type { i32, i32* }
+%"class.Foam::List.1" = type { %"class.Foam::UList.2" }
+%"class.Foam::UList.2" = type { i32, %"class.Foam::List.3"* }
+%"class.Foam::DynamicList.40" = type <{ %"class.Foam::List.3", i32, [4 x i8] }>
+%"class.Foam::HashSet.127" = type { %"class.Foam::HashTable.7" }
+%"class.Foam::HashTable.7" = type { i32, i32, %"struct.Foam::HashTable<Foam::nil, int, Foam::Hash<Foam::label> >::hashedEntry.125"** }
+%"struct.Foam::HashTable<Foam::nil, int, Foam::Hash<Foam::label> >::hashedEntry.125" = type <{ i32, [4 x i8], %"struct.Foam::HashTable<Foam::nil, int, Foam::Hash<Foam::label> >::hashedEntry.125"*, %"class.Foam::nil.124", [7 x i8] }>
+%"class.Foam::nil.124" = type { i8 }
+%"class.Foam::Field.11" = type { %"class.Foam::refCount.128", %"class.Foam::List.12" }
+%"class.Foam::refCount.128" = type { i32 }
+%"class.Foam::List.12" = type { %"class.Foam::UList.13" }
+%"class.Foam::UList.13" = type { i32, double* }
+%"class.Foam::Field.131" = type { %"class.Foam::refCount.128", %"class.Foam::List.8" }
+%"class.Foam::List.8" = type { %"class.Foam::UList.9" }
+%"class.Foam::UList.9" = type { i32, %"class.Foam::Vector.29"* }
+%"class.Foam::Vector.29" = type { %"class.Foam::VectorSpace.10" }
+%"class.Foam::VectorSpace.10" = type { [3 x double] }
+%"class.Foam::Ostream.189" = type <{ %"class.Foam::IOstream.8", i16, [6 x i8] }>
+
+@_ZN4Foam4InfoE = external global %"class.Foam::messageStream.6", align 8
+@.str27 = external unnamed_addr constant [24 x i8], align 1
+@.str28 = external unnamed_addr constant [7 x i8], align 1
+@_ZN4Foam4PoutE = external global %"class.Foam::prefixOSstream.27", align 8
+
+define void @_ZN4Foam13checkTopologyERKNS_8polyMeshEbb(i1 zeroext %allTopology) #0 {
+entry:
+ br i1 undef, label %for.body, label %for.cond.cleanup
+
+; CHECK-LABEL: @_ZN4Foam13checkTopologyERKNS_8polyMeshEbb
+
+; CHECK: addis [[REG1:[0-9]+]], 2, .LC0@toc@ha
+; CHECK: std 2, 40(1)
+; CHECK: ld {{[0-9]+}}, .LC0@toc@l([[REG1]])
+; CHECK: {{mr|ld}} 2,
+; CHECK: mtctr
+; CHECK: bctrl
+; CHECK: ld 2, 40(1)
+
+; CHECK: addis [[REG1:[0-9]+]], 2, .LC0@toc@ha
+; CHECK: std 2, 40(1)
+; CHECK: ld {{[0-9]+}}, .LC0@toc@l([[REG1]])
+; CHECK: {{mr|ld}} 2,
+; CHECK: mtctr
+; CHECK: bctrl
+; CHECK: ld 2, 40(1)
+
+for.cond.cleanup: ; preds = %entry
+ br i1 undef, label %if.then.i, label %if.else.i
+
+if.then.i: ; preds = %for.cond.cleanup
+ br i1 undef, label %if.then.i1435, label %if.else.i1436
+
+if.else.i: ; preds = %for.cond.cleanup
+ unreachable
+
+if.then.i1435: ; preds = %if.then.i
+ br label %_ZN4Foam12returnReduceIiNS_5sumOpIiEEEET_RKS3_RKT0_ii.exit
+
+if.else.i1436: ; preds = %if.then.i
+ br label %_ZN4Foam12returnReduceIiNS_5sumOpIiEEEET_RKS3_RKT0_ii.exit
+
+_ZN4Foam12returnReduceIiNS_5sumOpIiEEEET_RKS3_RKT0_ii.exit: ; preds = %if.else.i1436, %if.then.i1435
+ br i1 undef, label %for.body.i, label %_ZNK4Foam8ZoneMeshINS_8cellZoneENS_8polyMeshEE15checkDefinitionEb.exit
+
+for.body: ; preds = %entry
+ unreachable
+
+for.body.i: ; preds = %_ZN4Foam12returnReduceIiNS_5sumOpIiEEEET_RKS3_RKT0_ii.exit
+ unreachable
+
+_ZNK4Foam8ZoneMeshINS_8cellZoneENS_8polyMeshEE15checkDefinitionEb.exit: ; preds = %_ZN4Foam12returnReduceIiNS_5sumOpIiEEEET_RKS3_RKT0_ii.exit
+ br i1 undef, label %for.body.i1480, label %_ZNK4Foam8ZoneMeshINS_8faceZoneENS_8polyMeshEE15checkDefinitionEb.exit
+
+for.body.i1480: ; preds = %_ZNK4Foam8ZoneMeshINS_8cellZoneENS_8polyMeshEE15checkDefinitionEb.exit
+ unreachable
+
+_ZNK4Foam8ZoneMeshINS_8faceZoneENS_8polyMeshEE15checkDefinitionEb.exit: ; preds = %_ZNK4Foam8ZoneMeshINS_8cellZoneENS_8polyMeshEE15checkDefinitionEb.exit
+ br i1 undef, label %for.body.i1504, label %_ZNK4Foam8ZoneMeshINS_9pointZoneENS_8polyMeshEE15checkDefinitionEb.exit
+
+for.body.i1504: ; preds = %_ZNK4Foam8ZoneMeshINS_8faceZoneENS_8polyMeshEE15checkDefinitionEb.exit
+ unreachable
+
+_ZNK4Foam8ZoneMeshINS_9pointZoneENS_8polyMeshEE15checkDefinitionEb.exit: ; preds = %_ZNK4Foam8ZoneMeshINS_8faceZoneENS_8polyMeshEE15checkDefinitionEb.exit
+ invoke void @_ZN4Foam4word12stripInvalidEv()
+ to label %_ZN4Foam4wordC2EPKcb.exit unwind label %lpad.i
+
+lpad.i: ; preds = %_ZNK4Foam8ZoneMeshINS_9pointZoneENS_8polyMeshEE15checkDefinitionEb.exit
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ resume { i8*, i32 } %0
+
+_ZN4Foam4wordC2EPKcb.exit: ; preds = %_ZNK4Foam8ZoneMeshINS_9pointZoneENS_8polyMeshEE15checkDefinitionEb.exit
+ invoke void @_ZN4Foam7cellSetC1ERKNS_8polyMeshERKNS_4wordEiNS_8IOobject11writeOptionE()
+ to label %invoke.cont59 unwind label %lpad
+
+invoke.cont59: ; preds = %_ZN4Foam4wordC2EPKcb.exit
+ br i1 undef, label %_ZNSsD2Ev.exit, label %if.then.i.i, !prof !1
+
+if.then.i.i: ; preds = %invoke.cont59
+ br i1 true, label %if.then.i.i.i1508, label %if.else.i.i.i
+
+if.then.i.i.i1508: ; preds = %if.then.i.i
+ br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i
+
+if.else.i.i.i: ; preds = %if.then.i.i
+ br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i
+
+_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i: ; preds = %if.else.i.i.i, %if.then.i.i.i1508
+ br i1 undef, label %if.then4.i.i, label %_ZNSsD2Ev.exit
+
+if.then4.i.i: ; preds = %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i
+ br label %_ZNSsD2Ev.exit
+
+_ZNSsD2Ev.exit: ; preds = %if.then4.i.i, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i, %invoke.cont59
+ br i1 undef, label %for.body70, label %for.cond.cleanup69
+
+for.cond.cleanup69: ; preds = %_ZNSsD2Ev.exit
+ br i1 undef, label %if.then121, label %if.else
+
+lpad: ; preds = %_ZN4Foam4wordC2EPKcb.exit
+ %1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br i1 undef, label %_ZNSsD2Ev.exit1578, label %if.then.i.i1570, !prof !1
+
+if.then.i.i1570: ; preds = %lpad
+ br i1 undef, label %if.then4.i.i1577, label %_ZNSsD2Ev.exit1578
+
+if.then4.i.i1577: ; preds = %if.then.i.i1570
+ unreachable
+
+_ZNSsD2Ev.exit1578: ; preds = %if.then.i.i1570, %lpad
+ unreachable
+
+for.body70: ; preds = %_ZNSsD2Ev.exit
+ unreachable
+
+if.then121: ; preds = %for.cond.cleanup69
+ unreachable
+
+if.else: ; preds = %for.cond.cleanup69
+ invoke void @_ZN4Foam4word12stripInvalidEv()
+ to label %_ZN4Foam4wordC2EPKcb.exit1701 unwind label %lpad.i1689
+
+lpad.i1689: ; preds = %if.else
+ %2 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ unreachable
+
+_ZN4Foam4wordC2EPKcb.exit1701: ; preds = %if.else
+ invoke void @_ZN4Foam8pointSetC1ERKNS_8polyMeshERKNS_4wordEiNS_8IOobject11writeOptionE()
+ to label %invoke.cont169 unwind label %lpad165
+
+invoke.cont169: ; preds = %_ZN4Foam4wordC2EPKcb.exit1701
+ %call177 = invoke zeroext i1 undef(%"class.Foam::primitiveMesh.135"* undef, i1 zeroext true, %"class.Foam::HashSet.127"* undef)
+ to label %invoke.cont176 unwind label %lpad175
+
+invoke.cont176: ; preds = %invoke.cont169
+ br i1 %call177, label %if.then178, label %if.end213
+
+if.then178: ; preds = %invoke.cont176
+ unreachable
+
+lpad165: ; preds = %_ZN4Foam4wordC2EPKcb.exit1701
+ %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ unreachable
+
+lpad175: ; preds = %invoke.cont169
+ %4 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ invoke void @_ZN4Foam8pointSetD1Ev()
+ to label %eh.resume unwind label %terminate.lpad
+
+if.end213: ; preds = %invoke.cont176
+ invoke void @_ZN4Foam4word12stripInvalidEv()
+ to label %_ZN4Foam4wordC2EPKcb.exit1777 unwind label %lpad.i1765
+
+lpad.i1765: ; preds = %if.end213
+ %5 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br i1 undef, label %eh.resume.i1776, label %if.then.i.i.i1767, !prof !1
+
+if.then.i.i.i1767: ; preds = %lpad.i1765
+ unreachable
+
+eh.resume.i1776: ; preds = %lpad.i1765
+ resume { i8*, i32 } %5
+
+_ZN4Foam4wordC2EPKcb.exit1777: ; preds = %if.end213
+ invoke void @_ZN4Foam7faceSetC1ERKNS_8polyMeshERKNS_4wordEiNS_8IOobject11writeOptionE()
+ to label %invoke.cont221 unwind label %lpad217
+
+invoke.cont221: ; preds = %_ZN4Foam4wordC2EPKcb.exit1777
+ br i1 undef, label %_ZNSsD2Ev.exit1792, label %if.then.i.i1784, !prof !1
+
+if.then.i.i1784: ; preds = %invoke.cont221
+ br i1 undef, label %if.then4.i.i1791, label %_ZNSsD2Ev.exit1792
+
+if.then4.i.i1791: ; preds = %if.then.i.i1784
+ br label %_ZNSsD2Ev.exit1792
+
+_ZNSsD2Ev.exit1792: ; preds = %if.then4.i.i1791, %if.then.i.i1784, %invoke.cont221
+ %call232 = invoke zeroext i1 undef(%"class.Foam::primitiveMesh.135"* undef, i1 zeroext true, %"class.Foam::HashSet.127"* undef)
+ to label %invoke.cont231 unwind label %lpad230
+
+invoke.cont231: ; preds = %_ZNSsD2Ev.exit1792
+ invoke void @_ZN4Foam6reduceIiNS_5sumOpIiEEEEvRKNS_4ListINS_8UPstream11commsStructEEERT_RKT0_ii()
+ to label %invoke.cont243 unwind label %lpad230
+
+lpad217: ; preds = %_ZN4Foam4wordC2EPKcb.exit1777
+ %6 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br label %eh.resume
+
+lpad230: ; preds = %invoke.cont231, %_ZNSsD2Ev.exit1792
+ %7 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ invoke void @_ZN4Foam7faceSetD1Ev()
+ to label %eh.resume unwind label %terminate.lpad
+
+invoke.cont243: ; preds = %invoke.cont231
+ invoke void @_ZN4Foam4word12stripInvalidEv()
+ to label %_ZN4Foam4wordC2EPKcb.exit1862 unwind label %lpad.i1850
+
+lpad.i1850: ; preds = %invoke.cont243
+ %8 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ unreachable
+
+_ZN4Foam4wordC2EPKcb.exit1862: ; preds = %invoke.cont243
+ invoke void @_ZN4Foam7faceSetC1ERKNS_8polyMeshERKNS_4wordEiNS_8IOobject11writeOptionE()
+ to label %invoke.cont280 unwind label %lpad276
+
+invoke.cont280: ; preds = %_ZN4Foam4wordC2EPKcb.exit1862
+ br i1 undef, label %_ZNSsD2Ev.exit1877, label %if.then.i.i1869, !prof !1
+
+if.then.i.i1869: ; preds = %invoke.cont280
+ unreachable
+
+_ZNSsD2Ev.exit1877: ; preds = %invoke.cont280
+ br i1 undef, label %if.then292, label %if.end328
+
+if.then292: ; preds = %_ZNSsD2Ev.exit1877
+ unreachable
+
+lpad276: ; preds = %_ZN4Foam4wordC2EPKcb.exit1862
+ %9 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ unreachable
+
+if.end328: ; preds = %_ZNSsD2Ev.exit1877
+ br i1 %allTopology, label %if.then331, label %if.end660
+
+if.then331: ; preds = %if.end328
+ unreachable
+
+if.end660: ; preds = %if.end328
+ invoke void @_ZN4Foam13messageStreamcvRNS_8OSstreamEEv()
+ to label %invoke.cont668 unwind label %lpad663
+
+invoke.cont668: ; preds = %if.end660
+ %call671 = invoke dereferenceable(56) %"class.Foam::Ostream.189"* @_ZN4FoamlsERNS_7OstreamEPKc()
+ to label %invoke.cont670 unwind label %lpad663
+
+invoke.cont670: ; preds = %invoke.cont668
+ invoke void @_ZN4FoamlsERNS_7OstreamEi()
+ to label %invoke.cont674 unwind label %lpad663
+
+invoke.cont674: ; preds = %invoke.cont670
+ %call677 = invoke dereferenceable(56) %"class.Foam::Ostream.189"* @_ZN4FoamlsERNS_7OstreamEPKc()
+ to label %invoke.cont676 unwind label %lpad663
+
+invoke.cont676: ; preds = %invoke.cont674
+ invoke void undef(%"class.Foam::Ostream.189"* %call677)
+ to label %if.end878 unwind label %lpad663
+
+lpad663: ; preds = %invoke.cont670, %if.end660, %invoke.cont668, %invoke.cont674, %invoke.cont676
+ %10 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br i1 undef, label %_ZN4Foam4ListIiED2Ev.exit.i3073, label %delete.notnull.i.i3071
+
+if.end878: ; preds = %invoke.cont676
+ br i1 undef, label %_ZN4Foam11regionSplitD2Ev.exit, label %delete.notnull.i.i3056
+
+delete.notnull.i.i3056: ; preds = %if.end878
+ unreachable
+
+_ZN4Foam11regionSplitD2Ev.exit: ; preds = %if.end878
+ br i1 undef, label %if.then883, label %if.else888
+
+if.then883: ; preds = %_ZN4Foam11regionSplitD2Ev.exit
+ unreachable
+
+delete.notnull.i.i3071: ; preds = %lpad663
+ unreachable
+
+_ZN4Foam4ListIiED2Ev.exit.i3073: ; preds = %lpad663
+ invoke void @_ZN4Foam11regIOobjectD2Ev()
+ to label %eh.resume unwind label %terminate.lpad
+
+if.else888: ; preds = %_ZN4Foam11regionSplitD2Ev.exit
+ invoke void @_ZN4Foam4word12stripInvalidEv()
+ to label %_ZN4Foam4wordC2EPKcb.exit3098 unwind label %lpad.i3086
+
+lpad.i3086: ; preds = %if.else888
+ %11 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ unreachable
+
+_ZN4Foam4wordC2EPKcb.exit3098: ; preds = %if.else888
+ invoke void @_ZN4Foam8pointSetC1ERKNS_8polyMeshERKNS_4wordEiNS_8IOobject11writeOptionE()
+ to label %invoke.cont902 unwind label %lpad898
+
+invoke.cont902: ; preds = %_ZN4Foam4wordC2EPKcb.exit3098
+ br i1 undef, label %_ZNSsD2Ev.exit3113, label %if.then.i.i3105, !prof !1
+
+if.then.i.i3105: ; preds = %invoke.cont902
+ br i1 undef, label %if.then4.i.i3112, label %_ZNSsD2Ev.exit3113
+
+if.then4.i.i3112: ; preds = %if.then.i.i3105
+ unreachable
+
+_ZNSsD2Ev.exit3113: ; preds = %if.then.i.i3105, %invoke.cont902
+ %call.i31163117 = invoke zeroext i32 undef(%"class.Foam::IOstream.8"* getelementptr inbounds (%"class.Foam::prefixOSstream.27", %"class.Foam::prefixOSstream.27"* @_ZN4Foam4PoutE, i64 0, i32 0, i32 0, i32 0))
+ to label %call.i3116.noexc unwind label %lpad905.loopexit.split-lp
+
+call.i3116.noexc: ; preds = %_ZNSsD2Ev.exit3113
+ %call5.i3118 = invoke zeroext i32 null(%"class.Foam::IOstream.8"* getelementptr inbounds (%"class.Foam::prefixOSstream.27", %"class.Foam::prefixOSstream.27"* @_ZN4Foam4PoutE, i64 0, i32 0, i32 0, i32 0), i32 zeroext undef)
+ to label %invoke.cont906 unwind label %lpad905.loopexit.split-lp
+
+invoke.cont906: ; preds = %call.i3116.noexc
+ unreachable
+
+lpad898: ; preds = %_ZN4Foam4wordC2EPKcb.exit3098
+ %12 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br i1 undef, label %_ZNSsD2Ev.exit3204, label %if.then.i.i3196, !prof !1
+
+if.then.i.i3196: ; preds = %lpad898
+ unreachable
+
+_ZNSsD2Ev.exit3204: ; preds = %lpad898
+ unreachable
+
+lpad905.loopexit.split-lp: ; preds = %call.i3116.noexc, %_ZNSsD2Ev.exit3113
+ %lpad.loopexit.split-lp = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ invoke void @_ZN4Foam8pointSetD1Ev()
+ to label %eh.resume unwind label %terminate.lpad
+
+eh.resume: ; preds = %_ZN4Foam4ListIiED2Ev.exit.i3073, %lpad230, %lpad175, %lpad905.loopexit.split-lp, %lpad217
+ resume { i8*, i32 } undef
+
+terminate.lpad: ; preds = %_ZN4Foam4ListIiED2Ev.exit.i3073, %lpad230, %lpad175, %lpad905.loopexit.split-lp
+ %13 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ unreachable
+}
+
+declare dereferenceable(56) %"class.Foam::Ostream.189"* @_ZN4FoamlsERNS_7OstreamEPKc() #0
+
+declare void @_ZN4Foam13messageStreamcvRNS_8OSstreamEEv() #0
+
+declare i32 @__gxx_personality_v0(...)
+
+declare void @_ZN4Foam7cellSetC1ERKNS_8polyMeshERKNS_4wordEiNS_8IOobject11writeOptionE() #0
+
+declare void @_ZN4FoamlsERNS_7OstreamEi() #0
+
+declare void @_ZN4Foam8pointSetC1ERKNS_8polyMeshERKNS_4wordEiNS_8IOobject11writeOptionE() #0
+
+declare void @_ZN4Foam8pointSetD1Ev() #0
+
+declare void @_ZN4Foam7faceSetC1ERKNS_8polyMeshERKNS_4wordEiNS_8IOobject11writeOptionE() #0
+
+declare void @_ZN4Foam7faceSetD1Ev() #0
+
+; Function Attrs: inlinehint
+declare void @_ZN4Foam4word12stripInvalidEv() #1 align 2
+
+declare void @_ZN4Foam11regIOobjectD2Ev() #0
+
+declare void @_ZN4Foam6reduceIiNS_5sumOpIiEEEEvRKNS_4ListINS_8UPstream11commsStructEEERT_RKT0_ii() #0
+
+attributes #0 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="a2q" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { inlinehint "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="a2q" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 1, !"PIC Level", i32 2}
+!1 = !{!"branch_weights", i32 64, i32 4}
diff --git a/test/CodeGen/PowerPC/f32-to-i64.ll b/test/CodeGen/PowerPC/f32-to-i64.ll
new file mode 100644
index 000000000000..c1381880b757
--- /dev/null
+++ b/test/CodeGen/PowerPC/f32-to-i64.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "E-m:e-p:32:32-i64:64-n32"
+target triple = "powerpc-unknown-unknown"
+
+; Function Attrs: nounwind
+define i64 @testullf(float %arg) #0 {
+entry:
+ %arg.addr = alloca float, align 4
+ store float %arg, float* %arg.addr, align 4
+ %0 = load float, float* %arg.addr, align 4
+ %conv = fptoui float %0 to i64
+ ret i64 %conv
+
+; CHECK-LABEL: @testullf
+; CHECK: fctiduz [[REG1:[0-9]+]], 1
+; CHECK: stfd [[REG1]], [[OFF:[0-9]+]](1)
+; CHECK-DAG: lwz 3, [[OFF]](1)
+; CHECK-DAG: lwz 4, {{[0-9]+}}(1)
+; CHECK: blr
+}
+
+attributes #0 = { nounwind "target-cpu"="a2" }
+
diff --git a/test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll b/test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll
index 7bdda0494b8f..3e0e5250d2fb 100644
--- a/test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll
+++ b/test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
%struct.A = type { i32, [2 x [2 x i32]], i8, [3 x [3 x [3 x i32]]] }
%struct.B = type { i32, [2 x [2 x [2 x %struct.A]]] }
@@ -11,9 +11,9 @@ define i32* @t1() nounwind {
entry:
; ELF64: t1
%addr = alloca i32*, align 4
- store i32* getelementptr inbounds ([2 x [2 x [2 x [2 x [2 x i32]]]]]* @arr, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1), i32** %addr, align 4
+ store i32* getelementptr inbounds ([2 x [2 x [2 x [2 x [2 x i32]]]]], [2 x [2 x [2 x [2 x [2 x i32]]]]]* @arr, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1), i32** %addr, align 4
; ELF64: addi {{[0-9]+}}, {{[0-9]+}}, 124
- %0 = load i32** %addr, align 4
+ %0 = load i32*, i32** %addr, align 4
ret i32* %0
}
@@ -21,9 +21,9 @@ define i32* @t2() nounwind {
entry:
; ELF64: t2
%addr = alloca i32*, align 4
- store i32* getelementptr inbounds ([3 x [3 x %struct.A]]* @A, i32 0, i32 2, i32 2, i32 3, i32 1, i32 2, i32 2), i32** %addr, align 4
+ store i32* getelementptr inbounds ([3 x [3 x %struct.A]], [3 x [3 x %struct.A]]* @A, i32 0, i32 2, i32 2, i32 3, i32 1, i32 2, i32 2), i32** %addr, align 4
; ELF64: addi {{[0-9]+}}, {{[0-9]+}}, 1148
- %0 = load i32** %addr, align 4
+ %0 = load i32*, i32** %addr, align 4
ret i32* %0
}
@@ -31,9 +31,9 @@ define i32* @t3() nounwind {
entry:
; ELF64: t3
%addr = alloca i32*, align 4
- store i32* getelementptr inbounds ([3 x [3 x %struct.A]]* @A, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1), i32** %addr, align 4
+ store i32* getelementptr inbounds ([3 x [3 x %struct.A]], [3 x [3 x %struct.A]]* @A, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1), i32** %addr, align 4
; ELF64: addi {{[0-9]+}}, {{[0-9]+}}, 140
- %0 = load i32** %addr, align 4
+ %0 = load i32*, i32** %addr, align 4
ret i32* %0
}
@@ -41,8 +41,8 @@ define i32* @t4() nounwind {
entry:
; ELF64: t4
%addr = alloca i32*, align 4
- store i32* getelementptr inbounds ([2 x [2 x [2 x %struct.B]]]* @B, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 3, i32 1, i32 2, i32 1), i32** %addr, align 4
+ store i32* getelementptr inbounds ([2 x [2 x [2 x %struct.B]]], [2 x [2 x [2 x %struct.B]]]* @B, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 3, i32 1, i32 2, i32 1), i32** %addr, align 4
; ELF64: addi {{[0-9]+}}, {{[0-9]+}}, 1284
- %0 = load i32** %addr, align 4
+ %0 = load i32*, i32** %addr, align 4
ret i32* %0
}
diff --git a/test/CodeGen/PowerPC/fast-isel-binary.ll b/test/CodeGen/PowerPC/fast-isel-binary.ll
index 43a6cd085055..2f1513f8aa11 100644
--- a/test/CodeGen/PowerPC/fast-isel-binary.ll
+++ b/test/CodeGen/PowerPC/fast-isel-binary.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
; Test add with non-legal types
diff --git a/test/CodeGen/PowerPC/fast-isel-br-const.ll b/test/CodeGen/PowerPC/fast-isel-br-const.ll
index 2cfb8a225745..6be7fbf9e02f 100644
--- a/test/CodeGen/PowerPC/fast-isel-br-const.ll
+++ b/test/CodeGen/PowerPC/fast-isel-br-const.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
define i32 @t1(i32 %a, i32 %b) nounwind uwtable ssp {
entry:
diff --git a/test/CodeGen/PowerPC/fast-isel-call.ll b/test/CodeGen/PowerPC/fast-isel-call.ll
index b2cc75e26114..64d8f6e79195 100644
--- a/test/CodeGen/PowerPC/fast-isel-call.ll
+++ b/test/CodeGen/PowerPC/fast-isel-call.ll
@@ -1,8 +1,8 @@
; FIXME: FastISel currently returns false if it hits code that uses VSX
-; registers and with -fast-isel-abort turned on the test case will then fail.
+; registers and with -fast-isel-abort=1 turned on the test case will then fail.
; When fastisel better supports VSX fix up this test case.
;
-; RUN: llc < %s -O0 -verify-machineinstrs -mattr=-vsx -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -mattr=-vsx -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
define i32 @t1(i8 signext %a) nounwind {
%1 = sext i8 %a to i32
@@ -85,7 +85,7 @@ define i32 @bar0(i32 %i) nounwind {
;define void @foo3() uwtable {
; %fptr = alloca i32 (i32)*, align 8
; store i32 (i32)* @bar0, i32 (i32)** %fptr, align 8
-; %1 = load i32 (i32)** %fptr, align 8
+; %1 = load i32 (i32)*, i32 (i32)** %fptr, align 8
; %call = call i32 %1(i32 0)
; ret void
;}
diff --git a/test/CodeGen/PowerPC/fast-isel-cmp-imm.ll b/test/CodeGen/PowerPC/fast-isel-cmp-imm.ll
index c1f6b6327a44..5a9d15868b6b 100644
--- a/test/CodeGen/PowerPC/fast-isel-cmp-imm.ll
+++ b/test/CodeGen/PowerPC/fast-isel-cmp-imm.ll
@@ -1,8 +1,8 @@
; FIXME: FastISel currently returns false if it hits code that uses VSX
-; registers and with -fast-isel-abort turned on the test case will then fail.
+; registers and with -fast-isel-abort=1 turned on the test case will then fail.
; When fastisel better supports VSX fix up this test case.
;
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
define void @t1a(float %a) uwtable ssp {
entry:
; ELF64: t1a
@@ -201,7 +201,7 @@ define void @t12(i8 %a) uwtable ssp {
entry:
; ELF64: t12
%cmp = icmp ugt i8 %a, -113
-; ELF64: rlwinm
+; ELF64: clrlwi
; ELF64: cmplwi
br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/PowerPC/fast-isel-const.ll b/test/CodeGen/PowerPC/fast-isel-const.ll
index 1057d0a0ce2b..a751a2be6c69 100644
--- a/test/CodeGen/PowerPC/fast-isel-const.ll
+++ b/test/CodeGen/PowerPC/fast-isel-const.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
define zeroext i1 @testi1(i8 %in) nounwind uwtable ssp {
entry:
diff --git a/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll b/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll
index ac41e8c27700..cfb934c6ab02 100644
--- a/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll
+++ b/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr5 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr5 | FileCheck %s --check-prefix=ELF64
; Test sitofp
diff --git a/test/CodeGen/PowerPC/fast-isel-conversion.ll b/test/CodeGen/PowerPC/fast-isel-conversion.ll
index b0e29c1274a2..f7557d456858 100644
--- a/test/CodeGen/PowerPC/fast-isel-conversion.ll
+++ b/test/CodeGen/PowerPC/fast-isel-conversion.ll
@@ -1,12 +1,12 @@
; FIXME: FastISel currently returns false if it hits code that uses VSX
-; registers and with -fast-isel-abort turned on the test case will then fail.
+; registers and with -fast-isel-abort=1 turned on the test case will then fail.
; When fastisel better supports VSX fix up this test case.
;
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx | FileCheck %s --check-prefix=ELF64LE
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx | FileCheck %s --check-prefix=ELF64LE
; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=970 -mattr=-vsx | FileCheck %s --check-prefix=PPC970
-;; Tests for 970 don't use -fast-isel-abort because we intentionally punt
+;; Tests for 970 don't use -fast-isel-abort=1 because we intentionally punt
;; to SelectionDAG in some cases.
; Test sitofp
@@ -253,7 +253,7 @@ entry:
; ELF64LE: std
; ELF64LE: lfd
; ELF64LE: fcfidus
-; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 16, 31
+; PPC970: clrlwi {{[0-9]+}}, {{[0-9]+}}, 16
; PPC970: std
; PPC970: lfd
; PPC970: fcfid
@@ -277,7 +277,7 @@ entry:
; ELF64LE: std
; ELF64LE: lfd
; ELF64LE: fcfidus
-; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 24, 31
+; PPC970: clrlwi {{[0-9]+}}, {{[0-9]+}}, 24
; PPC970: std
; PPC970: lfd
; PPC970: fcfid
@@ -342,7 +342,7 @@ entry:
; ELF64LE: std
; ELF64LE: lfd
; ELF64LE: fcfidu
-; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 16, 31
+; PPC970: clrlwi {{[0-9]+}}, {{[0-9]+}}, 16
; PPC970: std
; PPC970: lfd
; PPC970: fcfid
@@ -365,7 +365,7 @@ entry:
; ELF64LE: std
; ELF64LE: lfd
; ELF64LE: fcfidu
-; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 24, 31
+; PPC970: clrlwi {{[0-9]+}}, {{[0-9]+}}, 24
; PPC970: std
; PPC970: lfd
; PPC970: fcfid
diff --git a/test/CodeGen/PowerPC/fast-isel-crash.ll b/test/CodeGen/PowerPC/fast-isel-crash.ll
index 1813fc96acee..55e87effcd82 100644
--- a/test/CodeGen/PowerPC/fast-isel-crash.ll
+++ b/test/CodeGen/PowerPC/fast-isel-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7
; Ensure this doesn't crash.
diff --git a/test/CodeGen/PowerPC/fast-isel-ext.ll b/test/CodeGen/PowerPC/fast-isel-ext.ll
index 753305a68dda..6fd3b4035122 100644
--- a/test/CodeGen/PowerPC/fast-isel-ext.ll
+++ b/test/CodeGen/PowerPC/fast-isel-ext.ll
@@ -1,18 +1,18 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
; zext
define i32 @zext_8_32(i8 %a) nounwind ssp {
; ELF64: zext_8_32
%r = zext i8 %a to i32
-; ELF64: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 24, 31
+; ELF64: clrlwi {{[0-9]+}}, {{[0-9]+}}, 24
ret i32 %r
}
define i32 @zext_16_32(i16 %a) nounwind ssp {
; ELF64: zext_16_32
%r = zext i16 %a to i32
-; ELF64: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 16, 31
+; ELF64: clrlwi {{[0-9]+}}, {{[0-9]+}}, 16
ret i32 %r
}
diff --git a/test/CodeGen/PowerPC/fast-isel-fold.ll b/test/CodeGen/PowerPC/fast-isel-fold.ll
index 4de345f309af..e56101a28e2b 100644
--- a/test/CodeGen/PowerPC/fast-isel-fold.ll
+++ b/test/CodeGen/PowerPC/fast-isel-fold.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
@a = global i8 1, align 1
@b = global i16 2, align 2
@@ -6,7 +6,7 @@
define void @t1() nounwind uwtable ssp {
; ELF64: t1
- %1 = load i8* @a, align 1
+ %1 = load i8, i8* @a, align 1
call void @foo1(i8 zeroext %1)
; ELF64: lbz
; ELF64-NOT: rldicl
@@ -16,7 +16,7 @@ define void @t1() nounwind uwtable ssp {
define void @t2() nounwind uwtable ssp {
; ELF64: t2
- %1 = load i16* @b, align 2
+ %1 = load i16, i16* @b, align 2
call void @foo2(i16 zeroext %1)
; ELF64: lhz
; ELF64-NOT: rldicl
@@ -26,7 +26,7 @@ define void @t2() nounwind uwtable ssp {
define void @t2a() nounwind uwtable ssp {
; ELF64: t2a
- %1 = load i32* @c, align 4
+ %1 = load i32, i32* @c, align 4
call void @foo3(i32 zeroext %1)
; ELF64: lwz
; ELF64-NOT: rldicl
@@ -40,7 +40,7 @@ declare void @foo3(i32 zeroext)
define i32 @t3() nounwind uwtable ssp {
; ELF64: t3
- %1 = load i8* @a, align 1
+ %1 = load i8, i8* @a, align 1
%2 = zext i8 %1 to i32
; ELF64: lbz
; ELF64-NOT: rlwinm
@@ -49,7 +49,7 @@ define i32 @t3() nounwind uwtable ssp {
define i32 @t4() nounwind uwtable ssp {
; ELF64: t4
- %1 = load i16* @b, align 2
+ %1 = load i16, i16* @b, align 2
%2 = zext i16 %1 to i32
; ELF64: lhz
; ELF64-NOT: rlwinm
@@ -58,7 +58,7 @@ define i32 @t4() nounwind uwtable ssp {
define i32 @t5() nounwind uwtable ssp {
; ELF64: t5
- %1 = load i16* @b, align 2
+ %1 = load i16, i16* @b, align 2
%2 = sext i16 %1 to i32
; ELF64: lha
; ELF64-NOT: rlwinm
@@ -67,7 +67,7 @@ define i32 @t5() nounwind uwtable ssp {
define i32 @t6() nounwind uwtable ssp {
; ELF64: t6
- %1 = load i8* @a, align 2
+ %1 = load i8, i8* @a, align 2
%2 = sext i8 %1 to i32
; ELF64: lbz
; ELF64-NOT: rlwinm
@@ -76,7 +76,7 @@ define i32 @t6() nounwind uwtable ssp {
define i64 @t7() nounwind uwtable ssp {
; ELF64: t7
- %1 = load i8* @a, align 1
+ %1 = load i8, i8* @a, align 1
%2 = zext i8 %1 to i64
; ELF64: lbz
; ELF64-NOT: rldicl
@@ -85,7 +85,7 @@ define i64 @t7() nounwind uwtable ssp {
define i64 @t8() nounwind uwtable ssp {
; ELF64: t8
- %1 = load i16* @b, align 2
+ %1 = load i16, i16* @b, align 2
%2 = zext i16 %1 to i64
; ELF64: lhz
; ELF64-NOT: rldicl
@@ -94,7 +94,7 @@ define i64 @t8() nounwind uwtable ssp {
define i64 @t9() nounwind uwtable ssp {
; ELF64: t9
- %1 = load i16* @b, align 2
+ %1 = load i16, i16* @b, align 2
%2 = sext i16 %1 to i64
; ELF64: lha
; ELF64-NOT: extsh
@@ -103,7 +103,7 @@ define i64 @t9() nounwind uwtable ssp {
define i64 @t10() nounwind uwtable ssp {
; ELF64: t10
- %1 = load i8* @a, align 2
+ %1 = load i8, i8* @a, align 2
%2 = sext i8 %1 to i64
; ELF64: lbz
; ELF64: extsb
@@ -112,7 +112,7 @@ define i64 @t10() nounwind uwtable ssp {
define i64 @t11() nounwind uwtable ssp {
; ELF64: t11
- %1 = load i32* @c, align 4
+ %1 = load i32, i32* @c, align 4
%2 = zext i32 %1 to i64
; ELF64: lwz
; ELF64-NOT: rldicl
@@ -121,7 +121,7 @@ define i64 @t11() nounwind uwtable ssp {
define i64 @t12() nounwind uwtable ssp {
; ELF64: t12
- %1 = load i32* @c, align 4
+ %1 = load i32, i32* @c, align 4
%2 = sext i32 %1 to i64
; ELF64: lwa
; ELF64-NOT: extsw
diff --git a/test/CodeGen/PowerPC/fast-isel-icmp-split.ll b/test/CodeGen/PowerPC/fast-isel-icmp-split.ll
new file mode 100644
index 000000000000..459616eb9698
--- /dev/null
+++ b/test/CodeGen/PowerPC/fast-isel-icmp-split.ll
@@ -0,0 +1,72 @@
+; RUN: llc -O0 -relocation-model=pic < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+%"class.std::__1::__tree_node.130.151" = type { %"class.std::__1::__tree_node_base.base.128.149", %"class.boost::serialization::extended_type_info.129.150"* }
+%"class.std::__1::__tree_node_base.base.128.149" = type <{ %"class.std::__1::__tree_end_node.127.148", %"class.std::__1::__tree_node_base.126.147"*, %"class.std::__1::__tree_node_base.126.147"*, i8 }>
+%"class.std::__1::__tree_end_node.127.148" = type { %"class.std::__1::__tree_node_base.126.147"* }
+%"class.std::__1::__tree_node_base.126.147" = type <{ %"class.std::__1::__tree_end_node.127.148", %"class.std::__1::__tree_node_base.126.147"*, %"class.std::__1::__tree_node_base.126.147"*, i8, [7 x i8] }>
+%"class.boost::serialization::extended_type_info.129.150" = type { i32 (...)**, i32, i8* }
+
+; Function Attrs: noinline
+define void @_ZN5boost13serialization18extended_type_info4findEPKc() #0 align 2 {
+entry:
+ br i1 undef, label %cond.true, label %cond.false
+
+; CHECK: @_ZN5boost13serialization18extended_type_info4findEPKc
+
+cond.true: ; preds = %entry
+ br label %cond.end
+
+cond.false: ; preds = %entry
+ unreachable
+ ; No predecessors!
+ br label %cond.end
+
+cond.end: ; preds = %0, %cond.true
+ invoke void @_ZNKSt3__16__treeIPKN5boost13serialization18extended_type_infoENS2_6detail11key_compareENS_9allocatorIS5_EEE4findIS5_EENS_21__tree_const_iteratorIS5_PNS_11__tree_nodeIS5_PvEElEERKT_()
+ to label %_ZNKSt3__18multisetIPKN5boost13serialization18extended_type_infoENS2_6detail11key_compareENS_9allocatorIS5_EEE4findERKS5_.exit unwind label %lpad
+
+_ZNKSt3__18multisetIPKN5boost13serialization18extended_type_infoENS2_6detail11key_compareENS_9allocatorIS5_EEE4findERKS5_.exit: ; preds = %cond.end
+ br label %invoke.cont
+
+invoke.cont: ; preds = %_ZNKSt3__18multisetIPKN5boost13serialization18extended_type_infoENS2_6detail11key_compareENS_9allocatorIS5_EEE4findERKS5_.exit
+ %1 = load %"class.std::__1::__tree_node.130.151"*, %"class.std::__1::__tree_node.130.151"** undef, align 8
+ %cmp.i = icmp eq %"class.std::__1::__tree_node.130.151"* undef, %1
+ br label %invoke.cont.2
+
+invoke.cont.2: ; preds = %invoke.cont
+ br i1 %cmp.i, label %if.then, label %if.end
+
+if.then: ; preds = %invoke.cont.2
+ br label %cleanup
+
+lpad: ; preds = %cond.end
+ %2 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br label %eh.resume
+
+if.end: ; preds = %invoke.cont.2
+ br label %invoke.cont.4
+
+invoke.cont.4: ; preds = %if.end
+ br label %cleanup
+
+cleanup: ; preds = %invoke.cont.4, %if.then
+ ret void
+
+eh.resume: ; preds = %lpad
+ resume { i8*, i32 } undef
+}
+
+declare i32 @__gxx_personality_v0(...)
+
+; Function Attrs: noinline
+declare void @_ZNKSt3__16__treeIPKN5boost13serialization18extended_type_infoENS2_6detail11key_compareENS_9allocatorIS5_EEE4findIS5_EENS_21__tree_const_iteratorIS5_PNS_11__tree_nodeIS5_PvEElEERKT_() #0 align 2
+
+attributes #0 = { noinline "target-cpu"="a2q" }
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 1, !"PIC Level", i32 2}
+
diff --git a/test/CodeGen/PowerPC/fast-isel-indirectbr.ll b/test/CodeGen/PowerPC/fast-isel-indirectbr.ll
index 88ccf918ae96..b5477134c517 100644
--- a/test/CodeGen/PowerPC/fast-isel-indirectbr.ll
+++ b/test/CodeGen/PowerPC/fast-isel-indirectbr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
define void @t1(i8* %x) {
entry:
diff --git a/test/CodeGen/PowerPC/fast-isel-load-store-vsx.ll b/test/CodeGen/PowerPC/fast-isel-load-store-vsx.ll
new file mode 100644
index 000000000000..8a873daa6c7a
--- /dev/null
+++ b/test/CodeGen/PowerPC/fast-isel-load-store-vsx.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -O0 -fast-isel -mattr=+vsx -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64VSX
+
+;; The semantics of VSX stores when R0 is used differ depending on whether it
+;; is used as the base or as the offset. If used as the base, the effective
+;; address computation uses zero regardless of the content of R0. If used as
+;; the offset, the content of R0 is used in the effective address. We observed
+;; that for some constructors, the initialization values were being stored
+;; without an offset register being specified, which caused R0 to be used as
+;; the offset in regions where it contained the value of the link register.
+;; This test verifies that R0 is used as the base in these situations.
+
+%SomeStruct = type { double }
+
+; ELF64VSX-LABEL: SomeStructCtor
+define linkonce_odr void @SomeStructCtor(%SomeStruct* %this, double %V) unnamed_addr align 2 {
+entry:
+ %this.addr = alloca %SomeStruct*, align 8
+ %V.addr = alloca double, align 8
+ store %SomeStruct* %this, %SomeStruct** %this.addr, align 8
+; ELF64VSX: stxsdx {{[0-9][0-9]?}}, 0, {{[1-9][0-9]?}}
+ store double %V, double* %V.addr, align 8
+ %this1 = load %SomeStruct*, %SomeStruct** %this.addr
+ %Val = getelementptr inbounds %SomeStruct, %SomeStruct* %this1, i32 0, i32 0
+; ELF64VSX: stxsdx {{[0-9][0-9]?}}, 0, {{[1-9][0-9]?}}
+ %0 = load double, double* %V.addr, align 8
+ store double %0, double* %Val, align 8
+ ret void
+ }
diff --git a/test/CodeGen/PowerPC/fast-isel-load-store.ll b/test/CodeGen/PowerPC/fast-isel-load-store.ll
index ef702e21d6a1..f6a55f06b2cb 100644
--- a/test/CodeGen/PowerPC/fast-isel-load-store.ll
+++ b/test/CodeGen/PowerPC/fast-isel-load-store.ll
@@ -1,8 +1,8 @@
; FIXME: FastISel currently returns false if it hits code that uses VSX
-; registers and with -fast-isel-abort turned on the test case will then fail.
+; registers and with -fast-isel-abort=1 turned on the test case will then fail.
; When fastisel better supports VSX fix up this test case.
;
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel -fast-isel-abort -mattr=-vsx -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel -fast-isel-abort=1 -mattr=-vsx -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
; This test verifies that load/store instructions are properly generated,
; and that they pass MI verification.
@@ -26,7 +26,7 @@
define i8 @t1() nounwind uwtable ssp {
; ELF64: t1
- %1 = load i8* @a, align 1
+ %1 = load i8, i8* @a, align 1
; ELF64: lbz
%2 = add nsw i8 %1, 1
; ELF64: addi
@@ -35,7 +35,7 @@ define i8 @t1() nounwind uwtable ssp {
define i16 @t2() nounwind uwtable ssp {
; ELF64: t2
- %1 = load i16* @b, align 2
+ %1 = load i16, i16* @b, align 2
; ELF64: lhz
%2 = add nsw i16 %1, 1
; ELF64: addi
@@ -44,7 +44,7 @@ define i16 @t2() nounwind uwtable ssp {
define i32 @t3() nounwind uwtable ssp {
; ELF64: t3
- %1 = load i32* @c, align 4
+ %1 = load i32, i32* @c, align 4
; ELF64: lwz
%2 = add nsw i32 %1, 1
; ELF64: addi
@@ -53,7 +53,7 @@ define i32 @t3() nounwind uwtable ssp {
define i64 @t4() nounwind uwtable ssp {
; ELF64: t4
- %1 = load i64* @d, align 4
+ %1 = load i64, i64* @d, align 4
; ELF64: ld
%2 = add nsw i64 %1, 1
; ELF64: addi
@@ -62,7 +62,7 @@ define i64 @t4() nounwind uwtable ssp {
define float @t5() nounwind uwtable ssp {
; ELF64: t5
- %1 = load float* @e, align 4
+ %1 = load float, float* @e, align 4
; ELF64: lfs
%2 = fadd float %1, 1.0
; ELF64: fadds
@@ -71,7 +71,7 @@ define float @t5() nounwind uwtable ssp {
define double @t6() nounwind uwtable ssp {
; ELF64: t6
- %1 = load double* @f, align 8
+ %1 = load double, double* @f, align 8
; ELF64: lfd
%2 = fadd double %1, 1.0
; ELF64: fadd
@@ -145,7 +145,7 @@ define void @t12(double %v) nounwind uwtable ssp {
;; lwa requires an offset divisible by 4, so we need lwax here.
define i64 @t13() nounwind uwtable ssp {
; ELF64: t13
- %1 = load i32* getelementptr inbounds (%struct.s* @g, i32 0, i32 1), align 1
+ %1 = load i32, i32* getelementptr inbounds (%struct.s, %struct.s* @g, i32 0, i32 1), align 1
%2 = sext i32 %1 to i64
; ELF64: li
; ELF64: lwax
@@ -157,7 +157,7 @@ define i64 @t13() nounwind uwtable ssp {
;; ld requires an offset divisible by 4, so we need ldx here.
define i64 @t14() nounwind uwtable ssp {
; ELF64: t14
- %1 = load i64* getelementptr inbounds (%struct.t* @h, i32 0, i32 1), align 1
+ %1 = load i64, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
; ELF64: li
; ELF64: ldx
%2 = add nsw i64 %1, 1
@@ -169,7 +169,7 @@ define i64 @t14() nounwind uwtable ssp {
define void @t15(i64 %v) nounwind uwtable ssp {
; ELF64: t15
%1 = add nsw i64 %v, 1
- store i64 %1, i64* getelementptr inbounds (%struct.t* @h, i32 0, i32 1), align 1
+ store i64 %1, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
; ELF64: addis
; ELF64: addi
; ELF64: addi
@@ -181,7 +181,7 @@ define void @t15(i64 %v) nounwind uwtable ssp {
;; ld requires an offset that fits in 16 bits, so we need ldx here.
define i64 @t16() nounwind uwtable ssp {
; ELF64: t16
- %1 = load i64* getelementptr inbounds ([8192 x i64]* @i, i32 0, i64 5000), align 8
+ %1 = load i64, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: lis
; ELF64: ori
; ELF64: ldx
@@ -194,7 +194,7 @@ define i64 @t16() nounwind uwtable ssp {
define void @t17(i64 %v) nounwind uwtable ssp {
; ELF64: t17
%1 = add nsw i64 %v, 1
- store i64 %1, i64* getelementptr inbounds ([8192 x i64]* @i, i32 0, i64 5000), align 8
+ store i64 %1, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: addis
; ELF64: ld
; ELF64: addi
diff --git a/test/CodeGen/PowerPC/fast-isel-redefinition.ll b/test/CodeGen/PowerPC/fast-isel-redefinition.ll
index 72422bda4433..60706a6e1438 100644
--- a/test/CodeGen/PowerPC/fast-isel-redefinition.ll
+++ b/test/CodeGen/PowerPC/fast-isel-redefinition.ll
@@ -1,10 +1,10 @@
-; RUN: llc -O0 -verify-machineinstrs -fast-isel-abort -optimize-regalloc -regalloc=basic -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s
+; RUN: llc -O0 -verify-machineinstrs -fast-isel-abort=1 -optimize-regalloc -regalloc=basic -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s
; This isn't exactly a useful set of command-line options, but check that it
; doesn't crash. (It crashed formerly on ARM, and proved useful in
; discovering a bug on PowerPC as well.)
define i32 @f(i32* %x) nounwind ssp {
- %y = getelementptr inbounds i32* %x, i32 5000
- %tmp103 = load i32* %y, align 4
+ %y = getelementptr inbounds i32, i32* %x, i32 5000
+ %tmp103 = load i32, i32* %y, align 4
ret i32 %tmp103
}
diff --git a/test/CodeGen/PowerPC/fast-isel-ret.ll b/test/CodeGen/PowerPC/fast-isel-ret.ll
index ae34fbf7bfe1..1e4566d94dfd 100644
--- a/test/CodeGen/PowerPC/fast-isel-ret.ll
+++ b/test/CodeGen/PowerPC/fast-isel-ret.ll
@@ -1,8 +1,8 @@
; FIXME: FastISel currently returns false if it hits code that uses VSX
-; registers and with -fast-isel-abort turned on the test case will then fail.
+; registers and with -fast-isel-abort=1 turned on the test case will then fail.
; When fastisel better supports VSX fix up this test case.
;
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
define zeroext i1 @rettrue() nounwind uwtable ssp {
entry:
diff --git a/test/CodeGen/PowerPC/fast-isel-shifter.ll b/test/CodeGen/PowerPC/fast-isel-shifter.ll
index 198bfbecda63..c18f659dde13 100644
--- a/test/CodeGen/PowerPC/fast-isel-shifter.ll
+++ b/test/CodeGen/PowerPC/fast-isel-shifter.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
define i32 @shl() nounwind ssp {
entry:
diff --git a/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll b/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll
index 4bcacf009746..96cf67c869f9 100644
--- a/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll
+++ b/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll
@@ -6,12 +6,12 @@ define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
entry:
%ptr.addr = alloca i8*, align 8
%add = add i8 64, 64 ; 0x40 + 0x40
- %0 = load i8** %ptr.addr, align 8
+ %0 = load i8*, i8** %ptr.addr, align 8
; CHECK-LABEL: gep_promotion:
; CHECK: lbz {{[0-9]+}}, 0({{.*}})
- %arrayidx = getelementptr inbounds i8* %0, i8 %add
+ %arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
- %1 = load i8* %arrayidx, align 1
+ %1 = load i8, i8* %arrayidx, align 1
ret i8 %1
}
diff --git a/test/CodeGen/PowerPC/floatPSA.ll b/test/CodeGen/PowerPC/floatPSA.ll
index f14c73630a6f..cff95d591c9a 100644
--- a/test/CodeGen/PowerPC/floatPSA.ll
+++ b/test/CodeGen/PowerPC/floatPSA.ll
@@ -37,7 +37,7 @@ entry:
store float %l, float* %l.addr, align 4
store float %m, float* %m.addr, align 4
store float %n, float* %n.addr, align 4
- %0 = load float* %n.addr, align 4
+ %0 = load float, float* %n.addr, align 4
ret float %0
}
@@ -73,20 +73,20 @@ entry:
store float 1.200000e+01, float* %l, align 4
store float 1.300000e+01, float* %m, align 4
store float 1.400000e+01, float* %n, align 4
- %0 = load float* %a, align 4
- %1 = load float* %b, align 4
- %2 = load float* %c, align 4
- %3 = load float* %d, align 4
- %4 = load float* %e, align 4
- %5 = load float* %f, align 4
- %6 = load float* %g, align 4
- %7 = load float* %h, align 4
- %8 = load float* %i, align 4
- %9 = load float* %j, align 4
- %10 = load float* %k, align 4
- %11 = load float* %l, align 4
- %12 = load float* %m, align 4
- %13 = load float* %n, align 4
+ %0 = load float, float* %a, align 4
+ %1 = load float, float* %b, align 4
+ %2 = load float, float* %c, align 4
+ %3 = load float, float* %d, align 4
+ %4 = load float, float* %e, align 4
+ %5 = load float, float* %f, align 4
+ %6 = load float, float* %g, align 4
+ %7 = load float, float* %h, align 4
+ %8 = load float, float* %i, align 4
+ %9 = load float, float* %j, align 4
+ %10 = load float, float* %k, align 4
+ %11 = load float, float* %l, align 4
+ %12 = load float, float* %m, align 4
+ %13 = load float, float* %n, align 4
%call = call float @bar(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10, float %11, float %12, float %13)
ret float %call
}
diff --git a/test/CodeGen/PowerPC/flt-preinc.ll b/test/CodeGen/PowerPC/flt-preinc.ll
new file mode 100644
index 000000000000..c0e3d3a21e19
--- /dev/null
+++ b/test/CodeGen/PowerPC/flt-preinc.ll
@@ -0,0 +1,40 @@
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readonly
+define float @tf(float* nocapture readonly %i, i32 signext %o) #0 {
+entry:
+ %idx.ext = sext i32 %o to i64
+ %add.ptr = getelementptr inbounds float, float* %i, i64 %idx.ext
+ %0 = load float, float* %add.ptr, align 4
+ %add.ptr.sum = add nsw i64 %idx.ext, 1
+ %add.ptr3 = getelementptr inbounds float, float* %i, i64 %add.ptr.sum
+ %1 = load float, float* %add.ptr3, align 4
+ %add = fadd float %0, %1
+ ret float %add
+
+; CHECK-LABEL: @tf
+; CHECK: lfsux
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readonly
+define double @td(double* nocapture readonly %i, i32 signext %o) #0 {
+entry:
+ %idx.ext = sext i32 %o to i64
+ %add.ptr = getelementptr inbounds double, double* %i, i64 %idx.ext
+ %0 = load double, double* %add.ptr, align 8
+ %add.ptr.sum = add nsw i64 %idx.ext, 1
+ %add.ptr3 = getelementptr inbounds double, double* %i, i64 %add.ptr.sum
+ %1 = load double, double* %add.ptr3, align 8
+ %add = fadd double %0, %1
+ ret double %add
+
+; CHECK-LABEL: @td
+; CHECK: lfdux
+; CHECK: blr
+}
+
+attributes #0 = { nounwind readonly }
+
diff --git a/test/CodeGen/PowerPC/fma-assoc.ll b/test/CodeGen/PowerPC/fma-assoc.ll
index dc1316e5e24f..3044dd09128c 100644
--- a/test/CodeGen/PowerPC/fma-assoc.ll
+++ b/test/CodeGen/PowerPC/fma-assoc.ll
@@ -3,11 +3,11 @@
define double @test_FMADD_ASSOC1(double %A, double %B, double %C,
double %D, double %E) {
- %F = fmul double %A, %B ; <double> [#uses=1]
- %G = fmul double %C, %D ; <double> [#uses=1]
- %H = fadd double %F, %G ; <double> [#uses=1]
- %I = fadd double %H, %E ; <double> [#uses=1]
- ret double %I
+ %F = fmul double %A, %B ; <double> [#uses=1]
+ %G = fmul double %C, %D ; <double> [#uses=1]
+ %H = fadd double %F, %G ; <double> [#uses=1]
+ %I = fadd double %H, %E ; <double> [#uses=1]
+ ret double %I
; CHECK-LABEL: test_FMADD_ASSOC1:
; CHECK: fmadd
; CHECK-NEXT: fmadd
@@ -22,11 +22,11 @@ define double @test_FMADD_ASSOC1(double %A, double %B, double %C,
define double @test_FMADD_ASSOC2(double %A, double %B, double %C,
double %D, double %E) {
- %F = fmul double %A, %B ; <double> [#uses=1]
- %G = fmul double %C, %D ; <double> [#uses=1]
- %H = fadd double %F, %G ; <double> [#uses=1]
- %I = fadd double %E, %H ; <double> [#uses=1]
- ret double %I
+ %F = fmul double %A, %B ; <double> [#uses=1]
+ %G = fmul double %C, %D ; <double> [#uses=1]
+ %H = fadd double %F, %G ; <double> [#uses=1]
+ %I = fadd double %E, %H ; <double> [#uses=1]
+ ret double %I
; CHECK-LABEL: test_FMADD_ASSOC2:
; CHECK: fmadd
; CHECK-NEXT: fmadd
@@ -41,11 +41,11 @@ define double @test_FMADD_ASSOC2(double %A, double %B, double %C,
define double @test_FMSUB_ASSOC1(double %A, double %B, double %C,
double %D, double %E) {
- %F = fmul double %A, %B ; <double> [#uses=1]
- %G = fmul double %C, %D ; <double> [#uses=1]
- %H = fadd double %F, %G ; <double> [#uses=1]
- %I = fsub double %H, %E ; <double> [#uses=1]
- ret double %I
+ %F = fmul double %A, %B ; <double> [#uses=1]
+ %G = fmul double %C, %D ; <double> [#uses=1]
+ %H = fadd double %F, %G ; <double> [#uses=1]
+ %I = fsub double %H, %E ; <double> [#uses=1]
+ ret double %I
; CHECK-LABEL: test_FMSUB_ASSOC1:
; CHECK: fmsub
; CHECK-NEXT: fmadd
@@ -60,11 +60,11 @@ define double @test_FMSUB_ASSOC1(double %A, double %B, double %C,
define double @test_FMSUB_ASSOC2(double %A, double %B, double %C,
double %D, double %E) {
- %F = fmul double %A, %B ; <double> [#uses=1]
- %G = fmul double %C, %D ; <double> [#uses=1]
- %H = fadd double %F, %G ; <double> [#uses=1]
- %I = fsub double %E, %H ; <double> [#uses=1]
- ret double %I
+ %F = fmul double %A, %B ; <double> [#uses=1]
+ %G = fmul double %C, %D ; <double> [#uses=1]
+ %H = fadd double %F, %G ; <double> [#uses=1]
+ %I = fsub double %E, %H ; <double> [#uses=1]
+ ret double %I
; CHECK-LABEL: test_FMSUB_ASSOC2:
; CHECK: fnmsub
; CHECK-NEXT: fnmsub
@@ -77,3 +77,159 @@ define double @test_FMSUB_ASSOC2(double %A, double %B, double %C,
; CHECK-VSX-NEXT: blr
}
+define double @test_FMADD_ASSOC_EXT1(float %A, float %B, double %C,
+ double %D, double %E) {
+ %F = fmul float %A, %B ; <float> [#uses=1]
+ %G = fpext float %F to double ; <double> [#uses=1]
+ %H = fmul double %C, %D ; <double> [#uses=1]
+ %I = fadd double %H, %G ; <double> [#uses=1]
+ %J = fadd double %I, %E ; <double> [#uses=1]
+ ret double %J
+; CHECK-LABEL: test_FMADD_ASSOC_EXT1:
+; CHECK: fmadd
+; CHECK-NEXT: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMADD_ASSOC_EXT1:
+; CHECK-VSX: xsmaddmdp
+; CHECK-VSX-NEXT: xsmaddadp
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMADD_ASSOC_EXT2(float %A, float %B, float %C,
+ float %D, double %E) {
+ %F = fmul float %A, %B ; <float> [#uses=1]
+ %G = fmul float %C, %D ; <float> [#uses=1]
+ %H = fadd float %F, %G ; <float> [#uses=1]
+ %I = fpext float %H to double ; <double> [#uses=1]
+ %J = fadd double %I, %E ; <double> [#uses=1]
+ ret double %J
+; CHECK-LABEL: test_FMADD_ASSOC_EXT2:
+; CHECK: fmadd
+; CHECK-NEXT: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMADD_ASSOC_EXT2:
+; CHECK-VSX: xsmaddmdp
+; CHECK-VSX-NEXT: xsmaddadp
+; CHECK-VSX-NEXT: fmr
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMADD_ASSOC_EXT3(float %A, float %B, double %C,
+ double %D, double %E) {
+ %F = fmul float %A, %B ; <float> [#uses=1]
+ %G = fpext float %F to double ; <double> [#uses=1]
+ %H = fmul double %C, %D ; <double> [#uses=1]
+ %I = fadd double %H, %G ; <double> [#uses=1]
+ %J = fadd double %E, %I ; <double> [#uses=1]
+ ret double %J
+; CHECK-LABEL: test_FMADD_ASSOC_EXT3:
+; CHECK: fmadd
+; CHECK-NEXT: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMADD_ASSOC_EXT3:
+; CHECK-VSX: xsmaddmdp
+; CHECK-VSX-NEXT: xsmaddadp
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMADD_ASSOC_EXT4(float %A, float %B, float %C,
+ float %D, double %E) {
+ %F = fmul float %A, %B ; <float> [#uses=1]
+ %G = fmul float %C, %D ; <float> [#uses=1]
+ %H = fadd float %F, %G ; <float> [#uses=1]
+ %I = fpext float %H to double ; <double> [#uses=1]
+ %J = fadd double %E, %I ; <double> [#uses=1]
+ ret double %J
+; CHECK-LABEL: test_FMADD_ASSOC_EXT4:
+; CHECK: fmadd
+; CHECK-NEXT: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMADD_ASSOC_EXT4:
+; CHECK-VSX: xsmaddmdp
+; CHECK-VSX-NEXT: xsmaddadp
+; CHECK-VSX-NEXT: fmr
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMSUB_ASSOC_EXT1(float %A, float %B, double %C,
+ double %D, double %E) {
+ %F = fmul float %A, %B ; <float> [#uses=1]
+ %G = fpext float %F to double ; <double> [#uses=1]
+ %H = fmul double %C, %D ; <double> [#uses=1]
+ %I = fadd double %H, %G ; <double> [#uses=1]
+ %J = fsub double %I, %E ; <double> [#uses=1]
+ ret double %J
+; CHECK-LABEL: test_FMSUB_ASSOC_EXT1:
+; CHECK: fmsub
+; CHECK-NEXT: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMSUB_ASSOC_EXT1:
+; CHECK-VSX: xsmsubmdp
+; CHECK-VSX-NEXT: xsmaddadp
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMSUB_ASSOC_EXT2(float %A, float %B, float %C,
+ float %D, double %E) {
+ %F = fmul float %A, %B ; <float> [#uses=1]
+ %G = fmul float %C, %D ; <float> [#uses=1]
+ %H = fadd float %F, %G ; <float> [#uses=1]
+ %I = fpext float %H to double ; <double> [#uses=1]
+ %J = fsub double %I, %E ; <double> [#uses=1]
+ ret double %J
+; CHECK-LABEL: test_FMSUB_ASSOC_EXT2:
+; CHECK: fmsub
+; CHECK-NEXT: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMSUB_ASSOC_EXT2:
+; CHECK-VSX: xsmsubmdp
+; CHECK-VSX-NEXT: xsmaddadp
+; CHECK-VSX-NEXT: fmr
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMSUB_ASSOC_EXT3(float %A, float %B, double %C,
+ double %D, double %E) {
+ %F = fmul float %A, %B ; <float> [#uses=1]
+ %G = fpext float %F to double ; <double> [#uses=1]
+ %H = fmul double %C, %D ; <double> [#uses=1]
+ %I = fadd double %H, %G ; <double> [#uses=1]
+ %J = fsub double %E, %I ; <double> [#uses=1]
+ ret double %J
+; CHECK-LABEL: test_FMSUB_ASSOC_EXT3:
+; CHECK: fnmsub
+; CHECK-NEXT: fnmsub
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMSUB_ASSOC_EXT3:
+; CHECK-VSX: xsnmsubmdp
+; CHECK-VSX-NEXT: xsnmsubadp
+; CHECK-VSX-NEXT: fmr
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMSUB_ASSOC_EXT4(float %A, float %B, float %C,
+ float %D, double %E) {
+ %F = fmul float %A, %B ; <float> [#uses=1]
+ %G = fmul float %C, %D ; <float> [#uses=1]
+ %H = fadd float %F, %G ; <float> [#uses=1]
+ %I = fpext float %H to double ; <double> [#uses=1]
+ %J = fsub double %E, %I ; <double> [#uses=1]
+ ret double %J
+; CHECK-LABEL: test_FMSUB_ASSOC_EXT4:
+; CHECK: fnmsub
+; CHECK-NEXT: fnmsub
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMSUB_ASSOC_EXT4:
+; CHECK-VSX: xsnmsubmdp
+; CHECK-VSX-NEXT: xsnmsubadp
+; CHECK-VSX-NEXT: fmr
+; CHECK-VSX-NEXT: blr
+}
diff --git a/test/CodeGen/PowerPC/fma-ext.ll b/test/CodeGen/PowerPC/fma-ext.ll
index 56825ce8f227..da7c34ccb9d8 100644
--- a/test/CodeGen/PowerPC/fma-ext.ll
+++ b/test/CodeGen/PowerPC/fma-ext.ll
@@ -60,34 +60,34 @@ define double @test_FMSUB_EXT2(float %A, float %B, double %C) {
define double @test_FMSUB_EXT3(float %A, float %B, double %C) {
%D = fmul float %A, %B ; <float> [#uses=1]
- %E = fsub float -0.000000e+00, %D ; <float> [#uses=1]
+ %E = fsub float -0.000000e+00, %D ; <float> [#uses=1]
%F = fpext float %E to double ; <double> [#uses=1]
%G = fsub double %F, %C ; <double> [#uses=1]
ret double %G
; CHECK-LABEL: test_FMSUB_EXT3:
-; CHECK: fneg
-; CHECK-NEXT: fmsub
+; CHECK: fnmadd
+
; CHECK-NEXT: blr
; CHECK-VSX-LABEL: test_FMSUB_EXT3:
-; CHECK-VSX: xsnegdp
-; CHECK-VSX-NEXT: xsmsubmdp
+; CHECK-VSX: xsnmaddmdp
+
; CHECK-VSX-NEXT: blr
}
define double @test_FMSUB_EXT4(float %A, float %B, double %C) {
%D = fmul float %A, %B ; <float> [#uses=1]
%E = fpext float %D to double ; <double> [#uses=1]
- %F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
+ %F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
%G = fsub double %F, %C ; <double> [#uses=1]
ret double %G
; CHECK-LABEL: test_FMSUB_EXT4:
-; CHECK: fneg
-; CHECK-NEXT: fmsub
+; CHECK: fnmadd
+
; CHECK-NEXT: blr
; CHECK-VSX-LABEL: test_FMSUB_EXT4:
-; CHECK-VSX: xsnegdp
-; CHECK-VSX-NEXT: xsmsubmdp
+; CHECK-VSX: xsnmaddmdp
+
; CHECK-VSX-NEXT: blr
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll b/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll
new file mode 100644
index 000000000000..1d9b64823140
--- /dev/null
+++ b/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll
@@ -0,0 +1,426 @@
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64-unknown-unknown < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
+
+; Function Attrs: nounwind
+define zeroext i8 @_Z6testcff(float %arg) {
+entry:
+ %arg.addr = alloca float, align 4
+ store float %arg, float* %arg.addr, align 4
+ %0 = load float, float* %arg.addr, align 4
+ %conv = fptoui float %0 to i8
+ ret i8 %conv
+; CHECK-LABEL: @_Z6testcff
+; CHECK: xscvdpsxws [[CONVREG01:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG01]]
+}
+
+; Function Attrs: nounwind
+define float @_Z6testfcc(i8 zeroext %arg) {
+entry:
+ %arg.addr = alloca i8, align 1
+ store i8 %arg, i8* %arg.addr, align 1
+ %0 = load i8, i8* %arg.addr, align 1
+ %conv = uitofp i8 %0 to float
+ ret float %conv
+; CHECK-LABEL: @_Z6testfcc
+; CHECK: mtvsrwz [[MOVEREG01:[0-9]+]], 3
+; FIXME: Once we have XSCVUXDSP implemented, this will change
+; CHECK: fcfidus 1, [[MOVEREG01]]
+}
+
+; Function Attrs: nounwind
+define zeroext i8 @_Z6testcdd(double %arg) {
+entry:
+ %arg.addr = alloca double, align 8
+ store double %arg, double* %arg.addr, align 8
+ %0 = load double, double* %arg.addr, align 8
+ %conv = fptoui double %0 to i8
+ ret i8 %conv
+; CHECK-LABEL: @_Z6testcdd
+; CHECK: xscvdpsxws [[CONVREG02:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG02]]
+}
+
+; Function Attrs: nounwind
+define double @_Z6testdcc(i8 zeroext %arg) {
+entry:
+ %arg.addr = alloca i8, align 1
+ store i8 %arg, i8* %arg.addr, align 1
+ %0 = load i8, i8* %arg.addr, align 1
+ %conv = uitofp i8 %0 to double
+ ret double %conv
+; CHECK-LABEL: @_Z6testdcc
+; CHECK: mtvsrwz [[MOVEREG02:[0-9]+]], 3
+; CHECK: xscvuxddp 1, [[MOVEREG02]]
+}
+
+; Function Attrs: nounwind
+define zeroext i8 @_Z7testucff(float %arg) {
+entry:
+ %arg.addr = alloca float, align 4
+ store float %arg, float* %arg.addr, align 4
+ %0 = load float, float* %arg.addr, align 4
+ %conv = fptoui float %0 to i8
+ ret i8 %conv
+; CHECK-LABEL: @_Z7testucff
+; CHECK: xscvdpsxws [[CONVREG03:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG03]]
+}
+
+; Function Attrs: nounwind
+define float @_Z7testfuch(i8 zeroext %arg) {
+entry:
+ %arg.addr = alloca i8, align 1
+ store i8 %arg, i8* %arg.addr, align 1
+ %0 = load i8, i8* %arg.addr, align 1
+ %conv = uitofp i8 %0 to float
+ ret float %conv
+; CHECK-LABEL: @_Z7testfuch
+; CHECK: mtvsrwz [[MOVEREG03:[0-9]+]], 3
+; FIXME: Once we have XSCVUXDSP implemented, this will change
+; CHECK: fcfidus 1, [[MOVEREG03]]
+}
+
+; Function Attrs: nounwind
+define zeroext i8 @_Z7testucdd(double %arg) {
+entry:
+ %arg.addr = alloca double, align 8
+ store double %arg, double* %arg.addr, align 8
+ %0 = load double, double* %arg.addr, align 8
+ %conv = fptoui double %0 to i8
+ ret i8 %conv
+; CHECK-LABEL: @_Z7testucdd
+; CHECK: xscvdpsxws [[CONVREG04:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG04]]
+}
+
+; Function Attrs: nounwind
+define double @_Z7testduch(i8 zeroext %arg) {
+entry:
+ %arg.addr = alloca i8, align 1
+ store i8 %arg, i8* %arg.addr, align 1
+ %0 = load i8, i8* %arg.addr, align 1
+ %conv = uitofp i8 %0 to double
+ ret double %conv
+; CHECK-LABEL: @_Z7testduch
+; CHECK: mtvsrwz [[MOVEREG04:[0-9]+]], 3
+; CHECK: xscvuxddp 1, [[MOVEREG04]]
+}
+
+; Function Attrs: nounwind
+define signext i16 @_Z6testsff(float %arg) {
+entry:
+ %arg.addr = alloca float, align 4
+ store float %arg, float* %arg.addr, align 4
+ %0 = load float, float* %arg.addr, align 4
+ %conv = fptosi float %0 to i16
+ ret i16 %conv
+; CHECK-LABEL: @_Z6testsff
+; CHECK: xscvdpsxws [[CONVREG05:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG05]]
+}
+
+; Function Attrs: nounwind
+define float @_Z6testfss(i16 signext %arg) {
+entry:
+ %arg.addr = alloca i16, align 2
+ store i16 %arg, i16* %arg.addr, align 2
+ %0 = load i16, i16* %arg.addr, align 2
+ %conv = sitofp i16 %0 to float
+ ret float %conv
+; CHECK-LABEL: @_Z6testfss
+; CHECK: mtvsrwa [[MOVEREG05:[0-9]+]], 3
+; FIXME: Once we have XSCVSXDSP implemented, this will change
+; CHECK: fcfids 1, [[MOVEREG05]]
+}
+
+; Function Attrs: nounwind
+define signext i16 @_Z6testsdd(double %arg) {
+entry:
+ %arg.addr = alloca double, align 8
+ store double %arg, double* %arg.addr, align 8
+ %0 = load double, double* %arg.addr, align 8
+ %conv = fptosi double %0 to i16
+ ret i16 %conv
+; CHECK-LABEL: @_Z6testsdd
+; CHECK: xscvdpsxws [[CONVREG06:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG06]]
+}
+
+; Function Attrs: nounwind
+define double @_Z6testdss(i16 signext %arg) {
+entry:
+ %arg.addr = alloca i16, align 2
+ store i16 %arg, i16* %arg.addr, align 2
+ %0 = load i16, i16* %arg.addr, align 2
+ %conv = sitofp i16 %0 to double
+ ret double %conv
+; CHECK-LABEL: @_Z6testdss
+; CHECK: mtvsrwa [[MOVEREG06:[0-9]+]], 3
+; CHECK: xscvsxddp 1, [[MOVEREG06]]
+}
+
+; Function Attrs: nounwind
+define zeroext i16 @_Z7testusff(float %arg) {
+entry:
+ %arg.addr = alloca float, align 4
+ store float %arg, float* %arg.addr, align 4
+ %0 = load float, float* %arg.addr, align 4
+ %conv = fptoui float %0 to i16
+ ret i16 %conv
+; CHECK-LABEL: @_Z7testusff
+; CHECK: xscvdpsxws [[CONVREG07:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG07]]
+}
+
+; Function Attrs: nounwind
+define float @_Z7testfust(i16 zeroext %arg) {
+entry:
+ %arg.addr = alloca i16, align 2
+ store i16 %arg, i16* %arg.addr, align 2
+ %0 = load i16, i16* %arg.addr, align 2
+ %conv = uitofp i16 %0 to float
+ ret float %conv
+; CHECK-LABEL: @_Z7testfust
+; CHECK: mtvsrwz [[MOVEREG07:[0-9]+]], 3
+; FIXME: Once we have XSCVUXDSP implemented, this will change
+; CHECK: fcfidus 1, [[MOVEREG07]]
+}
+
+; Function Attrs: nounwind
+define zeroext i16 @_Z7testusdd(double %arg) {
+entry:
+ %arg.addr = alloca double, align 8
+ store double %arg, double* %arg.addr, align 8
+ %0 = load double, double* %arg.addr, align 8
+ %conv = fptoui double %0 to i16
+ ret i16 %conv
+; CHECK-LABEL: @_Z7testusdd
+; CHECK: xscvdpsxws [[CONVREG08:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG08]]
+}
+
+; Function Attrs: nounwind
+define double @_Z7testdust(i16 zeroext %arg) {
+entry:
+ %arg.addr = alloca i16, align 2
+ store i16 %arg, i16* %arg.addr, align 2
+ %0 = load i16, i16* %arg.addr, align 2
+ %conv = uitofp i16 %0 to double
+ ret double %conv
+; CHECK-LABEL: @_Z7testdust
+; CHECK: mtvsrwz [[MOVEREG08:[0-9]+]], 3
+; CHECK: xscvuxddp 1, [[MOVEREG08]]
+}
+
+; Function Attrs: nounwind
+define signext i32 @_Z6testiff(float %arg) {
+entry:
+ %arg.addr = alloca float, align 4
+ store float %arg, float* %arg.addr, align 4
+ %0 = load float, float* %arg.addr, align 4
+ %conv = fptosi float %0 to i32
+ ret i32 %conv
+; CHECK-LABEL: @_Z6testiff
+; CHECK: xscvdpsxws [[CONVREG09:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG09]]
+}
+
+; Function Attrs: nounwind
+define float @_Z6testfii(i32 signext %arg) {
+entry:
+ %arg.addr = alloca i32, align 4
+ store i32 %arg, i32* %arg.addr, align 4
+ %0 = load i32, i32* %arg.addr, align 4
+ %conv = sitofp i32 %0 to float
+ ret float %conv
+; CHECK-LABEL: @_Z6testfii
+; CHECK: mtvsrwa [[MOVEREG09:[0-9]+]], 3
+; FIXME: Once we have XSCVSXDSP implemented, this will change
+; CHECK: fcfids 1, [[MOVEREG09]]
+}
+
+; Function Attrs: nounwind
+define signext i32 @_Z6testidd(double %arg) {
+entry:
+ %arg.addr = alloca double, align 8
+ store double %arg, double* %arg.addr, align 8
+ %0 = load double, double* %arg.addr, align 8
+ %conv = fptosi double %0 to i32
+ ret i32 %conv
+; CHECK-LABEL: @_Z6testidd
+; CHECK: xscvdpsxws [[CONVREG10:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG10]]
+}
+
+; Function Attrs: nounwind
+define double @_Z6testdii(i32 signext %arg) {
+entry:
+ %arg.addr = alloca i32, align 4
+ store i32 %arg, i32* %arg.addr, align 4
+ %0 = load i32, i32* %arg.addr, align 4
+ %conv = sitofp i32 %0 to double
+ ret double %conv
+; CHECK-LABEL: @_Z6testdii
+; CHECK: mtvsrwa [[MOVEREG10:[0-9]+]], 3
+; CHECK: xscvsxddp 1, [[MOVEREG10]]
+}
+
+; Function Attrs: nounwind
+define zeroext i32 @_Z7testuiff(float %arg) {
+entry:
+ %arg.addr = alloca float, align 4
+ store float %arg, float* %arg.addr, align 4
+ %0 = load float, float* %arg.addr, align 4
+ %conv = fptoui float %0 to i32
+ ret i32 %conv
+; CHECK-LABEL: @_Z7testuiff
+; CHECK: xscvdpuxws [[CONVREG11:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG11]]
+}
+
+; Function Attrs: nounwind
+define float @_Z7testfuij(i32 zeroext %arg) {
+entry:
+ %arg.addr = alloca i32, align 4
+ store i32 %arg, i32* %arg.addr, align 4
+ %0 = load i32, i32* %arg.addr, align 4
+ %conv = uitofp i32 %0 to float
+ ret float %conv
+; CHECK-LABEL: @_Z7testfuij
+; CHECK: mtvsrwz [[MOVEREG11:[0-9]+]], 3
+; FIXME: Once we have XSCVUXDSP implemented, this will change
+; CHECK: fcfidus 1, [[MOVEREG11]]
+}
+
+; Function Attrs: nounwind
+define zeroext i32 @_Z7testuidd(double %arg) {
+entry:
+ %arg.addr = alloca double, align 8
+ store double %arg, double* %arg.addr, align 8
+ %0 = load double, double* %arg.addr, align 8
+ %conv = fptoui double %0 to i32
+ ret i32 %conv
+; CHECK-LABEL: @_Z7testuidd
+; CHECK: xscvdpuxws [[CONVREG12:[0-9]+]], 1
+; CHECK: mfvsrwz 3, [[CONVREG12]]
+}
+
+; Function Attrs: nounwind
+define double @_Z7testduij(i32 zeroext %arg) {
+entry:
+ %arg.addr = alloca i32, align 4
+ store i32 %arg, i32* %arg.addr, align 4
+ %0 = load i32, i32* %arg.addr, align 4
+ %conv = uitofp i32 %0 to double
+ ret double %conv
+; CHECK-LABEL: @_Z7testduij
+; CHECK: mtvsrwz [[MOVEREG12:[0-9]+]], 3
+; CHECK: xscvuxddp 1, [[MOVEREG12]]
+}
+
+; Function Attrs: nounwind
+define i64 @_Z7testllff(float %arg) {
+entry:
+ %arg.addr = alloca float, align 4
+ store float %arg, float* %arg.addr, align 4
+ %0 = load float, float* %arg.addr, align 4
+ %conv = fptosi float %0 to i64
+ ret i64 %conv
+; CHECK-LABEL: @_Z7testllff
+; CHECK: xscvdpsxds [[CONVREG13:[0-9]+]], 1
+; CHECK: mfvsrd 3, [[CONVREG13]]
+}
+
+; Function Attrs: nounwind
+define float @_Z7testfllx(i64 %arg) {
+entry:
+ %arg.addr = alloca i64, align 8
+ store i64 %arg, i64* %arg.addr, align 8
+ %0 = load i64, i64* %arg.addr, align 8
+ %conv = sitofp i64 %0 to float
+ ret float %conv
+; CHECK-LABEL:@_Z7testfllx
+; CHECK: mtvsrd [[MOVEREG13:[0-9]+]], 3
+; FIXME: Once we have XSCVSXDSP implemented, this will change
+; CHECK: fcfids 1, [[MOVEREG13]]
+}
+
+; Function Attrs: nounwind
+define i64 @_Z7testlldd(double %arg) {
+entry:
+ %arg.addr = alloca double, align 8
+ store double %arg, double* %arg.addr, align 8
+ %0 = load double, double* %arg.addr, align 8
+ %conv = fptosi double %0 to i64
+ ret i64 %conv
+; CHECK-LABEL: @_Z7testlldd
+; CHECK: xscvdpsxds [[CONVREG14:[0-9]+]], 1
+; CHECK: mfvsrd 3, [[CONVREG14]]
+}
+
+; Function Attrs: nounwind
+define double @_Z7testdllx(i64 %arg) {
+entry:
+ %arg.addr = alloca i64, align 8
+ store i64 %arg, i64* %arg.addr, align 8
+ %0 = load i64, i64* %arg.addr, align 8
+ %conv = sitofp i64 %0 to double
+ ret double %conv
+; CHECK-LABEL: @_Z7testdllx
+; CHECK: mtvsrd [[MOVEREG14:[0-9]+]], 3
+; CHECK: xscvsxddp 1, [[MOVEREG14]]
+}
+
+; Function Attrs: nounwind
+define i64 @_Z8testullff(float %arg) {
+entry:
+ %arg.addr = alloca float, align 4
+ store float %arg, float* %arg.addr, align 4
+ %0 = load float, float* %arg.addr, align 4
+ %conv = fptoui float %0 to i64
+ ret i64 %conv
+; CHECK-LABEL: @_Z8testullff
+; CHECK: xscvdpuxds [[CONVREG15:[0-9]+]], 1
+; CHECK: mfvsrd 3, [[CONVREG15]]
+}
+
+; Function Attrs: nounwind
+define float @_Z8testfully(i64 %arg) {
+entry:
+ %arg.addr = alloca i64, align 8
+ store i64 %arg, i64* %arg.addr, align 8
+ %0 = load i64, i64* %arg.addr, align 8
+ %conv = uitofp i64 %0 to float
+ ret float %conv
+; CHECK-LABEL: @_Z8testfully
+; CHECK: mtvsrd [[MOVEREG15:[0-9]+]], 3
+; FIXME: Once we have XSCVUXDSP implemented, this will change
+; CHECK: fcfidus 1, [[MOVEREG15]]
+}
+
+; Function Attrs: nounwind
+define i64 @_Z8testulldd(double %arg) {
+entry:
+ %arg.addr = alloca double, align 8
+ store double %arg, double* %arg.addr, align 8
+ %0 = load double, double* %arg.addr, align 8
+ %conv = fptoui double %0 to i64
+ ret i64 %conv
+; CHECK-LABEL: @_Z8testulldd
+; CHECK: xscvdpuxds [[CONVREG16:[0-9]+]], 1
+; CHECK: mfvsrd 3, [[CONVREG16]]
+}
+
+; Function Attrs: nounwind
+define double @_Z8testdully(i64 %arg) {
+entry:
+ %arg.addr = alloca i64, align 8
+ store i64 %arg, i64* %arg.addr, align 8
+ %0 = load i64, i64* %arg.addr, align 8
+ %conv = uitofp i64 %0 to double
+ ret double %conv
+; CHECK-LABEL: @_Z8testdully
+; CHECK: mtvsrd [[MOVEREG16:[0-9]+]], 3
+; CHECK: xscvuxddp 1, [[MOVEREG16]]
+}
diff --git a/test/CodeGen/PowerPC/fp-to-int-ext.ll b/test/CodeGen/PowerPC/fp-to-int-ext.ll
index bfacd89ca1a2..393fe04d41c6 100644
--- a/test/CodeGen/PowerPC/fp-to-int-ext.ll
+++ b/test/CodeGen/PowerPC/fp-to-int-ext.ll
@@ -5,7 +5,7 @@ target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
define double @foo1(i32* %x) #0 {
entry:
- %0 = load i32* %x, align 4
+ %0 = load i32, i32* %x, align 4
%conv = sext i32 %0 to i64
%conv1 = sitofp i64 %conv to double
ret double %conv1
@@ -18,7 +18,7 @@ entry:
define double @foo2(i32* %x) #0 {
entry:
- %0 = load i32* %x, align 4
+ %0 = load i32, i32* %x, align 4
%conv = zext i32 %0 to i64
%conv1 = sitofp i64 %conv to double
ret double %conv1
@@ -31,7 +31,7 @@ entry:
define double @foo3(i32* %x) #0 {
entry:
- %0 = load i32* %x, align 4
+ %0 = load i32, i32* %x, align 4
%1 = add i32 %0, 8
%conv = zext i32 %1 to i64
%conv1 = sitofp i64 %conv to double
@@ -49,7 +49,7 @@ entry:
define double @foo4(i32* %x) #0 {
entry:
- %0 = load i32* %x, align 4
+ %0 = load i32, i32* %x, align 4
%1 = add i32 %0, 8
%conv = sext i32 %1 to i64
%conv1 = sitofp i64 %conv to double
diff --git a/test/CodeGen/PowerPC/frounds.ll b/test/CodeGen/PowerPC/frounds.ll
index 8eeadc3a3469..49f3465e9e22 100644
--- a/test/CodeGen/PowerPC/frounds.ll
+++ b/test/CodeGen/PowerPC/frounds.ll
@@ -7,12 +7,12 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp1 = call i32 @llvm.flt.rounds( ) ; <i32> [#uses=1]
store i32 %tmp1, i32* %tmp, align 4
- %tmp2 = load i32* %tmp, align 4 ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
store i32 %tmp2, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval3 = load i32* %retval ; <i32> [#uses=1]
+ %retval3 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval3
}
diff --git a/test/CodeGen/PowerPC/glob-comp-aa-crash.ll b/test/CodeGen/PowerPC/glob-comp-aa-crash.ll
index 2ea036f83496..66df6bb8669d 100644
--- a/test/CodeGen/PowerPC/glob-comp-aa-crash.ll
+++ b/test/CodeGen/PowerPC/glob-comp-aa-crash.ll
@@ -23,17 +23,17 @@ entry:
%ref.tmp = alloca %"class.std::__exception_ptr::exception_ptr", align 8
%tmp = alloca { i64, i64 }, align 8
%agg.tmp = alloca %"class.std::__exception_ptr::exception_ptr", align 8
- %__mut_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 2
- %__m_.i.i = getelementptr inbounds %"class.std::__1::unique_lock"* %__lk, i64 0, i32 0
+ %__mut_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 2
+ %__m_.i.i = getelementptr inbounds %"class.std::__1::unique_lock", %"class.std::__1::unique_lock"* %__lk, i64 0, i32 0
store %"class.std::__1::mutex"* %__mut_, %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
- %__owns_.i.i = getelementptr inbounds %"class.std::__1::unique_lock"* %__lk, i64 0, i32 1
+ %__owns_.i.i = getelementptr inbounds %"class.std::__1::unique_lock", %"class.std::__1::unique_lock"* %__lk, i64 0, i32 1
store i8 1, i8* %__owns_.i.i, align 8, !tbaa !6
call void @_ZNSt3__15mutex4lockEv(%"class.std::__1::mutex"* %__mut_) #4
invoke void @_ZNSt3__117__assoc_sub_state10__sub_waitERNS_11unique_lockINS_5mutexEEE(%"class.std::__1::__assoc_sub_state"* %this, %"class.std::__1::unique_lock"* %__lk) #4
to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %entry
- %__exception_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 1
+ %__exception_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 1
%0 = bitcast { i64, i64 }* %tmp to i8*
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 16, i32 8, i1 false)
call void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp, { i64, i64 }* byval %tmp) #5
@@ -65,12 +65,12 @@ lpad3: ; preds = %if.then
br label %ehcleanup
if.end: ; preds = %invoke.cont
- %7 = load i8* %__owns_.i.i, align 8, !tbaa !6, !range !4
+ %7 = load i8, i8* %__owns_.i.i, align 8, !tbaa !6, !range !4
%tobool.i.i = icmp eq i8 %7, 0
br i1 %tobool.i.i, label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit, label %if.then.i.i
if.then.i.i: ; preds = %if.end
- %8 = load %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
+ %8 = load %"class.std::__1::mutex"*, %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
call void @_ZNSt3__15mutex6unlockEv(%"class.std::__1::mutex"* %8) #5
br label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit
@@ -80,12 +80,12 @@ _ZNSt3__111unique_lockINS_5mutexEED1Ev.exit: ; preds = %if.then.i.i, %if.en
ehcleanup: ; preds = %lpad3, %lpad
%exn.slot.0 = phi i8* [ %5, %lpad3 ], [ %2, %lpad ]
%ehselector.slot.0 = phi i32 [ %6, %lpad3 ], [ %3, %lpad ]
- %9 = load i8* %__owns_.i.i, align 8, !tbaa !6, !range !4
+ %9 = load i8, i8* %__owns_.i.i, align 8, !tbaa !6, !range !4
%tobool.i.i9 = icmp eq i8 %9, 0
br i1 %tobool.i.i9, label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit12, label %if.then.i.i11
if.then.i.i11: ; preds = %ehcleanup
- %10 = load %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
+ %10 = load %"class.std::__1::mutex"*, %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
call void @_ZNSt3__15mutex6unlockEv(%"class.std::__1::mutex"* %10) #5
br label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit12
diff --git a/test/CodeGen/PowerPC/hello.ll b/test/CodeGen/PowerPC/hello.ll
index ea27e9257a65..da7fb31526b9 100644
--- a/test/CodeGen/PowerPC/hello.ll
+++ b/test/CodeGen/PowerPC/hello.ll
@@ -5,7 +5,7 @@
@.str = internal constant [13 x i8] c"Hello World!\00"
define i32 @main() {
- %tmp2 = tail call i32 @puts( i8* getelementptr ([13 x i8]* @.str, i32 0, i64 0) )
+ %tmp2 = tail call i32 @puts( i8* getelementptr ([13 x i8], [13 x i8]* @.str, i32 0, i64 0) )
ret i32 0
}
diff --git a/test/CodeGen/PowerPC/hidden-vis-2.ll b/test/CodeGen/PowerPC/hidden-vis-2.ll
index e9e2c0a93a0d..3eb9dbd21ade 100644
--- a/test/CodeGen/PowerPC/hidden-vis-2.ll
+++ b/test/CodeGen/PowerPC/hidden-vis-2.ll
@@ -5,8 +5,8 @@
define i32 @t() nounwind readonly {
entry:
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- %1 = load i32* @y, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @x, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* @y, align 4 ; <i32> [#uses=1]
%2 = add i32 %1, %0 ; <i32> [#uses=1]
ret i32 %2
}
diff --git a/test/CodeGen/PowerPC/hidden-vis.ll b/test/CodeGen/PowerPC/hidden-vis.ll
index b2cc1431ebde..bcb6723402b0 100644
--- a/test/CodeGen/PowerPC/hidden-vis.ll
+++ b/test/CodeGen/PowerPC/hidden-vis.ll
@@ -4,6 +4,6 @@
define i32 @t() nounwind readonly {
entry:
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @x, align 4 ; <i32> [#uses=1]
ret i32 %0
}
diff --git a/test/CodeGen/PowerPC/htm.ll b/test/CodeGen/PowerPC/htm.ll
new file mode 100644
index 000000000000..0e4304dc163b
--- /dev/null
+++ b/test/CodeGen/PowerPC/htm.ll
@@ -0,0 +1,125 @@
+; RUN: llc -mcpu=pwr8 -mattr=+htm < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define zeroext i32 @test1() {
+entry:
+ %0 = tail call i32 @llvm.ppc.tbegin(i32 0)
+ ret i32 %0
+
+; CHECK-LABEL: @test1
+; CHECK: tbegin. 0
+; CHECK: mfocrf [[REGISTER1:[0-9]+]], 128
+; CHECK: rlwinm [[REGISTER2:[0-9]+]], [[REGISTER1]], 3, 31, 31
+; CHECK: xori {{[0-9]+}}, [[REGISTER2]], 1
+}
+
+declare i32 @llvm.ppc.tbegin(i32) #1
+
+
+define zeroext i32 @test2() {
+entry:
+ %0 = tail call i32 @llvm.ppc.tend(i32 0)
+ ret i32 %0
+; CHECK-LABEL: @test2
+; CHECK: tend. 0
+; CHECK: mfocrf {{[0-9]+}}, 128
+}
+
+declare i32 @llvm.ppc.tend(i32)
+
+
+define void @test3() {
+entry:
+ %0 = tail call i32 @llvm.ppc.tabort(i32 0)
+ %1 = tail call i32 @llvm.ppc.tabortdc(i32 0, i32 1, i32 2)
+ %2 = tail call i32 @llvm.ppc.tabortdci(i32 0, i32 1, i32 2)
+ %3 = tail call i32 @llvm.ppc.tabortwc(i32 0, i32 1, i32 2)
+ %4 = tail call i32 @llvm.ppc.tabortwci(i32 0, i32 1, i32 2)
+ ret void
+; CHECK-LABEL: @test3
+; CHECK: tabort. {{[0-9]+}}
+; CHECK: tabortdc. 0, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: tabortdci. 0, {{[0-9]+}}, 2
+; CHECK: tabortwc. 0, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: tabortwci. 0, {{[0-9]+}}, 2
+}
+
+declare i32 @llvm.ppc.tabort(i32)
+declare i32 @llvm.ppc.tabortdc(i32, i32, i32)
+declare i32 @llvm.ppc.tabortdci(i32, i32, i32)
+declare i32 @llvm.ppc.tabortwc(i32, i32, i32)
+declare i32 @llvm.ppc.tabortwci(i32, i32, i32)
+
+
+define void @test4() {
+entry:
+ %0 = tail call i32 @llvm.ppc.tendall()
+ %1 = tail call i32 @llvm.ppc.tresume()
+ %2 = tail call i32 @llvm.ppc.tsuspend()
+ ret void
+; CHECK-LABEL: @test4
+; CHECK: tend. 1
+; CHECK: tsr. 1
+; CHECK: tsr. 0
+}
+
+declare i32 @llvm.ppc.tendall()
+declare i32 @llvm.ppc.tresume()
+declare i32 @llvm.ppc.tsuspend()
+
+
+define void @test5(i64 %v) {
+entry:
+ tail call void @llvm.ppc.set.texasr(i64 %v)
+ tail call void @llvm.ppc.set.texasru(i64 %v)
+ tail call void @llvm.ppc.set.tfhar(i64 %v)
+ tail call void @llvm.ppc.set.tfiar(i64 %v)
+ ret void
+; CHECK-LABEL: @test5
+; CHECK: mtspr 130, [[REG1:[0-9]+]]
+; CHECK: mtspr 131, [[REG2:[0-9]+]]
+; CHECK: mtspr 128, [[REG3:[0-9]+]]
+; CHECK: mtspr 129, [[REG4:[0-9]+]]
+}
+
+define i64 @test6() {
+entry:
+ %0 = tail call i64 @llvm.ppc.get.texasr()
+ ret i64 %0
+; CHECK-LABEL: @test6
+; CHECK: mfspr [[REG1:[0-9]+]], 130
+}
+
+define i64 @test7() {
+entry:
+ %0 = tail call i64 @llvm.ppc.get.texasru()
+ ret i64 %0
+; CHECK-LABEL: @test7
+; CHECK: mfspr [[REG1:[0-9]+]], 131
+}
+
+define i64 @test8() {
+entry:
+ %0 = tail call i64 @llvm.ppc.get.tfhar()
+ ret i64 %0
+; CHECK-LABEL: @test8
+; CHECK: mfspr [[REG1:[0-9]+]], 128
+}
+
+define i64 @test9() {
+entry:
+ %0 = tail call i64 @llvm.ppc.get.tfiar()
+ ret i64 %0
+; CHECK-LABEL: @test9
+; CHECK: mfspr [[REG1:[0-9]+]], 129
+}
+
+declare void @llvm.ppc.set.texasr(i64)
+declare void @llvm.ppc.set.texasru(i64)
+declare void @llvm.ppc.set.tfhar(i64)
+declare void @llvm.ppc.set.tfiar(i64)
+declare i64 @llvm.ppc.get.texasr()
+declare i64 @llvm.ppc.get.texasru()
+declare i64 @llvm.ppc.get.tfhar()
+declare i64 @llvm.ppc.get.tfiar()
diff --git a/test/CodeGen/PowerPC/i64_fp_round.ll b/test/CodeGen/PowerPC/i64_fp_round.ll
index 5770d788caf7..2530b8a92825 100644
--- a/test/CodeGen/PowerPC/i64_fp_round.ll
+++ b/test/CodeGen/PowerPC/i64_fp_round.ll
@@ -14,7 +14,7 @@ entry:
; CHECK: sradi [[REG1:[0-9]+]], 3, 53
; CHECK: addi [[REG2:[0-9]+]], [[REG1]], 1
-; CHECK: cmpldi 0, [[REG2]], 1
+; CHECK: cmpldi [[REG2]], 1
; CHECK: isel [[REG3:[0-9]+]], {{[0-9]+}}, 3, 1
; CHECK: std [[REG3]], -{{[0-9]+}}(1)
diff --git a/test/CodeGen/PowerPC/ia-mem-r0.ll b/test/CodeGen/PowerPC/ia-mem-r0.ll
index 4ab17edc5b10..a007fc1aa631 100644
--- a/test/CodeGen/PowerPC/ia-mem-r0.ll
+++ b/test/CodeGen/PowerPC/ia-mem-r0.ll
@@ -16,78 +16,78 @@ define void @test1({ i8*, void (i8*, i8*)* } %fn_arg) {
%1 = bitcast [18 x i64]* %regs to i64*
call void asm sideeffect "std 14, $0", "=*m"(i64* %1)
%2 = bitcast [18 x i64]* %regs to i8*
- %3 = getelementptr i8* %2, i32 8
+ %3 = getelementptr i8, i8* %2, i32 8
%4 = bitcast i8* %3 to i64*
call void asm sideeffect "std 15, $0", "=*m"(i64* %4)
%5 = bitcast [18 x i64]* %regs to i8*
- %6 = getelementptr i8* %5, i32 16
+ %6 = getelementptr i8, i8* %5, i32 16
%7 = bitcast i8* %6 to i64*
call void asm sideeffect "std 16, $0", "=*m"(i64* %7)
%8 = bitcast [18 x i64]* %regs to i8*
- %9 = getelementptr i8* %8, i32 24
+ %9 = getelementptr i8, i8* %8, i32 24
%10 = bitcast i8* %9 to i64*
call void asm sideeffect "std 17, $0", "=*m"(i64* %10)
%11 = bitcast [18 x i64]* %regs to i8*
- %12 = getelementptr i8* %11, i32 32
+ %12 = getelementptr i8, i8* %11, i32 32
%13 = bitcast i8* %12 to i64*
call void asm sideeffect "std 18, $0", "=*m"(i64* %13)
%14 = bitcast [18 x i64]* %regs to i8*
- %15 = getelementptr i8* %14, i32 40
+ %15 = getelementptr i8, i8* %14, i32 40
%16 = bitcast i8* %15 to i64*
call void asm sideeffect "std 19, $0", "=*m"(i64* %16)
%17 = bitcast [18 x i64]* %regs to i8*
- %18 = getelementptr i8* %17, i32 48
+ %18 = getelementptr i8, i8* %17, i32 48
%19 = bitcast i8* %18 to i64*
call void asm sideeffect "std 20, $0", "=*m"(i64* %19)
%20 = bitcast [18 x i64]* %regs to i8*
- %21 = getelementptr i8* %20, i32 56
+ %21 = getelementptr i8, i8* %20, i32 56
%22 = bitcast i8* %21 to i64*
call void asm sideeffect "std 21, $0", "=*m"(i64* %22)
%23 = bitcast [18 x i64]* %regs to i8*
- %24 = getelementptr i8* %23, i32 64
+ %24 = getelementptr i8, i8* %23, i32 64
%25 = bitcast i8* %24 to i64*
call void asm sideeffect "std 22, $0", "=*m"(i64* %25)
%26 = bitcast [18 x i64]* %regs to i8*
- %27 = getelementptr i8* %26, i32 72
+ %27 = getelementptr i8, i8* %26, i32 72
%28 = bitcast i8* %27 to i64*
call void asm sideeffect "std 23, $0", "=*m"(i64* %28)
%29 = bitcast [18 x i64]* %regs to i8*
- %30 = getelementptr i8* %29, i32 80
+ %30 = getelementptr i8, i8* %29, i32 80
%31 = bitcast i8* %30 to i64*
call void asm sideeffect "std 24, $0", "=*m"(i64* %31)
%32 = bitcast [18 x i64]* %regs to i8*
- %33 = getelementptr i8* %32, i32 88
+ %33 = getelementptr i8, i8* %32, i32 88
%34 = bitcast i8* %33 to i64*
call void asm sideeffect "std 25, $0", "=*m"(i64* %34)
%35 = bitcast [18 x i64]* %regs to i8*
- %36 = getelementptr i8* %35, i32 96
+ %36 = getelementptr i8, i8* %35, i32 96
%37 = bitcast i8* %36 to i64*
call void asm sideeffect "std 26, $0", "=*m"(i64* %37)
%38 = bitcast [18 x i64]* %regs to i8*
- %39 = getelementptr i8* %38, i32 104
+ %39 = getelementptr i8, i8* %38, i32 104
%40 = bitcast i8* %39 to i64*
call void asm sideeffect "std 27, $0", "=*m"(i64* %40)
%41 = bitcast [18 x i64]* %regs to i8*
- %42 = getelementptr i8* %41, i32 112
+ %42 = getelementptr i8, i8* %41, i32 112
%43 = bitcast i8* %42 to i64*
call void asm sideeffect "std 28, $0", "=*m"(i64* %43)
%44 = bitcast [18 x i64]* %regs to i8*
- %45 = getelementptr i8* %44, i32 120
+ %45 = getelementptr i8, i8* %44, i32 120
%46 = bitcast i8* %45 to i64*
call void asm sideeffect "std 29, $0", "=*m"(i64* %46)
%47 = bitcast [18 x i64]* %regs to i8*
- %48 = getelementptr i8* %47, i32 128
+ %48 = getelementptr i8, i8* %47, i32 128
%49 = bitcast i8* %48 to i64*
call void asm sideeffect "std 30, $0", "=*m"(i64* %49)
%50 = bitcast [18 x i64]* %regs to i8*
- %51 = getelementptr i8* %50, i32 136
+ %51 = getelementptr i8, i8* %50, i32 136
%52 = bitcast i8* %51 to i64*
call void asm sideeffect "std 31, $0", "=*m"(i64* %52)
- %53 = getelementptr { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 1
- %.funcptr = load void (i8*, i8*)** %53
- %54 = getelementptr { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 0
- %.ptr = load i8** %54
- %55 = load i8** %sp
+ %53 = getelementptr { i8*, void (i8*, i8*)* }, { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 1
+ %.funcptr = load void (i8*, i8*)*, void (i8*, i8*)** %53
+ %54 = getelementptr { i8*, void (i8*, i8*)* }, { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 0
+ %.ptr = load i8*, i8** %54
+ %55 = load i8*, i8** %sp
call void %.funcptr(i8* %.ptr, i8* %55)
ret void
}
diff --git a/test/CodeGen/PowerPC/indexed-load.ll b/test/CodeGen/PowerPC/indexed-load.ll
index 59fc058c9414..ce386d76cf17 100644
--- a/test/CodeGen/PowerPC/indexed-load.ll
+++ b/test/CodeGen/PowerPC/indexed-load.ll
@@ -13,9 +13,9 @@ target triple = "powerpc64le-unknown-linux-gnu"
; CHECK-NOT: stwx {{[0-9]+}}, {{[0-9]+}}, 64
define void @f(%class.test* %this) {
entry:
- %Subminor.i.i = getelementptr inbounds %class.test* %this, i64 0, i32 1
+ %Subminor.i.i = getelementptr inbounds %class.test, %class.test* %this, i64 0, i32 1
%0 = bitcast [5 x i8]* %Subminor.i.i to i40*
- %bf.load2.i.i = load i40* %0, align 4
+ %bf.load2.i.i = load i40, i40* %0, align 4
%bf.clear7.i.i = and i40 %bf.load2.i.i, -8589934592
store i40 %bf.clear7.i.i, i40* %0, align 4
ret void
diff --git a/test/CodeGen/PowerPC/indirectbr.ll b/test/CodeGen/PowerPC/indirectbr.ll
index fd06fd9b7f46..d1e03ca7773a 100644
--- a/test/CodeGen/PowerPC/indirectbr.ll
+++ b/test/CodeGen/PowerPC/indirectbr.ll
@@ -10,7 +10,7 @@ define internal i32 @foo(i32 %i) nounwind {
; STATIC-LABEL: foo:
; PPC64-LABEL: foo:
entry:
- %0 = load i8** @nextaddr, align 4 ; <i8*> [#uses=2]
+ %0 = load i8*, i8** @nextaddr, align 4 ; <i8*> [#uses=2]
%1 = icmp eq i8* %0, null ; <i1> [#uses=1]
br i1 %1, label %bb3, label %bb2
@@ -37,8 +37,8 @@ bb2: ; preds = %entry, %bb3
indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
bb3: ; preds = %entry
- %2 = getelementptr inbounds [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
- %gotovar.4.0.pre = load i8** %2, align 4 ; <i8*> [#uses=1]
+ %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
+ %gotovar.4.0.pre = load i8*, i8** %2, align 4 ; <i8*> [#uses=1]
br label %bb2
L5: ; preds = %bb2
diff --git a/test/CodeGen/PowerPC/inlineasm-i64-reg.ll b/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
index 4d8e704f07a0..05f2a197cd51 100644
--- a/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
+++ b/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
@@ -19,18 +19,18 @@ entry:
store %struct.BG_CoordinateMapping_t* %map, %struct.BG_CoordinateMapping_t** %map.addr, align 8
store i64* %numentries, i64** %numentries.addr, align 8
store i64 1055, i64* %r0, align 8
- %0 = load i64* %mapsize.addr, align 8
+ %0 = load i64, i64* %mapsize.addr, align 8
store i64 %0, i64* %r3, align 8
- %1 = load %struct.BG_CoordinateMapping_t** %map.addr, align 8
+ %1 = load %struct.BG_CoordinateMapping_t*, %struct.BG_CoordinateMapping_t** %map.addr, align 8
%2 = ptrtoint %struct.BG_CoordinateMapping_t* %1 to i64
store i64 %2, i64* %r4, align 8
- %3 = load i64** %numentries.addr, align 8
+ %3 = load i64*, i64** %numentries.addr, align 8
%4 = ptrtoint i64* %3 to i64
store i64 %4, i64* %r5, align 8
- %5 = load i64* %r0, align 8
- %6 = load i64* %r3, align 8
- %7 = load i64* %r4, align 8
- %8 = load i64* %r5, align 8
+ %5 = load i64, i64* %r0, align 8
+ %6 = load i64, i64* %r3, align 8
+ %7 = load i64, i64* %r4, align 8
+ %8 = load i64, i64* %r5, align 8
%9 = call { i64, i64, i64, i64 } asm sideeffect "sc", "={r0},={r3},={r4},={r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 %5, i64 %6, i64 %7, i64 %8) #1, !srcloc !0
; CHECK-LABEL: @Kernel_RanksToCoords
@@ -52,9 +52,9 @@ entry:
store i64 %asmresult1, i64* %r3, align 8
store i64 %asmresult2, i64* %r4, align 8
store i64 %asmresult3, i64* %r5, align 8
- %10 = load i64* %r3, align 8
+ %10 = load i64, i64* %r3, align 8
store i64 %10, i64* %tmp
- %11 = load i64* %tmp
+ %11 = load i64, i64* %tmp
%conv = trunc i64 %11 to i32
ret i32 %conv
}
@@ -87,7 +87,7 @@ entry:
if.then: ; preds = %entry
call void @mtrace()
- %.pre = load i32* %argc.addr, align 4
+ %.pre = load i32, i32* %argc.addr, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
diff --git a/test/CodeGen/PowerPC/isel-rc-nox0.ll b/test/CodeGen/PowerPC/isel-rc-nox0.ll
index ac99aa408bdd..7475e12c4477 100644
--- a/test/CodeGen/PowerPC/isel-rc-nox0.ll
+++ b/test/CodeGen/PowerPC/isel-rc-nox0.ll
@@ -17,12 +17,12 @@ for.cond1.preheader.i: ; preds = %for.cond1.preheader
crc32_gentab.exit: ; preds = %for.cond1.preheader.i
%tobool.i19.i.i = icmp eq i32 undef, 0
- %retval.0.i.i.i = select i1 %tobool.i19.i.i, i32* getelementptr inbounds ([1 x [9 x i32]]* @g_62, i64 0, i64 0, i64 6), i32* getelementptr inbounds ([1 x [9 x i32]]* @g_62, i64 0, i64 0, i64 8)
+ %retval.0.i.i.i = select i1 %tobool.i19.i.i, i32* getelementptr inbounds ([1 x [9 x i32]], [1 x [9 x i32]]* @g_62, i64 0, i64 0, i64 6), i32* getelementptr inbounds ([1 x [9 x i32]], [1 x [9 x i32]]* @g_62, i64 0, i64 0, i64 8)
br label %for.cond1.preheader.i2961.i
for.cond1.preheader.i2961.i: ; preds = %for.inc44.i2977.i, %crc32_gentab.exit
call void @llvm.memset.p0i8.i64(i8* bitcast ([1 x [9 x i32]]* @g_62 to i8*), i8 -1, i64 36, i32 4, i1 false) #1
- %0 = load i32* %retval.0.i.i.i, align 4
+ %0 = load i32, i32* %retval.0.i.i.i, align 4
%tobool.i2967.i = icmp eq i32 %0, 0
br label %for.body21.i2968.i
diff --git a/test/CodeGen/PowerPC/lbz-from-ld-shift.ll b/test/CodeGen/PowerPC/lbz-from-ld-shift.ll
index 3eacd6a45fb4..7696b84708d8 100644
--- a/test/CodeGen/PowerPC/lbz-from-ld-shift.ll
+++ b/test/CodeGen/PowerPC/lbz-from-ld-shift.ll
@@ -5,7 +5,7 @@ target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind readonly
define signext i32 @test(i32* nocapture readonly %P) #0 {
entry:
- %0 = load i32* %P, align 4
+ %0 = load i32, i32* %P, align 4
%shr = lshr i32 %0, 24
ret i32 %shr
diff --git a/test/CodeGen/PowerPC/lbzux.ll b/test/CodeGen/PowerPC/lbzux.ll
index f3158b32f390..4bd9cb6ab18a 100644
--- a/test/CodeGen/PowerPC/lbzux.ll
+++ b/test/CodeGen/PowerPC/lbzux.ll
@@ -4,7 +4,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define fastcc void @allocateSpace(i1 %cond1, i1 %cond2) nounwind {
entry:
- %0 = load i8** undef, align 8
+ %0 = load i8*, i8** undef, align 8
br i1 undef, label %return, label %lor.lhs.false
lor.lhs.false: ; preds = %entry
@@ -18,7 +18,7 @@ if.then15: ; preds = %if.end7
while.cond: ; preds = %while.body, %if.then15
%idxprom17 = sext i32 0 to i64
- %arrayidx18 = getelementptr inbounds i8* %0, i64 %idxprom17
+ %arrayidx18 = getelementptr inbounds i8, i8* %0, i64 %idxprom17
%or = or i32 undef, undef
br i1 %cond1, label %if.end71, label %while.body
@@ -27,10 +27,10 @@ while.body: ; preds = %while.cond
if.then45: ; preds = %while.body
%idxprom48139 = zext i32 %or to i64
- %arrayidx49 = getelementptr inbounds i8* %0, i64 %idxprom48139
+ %arrayidx49 = getelementptr inbounds i8, i8* %0, i64 %idxprom48139
%1 = bitcast i8* %arrayidx49 to i16*
%2 = bitcast i8* %arrayidx18 to i16*
- %3 = load i16* %1, align 1
+ %3 = load i16, i16* %1, align 1
store i16 %3, i16* %2, align 1
br label %return
diff --git a/test/CodeGen/PowerPC/ld-st-upd.ll b/test/CodeGen/PowerPC/ld-st-upd.ll
index 24f31aca05ad..be0c94a54fe0 100644
--- a/test/CodeGen/PowerPC/ld-st-upd.ll
+++ b/test/CodeGen/PowerPC/ld-st-upd.ll
@@ -4,8 +4,8 @@ target triple = "powerpc-unknown-linux-gnu"
; Function Attrs: nounwind
define i32* @test4(i32* readonly %X, i32* nocapture %dest) #0 {
- %Y = getelementptr i32* %X, i64 4
- %A = load i32* %Y, align 4
+ %Y = getelementptr i32, i32* %X, i64 4
+ %A = load i32, i32* %Y, align 4
store i32 %A, i32* %dest, align 4
ret i32* %Y
diff --git a/test/CodeGen/PowerPC/ldtoc-inv.ll b/test/CodeGen/PowerPC/ldtoc-inv.ll
new file mode 100644
index 000000000000..07a1686cc704
--- /dev/null
+++ b/test/CodeGen/PowerPC/ldtoc-inv.ll
@@ -0,0 +1,39 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@phasor = external constant [4096 x i32]
+
+; Function Attrs: nounwind
+define void @test(i32* nocapture %out, i32 zeroext %step_size) #0 {
+entry:
+ %shl = shl i32 %step_size, 2
+ %idxprom = zext i32 %shl to i64
+ br label %for.body
+
+; Make sure that the TOC load has been hoisted out of the loop.
+; CHECK-LABEL: @test
+; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc@l
+; CHECK: %for.body
+; CHECK: blr
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %0 = trunc i64 %indvars.iv to i32
+ %shl1 = shl i32 %0, %step_size
+ %idxprom2 = sext i32 %shl1 to i64
+ %arrayidx.sum = add nsw i64 %idxprom2, %idxprom
+ %arrayidx3 = getelementptr inbounds [4096 x i32], [4096 x i32]* @phasor, i64 0, i64 %arrayidx.sum
+ %1 = load i32, i32* %arrayidx3, align 4
+ %arrayidx5 = getelementptr inbounds i32, i32* %out, i64 %indvars.iv
+ store i32 %1, i32* %arrayidx5, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
+ %cmp = icmp slt i64 %indvars.iv.next, 1020
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/lha.ll b/test/CodeGen/PowerPC/lha.ll
index 3a100c1aae6d..c5c5f5905f67 100644
--- a/test/CodeGen/PowerPC/lha.ll
+++ b/test/CodeGen/PowerPC/lha.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=ppc32 | grep lha
define i32 @test(i16* %a) {
- %tmp.1 = load i16* %a ; <i16> [#uses=1]
+ %tmp.1 = load i16, i16* %a ; <i16> [#uses=1]
%tmp.2 = sext i16 %tmp.1 to i32 ; <i32> [#uses=1]
ret i32 %tmp.2
}
diff --git a/test/CodeGen/PowerPC/load-constant-addr.ll b/test/CodeGen/PowerPC/load-constant-addr.ll
index f1d061c1ad5a..783443008cae 100644
--- a/test/CodeGen/PowerPC/load-constant-addr.ll
+++ b/test/CodeGen/PowerPC/load-constant-addr.ll
@@ -3,7 +3,7 @@
; RUN: llc < %s -march=ppc32 | not grep ori
define float @test() {
- %tmp.i = load float* inttoptr (i32 186018016 to float*) ; <float> [#uses=1]
+ %tmp.i = load float, float* inttoptr (i32 186018016 to float*) ; <float> [#uses=1]
ret float %tmp.i
}
diff --git a/test/CodeGen/PowerPC/load-shift-combine.ll b/test/CodeGen/PowerPC/load-shift-combine.ll
index a5d1224864a6..8d1f8146db95 100644
--- a/test/CodeGen/PowerPC/load-shift-combine.ll
+++ b/test/CodeGen/PowerPC/load-shift-combine.ll
@@ -16,19 +16,19 @@
define void @test1847() nounwind {
entry:
%j = alloca i32, align 4
- %0 = load i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
- %1 = load i32* @fails, align 4
- %bf.load1 = load i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
+ %0 = load i64, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 8), align 8
+ %1 = load i32, i32* @fails, align 4
+ %bf.load1 = load i96, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847], [5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
%bf.clear2 = and i96 %bf.load1, 302231454903657293676543
%bf.set3 = or i96 %bf.clear2, -38383394772764476296921088
- store i96 %bf.set3, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
- %2 = load i32* %j, align 4
- %3 = load i32* %j, align 4
+ store i96 %bf.set3, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847], [5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
+ %2 = load i32, i32* %j, align 4
+ %3 = load i32, i32* %j, align 4
%inc11 = add nsw i32 %3, 1
store i32 %inc11, i32* %j, align 4
- %bf.load15 = load i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
+ %bf.load15 = load i96, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847], [5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
%bf.clear16 = and i96 %bf.load15, -18446744069414584321
%bf.set17 = or i96 %bf.clear16, 18446743532543672320
- store i96 %bf.set17, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
+ store i96 %bf.set17, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847], [5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
ret void
}
diff --git a/test/CodeGen/PowerPC/long-compare.ll b/test/CodeGen/PowerPC/long-compare.ll
index 915595f6dbac..e53356a5ddf2 100644
--- a/test/CodeGen/PowerPC/long-compare.ll
+++ b/test/CodeGen/PowerPC/long-compare.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=ppc32 | grep cntlzw
+; RUN: llc < %s -march=ppc32 | grep cntlz
; RUN: llc < %s -march=ppc32 | not grep xori
; RUN: llc < %s -march=ppc32 | not grep "li "
; RUN: llc < %s -march=ppc32 | not grep "mr "
diff --git a/test/CodeGen/PowerPC/loop-data-prefetch-inner.ll b/test/CodeGen/PowerPC/loop-data-prefetch-inner.ll
new file mode 100644
index 000000000000..adcc7b90bc48
--- /dev/null
+++ b/test/CodeGen/PowerPC/loop-data-prefetch-inner.ll
@@ -0,0 +1,66 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+; Function Attrs: nounwind
+define void @foo(double* %x, double* nocapture readonly %y) #0 {
+entry:
+ %scevgep = getelementptr double, double* %x, i64 1599
+ %scevgep20 = getelementptr double, double* %y, i64 1599
+ br label %vector.memcheck
+
+vector.memcheck: ; preds = %for.end, %entry
+ %j.015 = phi i32 [ 0, %entry ], [ %inc7, %for.end ]
+ %bound0 = icmp uge double* %scevgep20, %x
+ %bound1 = icmp uge double* %scevgep, %y
+ %memcheck.conflict = and i1 %bound0, %bound1
+ br i1 %memcheck.conflict, label %middle.block, label %vector.body
+
+vector.body: ; preds = %vector.memcheck, %vector.body
+ %index = phi i64 [ %index.next, %vector.body ], [ 0, %vector.memcheck ]
+ %0 = getelementptr inbounds double, double* %y, i64 %index
+ %1 = bitcast double* %0 to <4 x double>*
+ %wide.load = load <4 x double>, <4 x double>* %1, align 8
+ %2 = fadd <4 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
+ %3 = getelementptr inbounds double, double* %x, i64 %index
+ %4 = bitcast double* %3 to <4 x double>*
+ store <4 x double> %2, <4 x double>* %4, align 8
+ %index.next = add i64 %index, 4
+ %5 = icmp eq i64 %index.next, 1600
+ br i1 %5, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body, %vector.memcheck
+ %resume.val = phi i1 [ false, %vector.memcheck ], [ true, %vector.body ]
+ %trunc.resume.val = phi i64 [ 0, %vector.memcheck ], [ 1600, %vector.body ]
+ br i1 %resume.val, label %for.end, label %for.body3
+
+for.body3: ; preds = %middle.block, %for.body3
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ %trunc.resume.val, %middle.block ]
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
+ %6 = load double, double* %arrayidx, align 8
+ %add = fadd double %6, 1.000000e+00
+ %arrayidx5 = getelementptr inbounds double, double* %x, i64 %indvars.iv
+ store double %add, double* %arrayidx5, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1600
+ br i1 %exitcond, label %for.end, label %for.body3
+
+for.end: ; preds = %middle.block, %for.body3
+ tail call void @bar(double* %x) #2
+ %inc7 = add nuw nsw i32 %j.015, 1
+ %exitcond16 = icmp eq i32 %inc7, 100
+ br i1 %exitcond16, label %for.end8, label %vector.memcheck
+
+for.end8: ; preds = %for.end
+ ret void
+
+; CHECK-LABEL: @foo
+; CHECK: dcbt
+}
+
+declare void @bar(double*) #1
+
+attributes #0 = { nounwind "target-cpu"="a2q" }
+attributes #1 = { "target-cpu"="a2q" }
+attributes #2 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/loop-data-prefetch.ll b/test/CodeGen/PowerPC/loop-data-prefetch.ll
new file mode 100644
index 000000000000..59b74e67251b
--- /dev/null
+++ b/test/CodeGen/PowerPC/loop-data-prefetch.ll
@@ -0,0 +1,29 @@
+; RUN: llc -mcpu=a2 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+; Function Attrs: nounwind
+define void @foo(double* nocapture %a, double* nocapture readonly %b) #0 {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, double* %b, i64 %indvars.iv
+ %0 = load double, double* %arrayidx, align 8
+ %add = fadd double %0, 1.000000e+00
+ %arrayidx2 = getelementptr inbounds double, double* %a, i64 %indvars.iv
+ store double %add, double* %arrayidx2, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1600
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+
+; CHECK-LABEL: @foo
+; CHECK: dcbt
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/loop-prep-all.ll b/test/CodeGen/PowerPC/loop-prep-all.ll
new file mode 100644
index 000000000000..895daff63ad9
--- /dev/null
+++ b/test/CodeGen/PowerPC/loop-prep-all.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-bgq-linux < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BGQ
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @foo(double* nocapture %x, double* nocapture readonly %y) #0 {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
+ %0 = load double, double* %arrayidx, align 8
+ %add = fadd double %0, 1.000000e+00
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
+ store double %add, double* %arrayidx2, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond19 = icmp eq i64 %indvars.iv.next, 1600
+ br i1 %exitcond19, label %for.body7, label %for.body
+
+; CHECK-LABEL: @foo
+
+; CHECK-BGQ-DAG: dcbt 4, 5
+; CHECK-DAG: lfdu [[REG1:[0-9]+]], 8({{[0-9]+}})
+; CHECK-DAG: fadd [[REG2:[0-9]+]], [[REG1]], 0
+; CHECK-DAG: stfdu [[REG2]], 8({{[0-9]+}})
+; CHECK: bdnz
+
+; CHECK: blr
+
+for.cond.cleanup6: ; preds = %for.body7
+ ret void
+
+for.body7: ; preds = %for.body, %for.body7
+ %i3.017 = phi i32 [ %inc9, %for.body7 ], [ 0, %for.body ]
+ tail call void bitcast (void (...)* @bar to void ()*)() #2
+ %inc9 = add nuw nsw i32 %i3.017, 1
+ %exitcond = icmp eq i32 %inc9, 1024
+ br i1 %exitcond, label %for.cond.cleanup6, label %for.body7
+}
+
+declare void @bar(...) #1
+
+attributes #0 = { nounwind "target-cpu"="a2q" }
+attributes #1 = { "target-cpu"="a2q" }
+attributes #2 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/lsa.ll b/test/CodeGen/PowerPC/lsa.ll
index a892a4cf4140..f4d61c014dd9 100644
--- a/test/CodeGen/PowerPC/lsa.ll
+++ b/test/CodeGen/PowerPC/lsa.ll
@@ -13,13 +13,13 @@ entry:
call void @llvm.lifetime.start(i64 32800, i8* %1) #0
%2 = bitcast [8200 x i32]* %q to i8*
call void @llvm.lifetime.start(i64 32800, i8* %2) #0
- %arraydecay = getelementptr inbounds [8200 x i32]* %q, i64 0, i64 0
- %arraydecay1 = getelementptr inbounds [8200 x i32]* %v, i64 0, i64 0
- %arraydecay2 = getelementptr inbounds [8200 x i32]* %w, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [8200 x i32], [8200 x i32]* %q, i64 0, i64 0
+ %arraydecay1 = getelementptr inbounds [8200 x i32], [8200 x i32]* %v, i64 0, i64 0
+ %arraydecay2 = getelementptr inbounds [8200 x i32], [8200 x i32]* %w, i64 0, i64 0
call void @bar(i32* %arraydecay, i32* %arraydecay1, i32* %arraydecay2) #0
- %3 = load i32* %arraydecay2, align 4
- %arrayidx3 = getelementptr inbounds [8200 x i32]* %w, i64 0, i64 1
- %4 = load i32* %arrayidx3, align 4
+ %3 = load i32, i32* %arraydecay2, align 4
+ %arrayidx3 = getelementptr inbounds [8200 x i32], [8200 x i32]* %w, i64 0, i64 1
+ %4 = load i32, i32* %arrayidx3, align 4
; CHECK: @foo
; CHECK-NOT: lwzx
diff --git a/test/CodeGen/PowerPC/lsr-postinc-pos.ll b/test/CodeGen/PowerPC/lsr-postinc-pos.ll
index 42472c58fe8b..7831df154606 100644
--- a/test/CodeGen/PowerPC/lsr-postinc-pos.ll
+++ b/test/CodeGen/PowerPC/lsr-postinc-pos.ll
@@ -3,8 +3,8 @@
; The icmp is a post-inc use, and the increment is in %bb11, but the
; scevgep needs to be inserted in %bb so that it is dominated by %t.
-; CHECK: %t = load i8** undef
-; CHECK: %scevgep = getelementptr i8* %t, i32 %lsr.iv.next
+; CHECK: %t = load i8*, i8** undef
+; CHECK: %scevgep = getelementptr i8, i8* %t, i32 %lsr.iv.next
; CHECK: %c1 = icmp ult i8* %scevgep, undef
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
@@ -21,8 +21,8 @@ bb11:
br i1 %c0, label %bb13, label %bb
bb:
- %t = load i8** undef, align 16 ; <i8*> [#uses=1]
- %p = getelementptr i8* %t, i32 %ii ; <i8*> [#uses=1]
+ %t = load i8*, i8** undef, align 16 ; <i8*> [#uses=1]
+ %p = getelementptr i8, i8* %t, i32 %ii ; <i8*> [#uses=1]
%c1 = icmp ult i8* %p, undef ; <i1> [#uses=1]
%i.next = add i32 %i, 1 ; <i32> [#uses=1]
br i1 %c1, label %bb11, label %bb13
diff --git a/test/CodeGen/PowerPC/mask64.ll b/test/CodeGen/PowerPC/mask64.ll
index 139621af1f22..600cecd3fe7b 100644
--- a/test/CodeGen/PowerPC/mask64.ll
+++ b/test/CodeGen/PowerPC/mask64.ll
@@ -9,8 +9,8 @@ entry:
br i1 false, label %bb16, label %bb49
bb16: ; preds = %entry
- %tmp19 = load i8** null, align 1 ; <i8*> [#uses=1]
- %tmp21 = load i8* %tmp19, align 1 ; <i8> [#uses=1]
+ %tmp19 = load i8*, i8** null, align 1 ; <i8*> [#uses=1]
+ %tmp21 = load i8, i8* %tmp19, align 1 ; <i8> [#uses=1]
switch i8 %tmp21, label %bb49 [
i8 0, label %bb45
i8 1, label %bb34
diff --git a/test/CodeGen/PowerPC/mature-mc-support.ll b/test/CodeGen/PowerPC/mature-mc-support.ll
index 7c83e184a6f8..aa387f6e2666 100644
--- a/test/CodeGen/PowerPC/mature-mc-support.ll
+++ b/test/CodeGen/PowerPC/mature-mc-support.ll
@@ -10,6 +10,10 @@
; RUN: not llc -march=ppc32 -filetype=obj < %s > /dev/null 2> %t2
; RUN: FileCheck %s < %t2
+; Test that we don't try to produce COFF for ppc.
+; RUN: not llc -mtriple=powerpc-mingw32 -filetype=obj < %s > /dev/null 2> %t2
+; RUN: FileCheck %s < %t2
+
; SKIP: not llc -march=ppc64 < %s > /dev/null 2> %t3
; SKIP: FileCheck %s < %t3
diff --git a/test/CodeGen/PowerPC/mcm-1.ll b/test/CodeGen/PowerPC/mcm-1.ll
index 4e31550c40d4..7ef4a8e957e6 100644
--- a/test/CodeGen/PowerPC/mcm-1.ll
+++ b/test/CodeGen/PowerPC/mcm-1.ll
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @test_external() nounwind {
entry:
- %0 = load i32* @ei, align 4
+ %0 = load i32, i32* @ei, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @ei, align 4
ret i32 %0
diff --git a/test/CodeGen/PowerPC/mcm-10.ll b/test/CodeGen/PowerPC/mcm-10.ll
index 9565ebc780bf..affa249e32b5 100644
--- a/test/CodeGen/PowerPC/mcm-10.ll
+++ b/test/CodeGen/PowerPC/mcm-10.ll
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @test_fn_static() nounwind {
entry:
- %0 = load i32* @test_fn_static.si, align 4
+ %0 = load i32, i32* @test_fn_static.si, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @test_fn_static.si, align 4
ret i32 %0
diff --git a/test/CodeGen/PowerPC/mcm-11.ll b/test/CodeGen/PowerPC/mcm-11.ll
index 033045c74c8a..457c60afb57e 100644
--- a/test/CodeGen/PowerPC/mcm-11.ll
+++ b/test/CodeGen/PowerPC/mcm-11.ll
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @test_file_static() nounwind {
entry:
- %0 = load i32* @gi, align 4
+ %0 = load i32, i32* @gi, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @gi, align 4
ret i32 %0
diff --git a/test/CodeGen/PowerPC/mcm-2.ll b/test/CodeGen/PowerPC/mcm-2.ll
index 811600ecdbf6..03ce1c65851e 100644
--- a/test/CodeGen/PowerPC/mcm-2.ll
+++ b/test/CodeGen/PowerPC/mcm-2.ll
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @test_fn_static() nounwind {
entry:
- %0 = load i32* @test_fn_static.si, align 4
+ %0 = load i32, i32* @test_fn_static.si, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @test_fn_static.si, align 4
ret i32 %0
diff --git a/test/CodeGen/PowerPC/mcm-3.ll b/test/CodeGen/PowerPC/mcm-3.ll
index b6d681d580ad..40188bcb5a0b 100644
--- a/test/CodeGen/PowerPC/mcm-3.ll
+++ b/test/CodeGen/PowerPC/mcm-3.ll
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @test_file_static() nounwind {
entry:
- %0 = load i32* @gi, align 4
+ %0 = load i32, i32* @gi, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @gi, align 4
ret i32 %0
diff --git a/test/CodeGen/PowerPC/mcm-5.ll b/test/CodeGen/PowerPC/mcm-5.ll
index 92ddecaeb8c8..19adbe5b7d93 100644
--- a/test/CodeGen/PowerPC/mcm-5.ll
+++ b/test/CodeGen/PowerPC/mcm-5.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mcpu=pwr7 -O0 -code-model=medium <%s | FileCheck %s
-; RUN: llc -mcpu=pwr7 -O0 -code-model=large <%s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -code-model=medium <%s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -code-model=large <%s | FileCheck %s
; Test correct code generation for medium and large code model
; for loading the address of a jump table from the TOC.
@@ -11,7 +11,7 @@ define signext i32 @test_jump_table(i32 signext %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
- %0 = load i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
switch i32 %0, label %sw.default [
i32 3, label %sw.bb
i32 4, label %sw.bb1
@@ -23,31 +23,31 @@ sw.default: ; preds = %entry
br label %sw.epilog
sw.bb: ; preds = %entry
- %1 = load i32* %i.addr, align 4
+ %1 = load i32, i32* %i.addr, align 4
%mul = mul nsw i32 %1, 7
store i32 %mul, i32* %i.addr, align 4
br label %sw.bb1
sw.bb1: ; preds = %entry, %sw.bb
- %2 = load i32* %i.addr, align 4
+ %2 = load i32, i32* %i.addr, align 4
%dec = add nsw i32 %2, -1
store i32 %dec, i32* %i.addr, align 4
br label %sw.bb2
sw.bb2: ; preds = %entry, %sw.bb1
- %3 = load i32* %i.addr, align 4
+ %3 = load i32, i32* %i.addr, align 4
%add = add nsw i32 %3, 3
store i32 %add, i32* %i.addr, align 4
br label %sw.bb3
sw.bb3: ; preds = %entry, %sw.bb2
- %4 = load i32* %i.addr, align 4
+ %4 = load i32, i32* %i.addr, align 4
%shl = shl i32 %4, 1
store i32 %shl, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb3, %sw.default
- %5 = load i32* %i.addr, align 4
+ %5 = load i32, i32* %i.addr, align 4
ret i32 %5
}
diff --git a/test/CodeGen/PowerPC/mcm-6.ll b/test/CodeGen/PowerPC/mcm-6.ll
index f7838b4b2527..6a50d1264109 100644
--- a/test/CodeGen/PowerPC/mcm-6.ll
+++ b/test/CodeGen/PowerPC/mcm-6.ll
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @test_tentative() nounwind {
entry:
- %0 = load i32* @ti, align 4
+ %0 = load i32, i32* @ti, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @ti, align 4
ret i32 %0
diff --git a/test/CodeGen/PowerPC/mcm-7.ll b/test/CodeGen/PowerPC/mcm-7.ll
index 7caa13bcdcf8..9c8158df77b3 100644
--- a/test/CodeGen/PowerPC/mcm-7.ll
+++ b/test/CodeGen/PowerPC/mcm-7.ll
@@ -11,7 +11,7 @@ define i8* @test_fnaddr() nounwind {
entry:
%func = alloca i32 (i32)*, align 8
store i32 (i32)* @foo, i32 (i32)** %func, align 8
- %0 = load i32 (i32)** %func, align 8
+ %0 = load i32 (i32)*, i32 (i32)** %func, align 8
%1 = bitcast i32 (i32)* %0 to i8*
ret i8* %1
}
diff --git a/test/CodeGen/PowerPC/mcm-8.ll b/test/CodeGen/PowerPC/mcm-8.ll
index 643548f6b125..b265c77e2d31 100644
--- a/test/CodeGen/PowerPC/mcm-8.ll
+++ b/test/CodeGen/PowerPC/mcm-8.ll
@@ -11,8 +11,8 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i8 @test_avext() nounwind {
entry:
- %0 = getelementptr inbounds [13 x i8]* @x, i32 0, i32 0
- %1 = load i8* %0, align 1
+ %0 = getelementptr inbounds [13 x i8], [13 x i8]* @x, i32 0, i32 0
+ %1 = load i8, i8* %0, align 1
ret i8 %1
}
diff --git a/test/CodeGen/PowerPC/mcm-9.ll b/test/CodeGen/PowerPC/mcm-9.ll
index 7906b6abea6a..45a4e699a3db 100644
--- a/test/CodeGen/PowerPC/mcm-9.ll
+++ b/test/CodeGen/PowerPC/mcm-9.ll
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @test_external() nounwind {
entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @a, align 4
ret i32 %0
diff --git a/test/CodeGen/PowerPC/mcm-default.ll b/test/CodeGen/PowerPC/mcm-default.ll
index 8d4ff14118f3..49e6513a7c13 100644
--- a/test/CodeGen/PowerPC/mcm-default.ll
+++ b/test/CodeGen/PowerPC/mcm-default.ll
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @test_external() nounwind {
entry:
- %0 = load i32* @ei, align 4
+ %0 = load i32, i32* @ei, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @ei, align 4
ret i32 %0
diff --git a/test/CodeGen/PowerPC/mcm-obj-2.ll b/test/CodeGen/PowerPC/mcm-obj-2.ll
index c42cf0c36ea8..36c58560ebff 100644
--- a/test/CodeGen/PowerPC/mcm-obj-2.ll
+++ b/test/CodeGen/PowerPC/mcm-obj-2.ll
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @test_fn_static() nounwind {
entry:
- %0 = load i32* @test_fn_static.si, align 4
+ %0 = load i32, i32* @test_fn_static.si, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @test_fn_static.si, align 4
ret i32 %0
@@ -20,7 +20,7 @@ entry:
; accessing function-scoped variable si.
;
; CHECK: Relocations [
-; CHECK: Section (2) .rela.text {
+; CHECK: Section {{.*}} .rela.text {
; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM2:[^ ]+]]
; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM2]]
; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM2]]
@@ -29,7 +29,7 @@ entry:
define signext i32 @test_file_static() nounwind {
entry:
- %0 = load i32* @gi, align 4
+ %0 = load i32, i32* @gi, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @gi, align 4
ret i32 %0
diff --git a/test/CodeGen/PowerPC/mcm-obj.ll b/test/CodeGen/PowerPC/mcm-obj.ll
index d3d05eb48d32..1ececf84926e 100644
--- a/test/CodeGen/PowerPC/mcm-obj.ll
+++ b/test/CodeGen/PowerPC/mcm-obj.ll
@@ -3,6 +3,12 @@
; RUN: llc -O0 -mcpu=pwr7 -code-model=large -filetype=obj -fast-isel=false %s -o - | \
; RUN: llvm-readobj -r | FileCheck -check-prefix=LARGE %s
+; Run jump table test separately since jump tables aren't generated at -O0.
+; RUN: llc -mcpu=pwr7 -code-model=medium -filetype=obj -fast-isel=false %s -o - | \
+; RUN: llvm-readobj -r | FileCheck -check-prefix=MEDIUM-JT %s
+; RUN: llc -mcpu=pwr7 -code-model=large -filetype=obj -fast-isel=false %s -o - | \
+; RUN: llvm-readobj -r | FileCheck -check-prefix=LARGE-JT %s
+
; FIXME: When asm-parse is available, could make this an assembly test.
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
@@ -12,7 +18,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @test_external() nounwind {
entry:
- %0 = load i32* @ei, align 4
+ %0 = load i32, i32* @ei, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @ei, align 4
ret i32 %0
@@ -22,12 +28,12 @@ entry:
; accessing external variable ei.
;
; MEDIUM: Relocations [
-; MEDIUM: Section (2) .rela.text {
+; MEDIUM: Section {{.*}} .rela.text {
; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM1:[^ ]+]]
; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM1]]
;
; LARGE: Relocations [
-; LARGE: Section (2) .rela.text {
+; LARGE: Section {{.*}} .rela.text {
; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM1:[^ ]+]]
; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM1]]
@@ -35,7 +41,7 @@ entry:
define signext i32 @test_fn_static() nounwind {
entry:
- %0 = load i32* @test_fn_static.si, align 4
+ %0 = load i32, i32* @test_fn_static.si, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @test_fn_static.si, align 4
ret i32 %0
@@ -57,7 +63,7 @@ entry:
define signext i32 @test_file_static() nounwind {
entry:
- %0 = load i32* @gi, align 4
+ %0 = load i32, i32* @gi, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* @gi, align 4
ret i32 %0
@@ -92,11 +98,51 @@ entry:
; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM4:[^ ]+]]
; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM4]]
+@ti = common global i32 0, align 4
+
+define signext i32 @test_tentative() nounwind {
+entry:
+ %0 = load i32, i32* @ti, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* @ti, align 4
+ ret i32 %0
+}
+
+; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
+; accessing tentatively declared variable ti.
+;
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
+;
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
+
+define i8* @test_fnaddr() nounwind {
+entry:
+ %func = alloca i32 (i32)*, align 8
+ store i32 (i32)* @foo, i32 (i32)** %func, align 8
+ %0 = load i32 (i32)*, i32 (i32)** %func, align 8
+ %1 = bitcast i32 (i32)* %0 to i8*
+ ret i8* %1
+}
+
+declare signext i32 @foo(i32 signext)
+
+; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
+; accessing function address foo.
+;
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM7:[^ ]+]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM7]]
+;
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM7:[^ ]+]]
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM7]]
+
+
define signext i32 @test_jump_table(i32 signext %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
- %0 = load i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
switch i32 %0, label %sw.default [
i32 3, label %sw.bb
i32 4, label %sw.bb1
@@ -108,78 +154,43 @@ sw.default: ; preds = %entry
br label %sw.epilog
sw.bb: ; preds = %entry
- %1 = load i32* %i.addr, align 4
+ %1 = load i32, i32* %i.addr, align 4
%mul = mul nsw i32 %1, 7
store i32 %mul, i32* %i.addr, align 4
br label %sw.bb1
sw.bb1: ; preds = %entry, %sw.bb
- %2 = load i32* %i.addr, align 4
+ %2 = load i32, i32* %i.addr, align 4
%dec = add nsw i32 %2, -1
store i32 %dec, i32* %i.addr, align 4
br label %sw.bb2
sw.bb2: ; preds = %entry, %sw.bb1
- %3 = load i32* %i.addr, align 4
+ %3 = load i32, i32* %i.addr, align 4
%add = add nsw i32 %3, 3
store i32 %add, i32* %i.addr, align 4
br label %sw.bb3
sw.bb3: ; preds = %entry, %sw.bb2
- %4 = load i32* %i.addr, align 4
+ %4 = load i32, i32* %i.addr, align 4
%shl = shl i32 %4, 1
store i32 %shl, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb3, %sw.default
- %5 = load i32* %i.addr, align 4
+ %5 = load i32, i32* %i.addr, align 4
ret i32 %5
}
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
; accessing a jump table address.
;
-; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM5:[^ ]+]]
-; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM5]]
-;
-; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM5:[^ ]+]]
-; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM5]]
-
-@ti = common global i32 0, align 4
-
-define signext i32 @test_tentative() nounwind {
-entry:
- %0 = load i32* @ti, align 4
- %inc = add nsw i32 %0, 1
- store i32 %inc, i32* @ti, align 4
- ret i32 %0
-}
-
-; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
-; accessing tentatively declared variable ti.
-;
-; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
-; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
-;
-; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
-; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
-
-define i8* @test_fnaddr() nounwind {
-entry:
- %func = alloca i32 (i32)*, align 8
- store i32 (i32)* @foo, i32 (i32)** %func, align 8
- %0 = load i32 (i32)** %func, align 8
- %1 = bitcast i32 (i32)* %0 to i8*
- ret i8* %1
-}
-
-declare signext i32 @foo(i32 signext)
-
-; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
-; accessing function address foo.
-;
-; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM7:[^ ]+]]
-; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM7]]
+; MEDIUM-JT: Relocations [
+; MEDIUM-JT: Section ({{.*}}) .rela.text {
+; MEDIUM-JT-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM:[^ ]+]]
+; MEDIUM-JT-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM]]
;
-; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM7:[^ ]+]]
-; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM7]]
+; LARGE-JT: Relocations [
+; LARGE-JT: Section ({{.*}}) .rela.text {
+; LARGE-JT-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM:[^ ]+]]
+; LARGE-JT-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM]]
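
For context on why the R_PPC64_TOC16_HA / R_PPC64_TOC16_LO_DS checks above always come in adjacent pairs against the same symbol: under the medium and large code models each global access is expected to lower to a TOC-relative addis plus a DS-form load, with the HA relocation patching the addis and the LO_DS relocation patching the load's displacement. A minimal sketch of the assumed sequence for the @ei access (the .LC0 TOC-entry label and the register numbers are illustrative, not taken from this test's output):

    addis 3, 2, .LC0@toc@ha    # high-adjusted TOC offset   -> R_PPC64_TOC16_HA
    ld    3, .LC0@toc@l(3)     # DS-form load of TOC entry  -> R_PPC64_TOC16_LO_DS
    lwz   3, 0(3)              # the actual load of ei
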
diff --git a/test/CodeGen/PowerPC/mem-rr-addr-mode.ll b/test/CodeGen/PowerPC/mem-rr-addr-mode.ll
index 5661ef9768d1..60a4bdba2296 100644
--- a/test/CodeGen/PowerPC/mem-rr-addr-mode.ll
+++ b/test/CodeGen/PowerPC/mem-rr-addr-mode.ll
@@ -5,12 +5,12 @@
; This shares the constant 16 between the two loads.
define void @func(<4 x float>* %a, <4 x float>* %b) {
- %tmp1 = getelementptr <4 x float>* %b, i32 1 ; <<4 x float>*> [#uses=1]
- %tmp = load <4 x float>* %tmp1 ; <<4 x float>> [#uses=1]
- %tmp3 = getelementptr <4 x float>* %a, i32 1 ; <<4 x float>*> [#uses=1]
- %tmp4 = load <4 x float>* %tmp3 ; <<4 x float>> [#uses=1]
+ %tmp1 = getelementptr <4 x float>, <4 x float>* %b, i32 1 ; <<4 x float>*> [#uses=1]
+ %tmp = load <4 x float>, <4 x float>* %tmp1 ; <<4 x float>> [#uses=1]
+ %tmp3 = getelementptr <4 x float>, <4 x float>* %a, i32 1 ; <<4 x float>*> [#uses=1]
+ %tmp4 = load <4 x float>, <4 x float>* %tmp3 ; <<4 x float>> [#uses=1]
%tmp5 = fmul <4 x float> %tmp, %tmp4 ; <<4 x float>> [#uses=1]
- %tmp8 = load <4 x float>* %b ; <<4 x float>> [#uses=1]
+ %tmp8 = load <4 x float>, <4 x float>* %b ; <<4 x float>> [#uses=1]
%tmp9 = fadd <4 x float> %tmp5, %tmp8 ; <<4 x float>> [#uses=1]
store <4 x float> %tmp9, <4 x float>* %a
ret void
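
To illustrate the sharing described in the comment above: the constant 16 is expected to be materialized once and then reused by both register+register vector loads, roughly as sketched here (register numbers are illustrative only, not taken from the test's expected output):

    li  5, 16       # the shared offset 16, materialized once
    lvx 2, 5, 4     # load <4 x float> from %b + 16
    lvx 3, 5, 3     # load <4 x float> from %a + 16
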
diff --git a/test/CodeGen/PowerPC/mem_update.ll b/test/CodeGen/PowerPC/mem_update.ll
index fcf53da67fc2..2fa01402579a 100644
--- a/test/CodeGen/PowerPC/mem_update.ll
+++ b/test/CodeGen/PowerPC/mem_update.ll
@@ -6,58 +6,58 @@
@Glob = global i64 4
define i32* @test0(i32* %X, i32* %dest) nounwind {
- %Y = getelementptr i32* %X, i32 4
- %A = load i32* %Y
+ %Y = getelementptr i32, i32* %X, i32 4
+ %A = load i32, i32* %Y
store i32 %A, i32* %dest
ret i32* %Y
}
define i32* @test1(i32* %X, i32* %dest) nounwind {
- %Y = getelementptr i32* %X, i32 4
- %A = load i32* %Y
+ %Y = getelementptr i32, i32* %X, i32 4
+ %A = load i32, i32* %Y
store i32 %A, i32* %dest
ret i32* %Y
}
define i16* @test2(i16* %X, i32* %dest) nounwind {
- %Y = getelementptr i16* %X, i32 4
- %A = load i16* %Y
+ %Y = getelementptr i16, i16* %X, i32 4
+ %A = load i16, i16* %Y
%B = sext i16 %A to i32
store i32 %B, i32* %dest
ret i16* %Y
}
define i16* @test3(i16* %X, i32* %dest) nounwind {
- %Y = getelementptr i16* %X, i32 4
- %A = load i16* %Y
+ %Y = getelementptr i16, i16* %X, i32 4
+ %A = load i16, i16* %Y
%B = zext i16 %A to i32
store i32 %B, i32* %dest
ret i16* %Y
}
define i16* @test3a(i16* %X, i64* %dest) nounwind {
- %Y = getelementptr i16* %X, i32 4
- %A = load i16* %Y
+ %Y = getelementptr i16, i16* %X, i32 4
+ %A = load i16, i16* %Y
%B = sext i16 %A to i64
store i64 %B, i64* %dest
ret i16* %Y
}
define i64* @test4(i64* %X, i64* %dest) nounwind {
- %Y = getelementptr i64* %X, i32 4
- %A = load i64* %Y
+ %Y = getelementptr i64, i64* %X, i32 4
+ %A = load i64, i64* %Y
store i64 %A, i64* %dest
ret i64* %Y
}
define i16* @test5(i16* %X) nounwind {
- %Y = getelementptr i16* %X, i32 4
+ %Y = getelementptr i16, i16* %X, i32 4
store i16 7, i16* %Y
ret i16* %Y
}
define i64* @test6(i64* %X, i64 %A) nounwind {
- %Y = getelementptr i64* %X, i32 4
+ %Y = getelementptr i64, i64* %X, i32 4
store i64 %A, i64* %Y
ret i64* %Y
}
diff --git a/test/CodeGen/PowerPC/memcpy-vec.ll b/test/CodeGen/PowerPC/memcpy-vec.ll
new file mode 100644
index 000000000000..70b8ea931a27
--- /dev/null
+++ b/test/CodeGen/PowerPC/memcpy-vec.ll
@@ -0,0 +1,110 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s -check-prefix=PWR7
+; RUN: llc -mcpu=pwr8 < %s | FileCheck %s -check-prefix=PWR8
+; RUN: llc -mcpu=a2q < %s | FileCheck %s -check-prefix=A2Q
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @foo1(double* nocapture %x, double* nocapture readonly %y) #0 {
+entry:
+ %0 = bitcast double* %x to i8*
+ %1 = bitcast double* %y to i8*
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 32, i32 8, i1 false)
+ ret void
+
+; PWR7-LABEL: @foo1
+; PWR7-NOT: bl memcpy
+; PWR7: ld {{[0-9]+}}, {{[0-9]+}}(4)
+; PWR7: std {{[0-9]+}}, {{[0-9]+}}(3)
+; PWR7: blr
+
+; PWR8-LABEL: @foo1
+; PWR8: lxvw4x
+; PWR8: stxvw4x
+; PWR8: blr
+
+; A2Q-LABEL: @foo1
+; A2Q-NOT: bl memcpy
+; A2Q: ld {{[0-9]+}}, {{[0-9]+}}(4)
+; A2Q: std {{[0-9]+}}, {{[0-9]+}}(3)
+; A2Q: blr
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #0
+
+; Function Attrs: nounwind
+define void @foo2(double* nocapture %x, double* nocapture readonly %y) #0 {
+entry:
+ %0 = bitcast double* %x to i8*
+ %1 = bitcast double* %y to i8*
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 128, i32 8, i1 false)
+ ret void
+
+; PWR7-LABEL: @foo2
+; PWR7: bl memcpy
+; PWR7: blr
+
+; PWR8-LABEL: @foo2
+; PWR8: lxvw4x
+; PWR8: stxvw4x
+; PWR8: blr
+
+; A2Q-LABEL: @foo2
+; A2Q-NOT: bl memcpy
+; A2Q: ld {{[0-9]+}}, {{[0-9]+}}(4)
+; A2Q: std {{[0-9]+}}, {{[0-9]+}}(3)
+; A2Q: blr
+}
+
+; Function Attrs: nounwind
+define void @bar1(double* nocapture %x) #0 {
+entry:
+ %0 = bitcast double* %x to i8*
+ tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 128, i32 8, i1 false)
+ ret void
+
+; PWR7-LABEL: @bar1
+; PWR7-NOT: bl memset
+; PWR7: stxvw4x
+; PWR7: blr
+
+; PWR8-LABEL: @bar1
+; PWR8-NOT: bl memset
+; PWR8: stxvw4x
+; PWR8: blr
+
+; A2Q-LABEL: @bar1
+; A2Q-NOT: bl memset
+; A2Q: std {{[0-9]+}}, {{[0-9]+}}(3)
+; A2Q: blr
+}
+
+; Function Attrs: nounwind
+define void @bar2(double* nocapture %x) #0 {
+entry:
+ %0 = bitcast double* %x to i8*
+ tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 128, i32 32, i1 false)
+ ret void
+
+; PWR7-LABEL: @bar2
+; PWR7-NOT: bl memset
+; PWR7: stxvw4x
+; PWR7: blr
+
+; PWR8-LABEL: @bar2
+; PWR8-NOT: bl memset
+; PWR8: stxvw4x
+; PWR8: blr
+
+; A2Q-LABEL: @bar2
+; A2Q-NOT: bl memset
+; A2Q: qvstfdx
+; A2Q: blr
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #0
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/memset-nc-le.ll b/test/CodeGen/PowerPC/memset-nc-le.ll
new file mode 100644
index 000000000000..af8e9c3fb4fd
--- /dev/null
+++ b/test/CodeGen/PowerPC/memset-nc-le.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le"
+
+; Function Attrs: nounwind
+define void @test_vsx() unnamed_addr #0 align 2 {
+entry:
+ %0 = load i32, i32* undef, align 4
+ %1 = trunc i32 %0 to i8
+ call void @llvm.memset.p0i8.i64(i8* null, i8 %1, i64 32, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: @test_vsx
+; CHECK: stxvd2x
+; CHECK: stxvd2x
+; CHECK: blr
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1
+
+attributes #0 = { nounwind "target-cpu"="pwr8" }
+attributes #1 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/memset-nc.ll b/test/CodeGen/PowerPC/memset-nc.ll
new file mode 100644
index 000000000000..414a987a56fe
--- /dev/null
+++ b/test/CodeGen/PowerPC/memset-nc.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s | FileCheck %s
+; RUN: llc -O0 < %s | FileCheck %s -check-prefix=CHECK-O0
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+; Function Attrs: nounwind
+define void @test_qpx() unnamed_addr #0 align 2 {
+entry:
+ %0 = load i32, i32* undef, align 4
+ %1 = trunc i32 %0 to i8
+ call void @llvm.memset.p0i8.i64(i8* null, i8 %1, i64 64, i32 32, i1 false)
+ ret void
+
+; CHECK-LABEL: @test_qpx
+; CHECK: qvstfdx
+; CHECK: qvstfdx
+; CHECK: blr
+
+; CHECK-O0-LABEL: @test_qpx
+; CHECK-O0-NOT: qvstfdx
+; CHECK-O0: blr
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1
+
+; Function Attrs: nounwind
+define void @test_vsx() unnamed_addr #2 align 2 {
+entry:
+ %0 = load i32, i32* undef, align 4
+ %1 = trunc i32 %0 to i8
+ call void @llvm.memset.p0i8.i64(i8* null, i8 %1, i64 32, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: @test_vsx
+; CHECK: stxvw4x
+; CHECK: stxvw4x
+; CHECK: blr
+
+; CHECK-O0-LABEL: @test_vsx
+; CHECK-O0-NOT: stxvw4x
+; CHECK-O0: blr
+}
+
+attributes #0 = { nounwind "target-cpu"="a2q" }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind "target-cpu"="pwr7" }
+
diff --git a/test/CodeGen/PowerPC/misched-inorder-latency.ll b/test/CodeGen/PowerPC/misched-inorder-latency.ll
index b259ff182c0c..0f57e90abebd 100644
--- a/test/CodeGen/PowerPC/misched-inorder-latency.ll
+++ b/test/CodeGen/PowerPC/misched-inorder-latency.ll
@@ -15,13 +15,13 @@ target triple = "powerpc64-bgq-linux"
define i32 @testload(i32 *%ptr, i32 %sumin) {
entry:
%sum1 = add i32 %sumin, 1
- %val1 = load i32* %ptr
+ %val1 = load i32, i32* %ptr
%p = icmp eq i32 %sumin, 0
br i1 %p, label %true, label %end
true:
%sum2 = add i32 %sum1, 1
- %ptr2 = getelementptr i32* %ptr, i32 1
- %val = load i32* %ptr2
+ %ptr2 = getelementptr i32, i32* %ptr, i32 1
+ %val = load i32, i32* %ptr2
%val2 = add i32 %val1, %val
br label %end
end:
diff --git a/test/CodeGen/PowerPC/misched.ll b/test/CodeGen/PowerPC/misched.ll
index d6fb3b30464f..1c868b3f171c 100644
--- a/test/CodeGen/PowerPC/misched.ll
+++ b/test/CodeGen/PowerPC/misched.ll
@@ -18,7 +18,7 @@ for.body24.i: ; preds = %for.body24.i, %for.
br i1 undef, label %for.body24.i58, label %for.body24.i
for.body24.i58: ; preds = %for.body24.i58, %for.body24.i
- %arrayidx26.i55.1 = getelementptr inbounds [16000 x double]* @b, i64 0, i64 undef
+ %arrayidx26.i55.1 = getelementptr inbounds [16000 x double], [16000 x double]* @b, i64 0, i64 undef
store double 1.000000e+00, double* %arrayidx26.i55.1, align 8
br i1 undef, label %for.body24.i64, label %for.body24.i58
diff --git a/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll b/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
index 743cc62ddba7..36aecf1806bd 100644
--- a/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
+++ b/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
@@ -33,10 +33,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,<r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r<"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -48,10 +48,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,>r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r>"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -63,7 +63,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -120,10 +120,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,imr"(i32 1) nounwind
@@ -137,15 +137,15 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,X"(i32 1) nounwind
store i32 %2, i32* %out0, align 4
- %3 = call i32 asm "foo $1,$0", "=r,X"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ %3 = call i32 asm "foo $1,$0", "=r,X"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
store i32 %3, i32* %out0, align 4
%4 = call i32 asm "foo $1,$0", "=r,X"(double 1.000000e+001) nounwind
store i32 %4, i32* %out0, align 4
@@ -158,14 +158,14 @@ define void @single_p() nounwind {
entry:
%out0 = alloca i32, align 4
store i32 0, i32* %out0, align 4
- %0 = call i32 asm "foo $1,$0", "=r,r"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ %0 = call i32 asm "foo $1,$0", "=r,r"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
store i32 %0, i32* %out0, align 4
ret void
}
define void @multi_m() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
ret void
}
@@ -190,10 +190,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|<r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r<"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -205,10 +205,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|>r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r>"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -220,7 +220,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|m"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -277,10 +277,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 1) nounwind
@@ -294,15 +294,15 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 1) nounwind
store i32 %2, i32* %out0, align 4
- %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
store i32 %3, i32* %out0, align 4
%4 = call i32 asm "foo $1,$0", "=r|r,r|X"(double 1.000000e+001) nounwind
store i32 %4, i32* %out0, align 4
@@ -315,7 +315,7 @@ define void @multi_p() nounwind {
entry:
%out0 = alloca i32, align 4
store i32 0, i32* %out0, align 4
- %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
store i32 %0, i32* %out0, align 4
ret void
}
diff --git a/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll b/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
index 29a57867f567..ab7e414ead55 100644
--- a/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
+++ b/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
@@ -33,10 +33,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,<r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r<"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -48,10 +48,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,>r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r>"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -63,7 +63,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -120,10 +120,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,imr"(i32 1) nounwind
@@ -137,15 +137,15 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,X"(i32 1) nounwind
store i32 %2, i32* %out0, align 4
- %3 = call i32 asm "foo $1,$0", "=r,X"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ %3 = call i32 asm "foo $1,$0", "=r,X"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
store i32 %3, i32* %out0, align 4
%4 = call i32 asm "foo $1,$0", "=r,X"(double 1.000000e+001) nounwind
store i32 %4, i32* %out0, align 4
@@ -158,14 +158,14 @@ define void @single_p() nounwind {
entry:
%out0 = alloca i32, align 4
store i32 0, i32* %out0, align 4
- %0 = call i32 asm "foo $1,$0", "=r,r"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ %0 = call i32 asm "foo $1,$0", "=r,r"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
store i32 %0, i32* %out0, align 4
ret void
}
define void @multi_m() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
ret void
}
@@ -190,10 +190,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|<r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r<"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -205,10 +205,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|>r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r>"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -220,7 +220,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|m"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -277,10 +277,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 1) nounwind
@@ -294,15 +294,15 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 1) nounwind
store i32 %2, i32* %out0, align 4
- %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
store i32 %3, i32* %out0, align 4
%4 = call i32 asm "foo $1,$0", "=r|r,r|X"(double 1.000000e+001) nounwind
store i32 %4, i32* %out0, align 4
@@ -315,7 +315,7 @@ define void @multi_p() nounwind {
entry:
%out0 = alloca i32, align 4
store i32 0, i32* %out0, align 4
- %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
store i32 %0, i32* %out0, align 4
ret void
}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll b/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll
index d4ed05b9e50a..834df8b4d91c 100644
--- a/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll
@@ -1,15 +1,12 @@
-; RUN: not llc < %s -mtriple=powerpc64-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=powerpc64-apple-darwin 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
define i64 @get_reg() nounwind {
entry:
; FIXME: Include an allocatable-specific error message
-; CHECK-DARWIN: Invalid register name global variable
+; CHECK: Invalid register name global variable
%reg = call i64 @llvm.read_register.i64(metadata !0)
ret i64 %reg
-
-; CHECK-LABEL: @get_reg
-; CHECK: mr 3, 2
}
declare i64 @llvm.read_register.i64(metadata) nounwind
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r2.ll b/test/CodeGen/PowerPC/named-reg-alloc-r2.ll
index 262d034e16bd..45d9816793c4 100644
--- a/test/CodeGen/PowerPC/named-reg-alloc-r2.ll
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r2.ll
@@ -1,11 +1,11 @@
-; RUN: not llc < %s -mtriple=powerpc-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: not llc < %s -mtriple=powerpc-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-NOTPPC32
; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu 2>&1 | FileCheck %s
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s --check-prefix=CHECK-NOTPPC32
define i32 @get_reg() nounwind {
entry:
; FIXME: Include an allocatable-specific error message
-; CHECK-DARWIN: Invalid register name global variable
+; CHECK-NOTPPC32: Invalid register name global variable
%reg = call i32 @llvm.read_register.i32(metadata !0)
ret i32 %reg
diff --git a/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll b/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll
index 6beee253a2ec..26739bf958b5 100644
--- a/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll
+++ b/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll
@@ -5,7 +5,7 @@ target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind readonly
define double @test1(i64* nocapture readonly %x) #0 {
entry:
- %0 = load i64* %x, align 8
+ %0 = load i64, i64* %x, align 8
%conv = sitofp i64 %0 to double
ret double %conv
@@ -18,7 +18,7 @@ entry:
; Function Attrs: nounwind readonly
define double @test2(i32* nocapture readonly %x) #0 {
entry:
- %0 = load i32* %x, align 4
+ %0 = load i32, i32* %x, align 4
%conv = sitofp i32 %0 to double
ret double %conv
diff --git a/test/CodeGen/PowerPC/no-pref-jumps.ll b/test/CodeGen/PowerPC/no-pref-jumps.ll
new file mode 100644
index 000000000000..d9490f16e8f9
--- /dev/null
+++ b/test/CodeGen/PowerPC/no-pref-jumps.ll
@@ -0,0 +1,36 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @foo(i32 signext %a, i32 signext %b) #0 {
+entry:
+ %cmp = icmp sgt i32 %a, 5
+ %cmp1 = icmp slt i32 %b, 3
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.else
+
+; CHECK-LABEL: @foo
+; CHECK: cmpwi
+; CHECK: cmpwi
+; CHECK: cror
+; CHECK: blr
+
+if.then: ; preds = %entry
+ tail call void bitcast (void (...)* @bar to void ()*)() #0
+ br label %if.end
+
+if.else: ; preds = %entry
+ tail call void bitcast (void (...)* @car to void ()*)() #0
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void
+}
+
+declare void @bar(...)
+
+declare void @car(...)
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/novrsave.ll b/test/CodeGen/PowerPC/novrsave.ll
index a70576a291e9..50be2a162ed8 100644
--- a/test/CodeGen/PowerPC/novrsave.ll
+++ b/test/CodeGen/PowerPC/novrsave.ll
@@ -7,7 +7,7 @@ define <4 x float> @bar(<4 x float> %v) nounwind {
entry:
%v.addr = alloca <4 x float>, align 16
store <4 x float> %v, <4 x float>* %v.addr, align 16
- %0 = load <4 x float>* %v.addr, align 16
+ %0 = load <4 x float>, <4 x float>* %v.addr, align 16
ret <4 x float> %0
}
diff --git a/test/CodeGen/PowerPC/optnone-crbits-i1-ret.ll b/test/CodeGen/PowerPC/optnone-crbits-i1-ret.ll
new file mode 100644
index 000000000000..745a038d6ce0
--- /dev/null
+++ b/test/CodeGen/PowerPC/optnone-crbits-i1-ret.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+declare zeroext i1 @ri1()
+declare void @se1()
+declare void @se2()
+
+define void @test() #0 {
+entry:
+ %b = call zeroext i1 @ri1()
+ br label %next
+
+; CHECK-LABEL: @test
+; CHECK: bl ri1
+; CHECK-NEXT: nop
+; CHECK: andi. 3, 3, 1
+
+next:
+ br i1 %b, label %case1, label %case2
+
+case1:
+ call void @se1()
+ br label %end
+
+case2:
+ call void @se2()
+ br label %end
+
+end:
+ ret void
+
+; CHECK: blr
+}
+
+attributes #0 = { noinline optnone }
+
diff --git a/test/CodeGen/PowerPC/or-addressing-mode.ll b/test/CodeGen/PowerPC/or-addressing-mode.ll
index e50374e30696..f98b34cfdb6a 100644
--- a/test/CodeGen/PowerPC/or-addressing-mode.ll
+++ b/test/CodeGen/PowerPC/or-addressing-mode.ll
@@ -8,7 +8,7 @@ define i32 @test1(i8* %P) {
%tmp.11.i = and i32 %tmp.10.i, 2040 ; <i32> [#uses=1]
%tmp.13.i = or i32 %tmp.11.i, %tmp.4.i ; <i32> [#uses=1]
%tmp.14.i = inttoptr i32 %tmp.13.i to i32* ; <i32*> [#uses=1]
- %tmp.3 = load i32* %tmp.14.i ; <i32> [#uses=1]
+ %tmp.3 = load i32, i32* %tmp.14.i ; <i32> [#uses=1]
ret i32 %tmp.3
}
@@ -16,7 +16,7 @@ define i32 @test2(i32 %P) {
%tmp.2 = shl i32 %P, 4 ; <i32> [#uses=1]
%tmp.3 = or i32 %tmp.2, 2 ; <i32> [#uses=1]
%tmp.4 = inttoptr i32 %tmp.3 to i32* ; <i32*> [#uses=1]
- %tmp.5 = load i32* %tmp.4 ; <i32> [#uses=1]
+ %tmp.5 = load i32, i32* %tmp.4 ; <i32> [#uses=1]
ret i32 %tmp.5
}
diff --git a/test/CodeGen/PowerPC/p8-isel-sched.ll b/test/CodeGen/PowerPC/p8-isel-sched.ll
new file mode 100644
index 000000000000..034fe3c5fcbf
--- /dev/null
+++ b/test/CodeGen/PowerPC/p8-isel-sched.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mcpu=pwr8 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @foo(i32* nocapture %r1, i32* nocapture %r2, i32* nocapture %r3, i32* nocapture %r4, i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d) #0 {
+entry:
+ %tobool = icmp ne i32 %a, 0
+ %cond = select i1 %tobool, i32 %b, i32 %c
+ store i32 %cond, i32* %r1, align 4
+ %cond5 = select i1 %tobool, i32 %b, i32 %d
+ store i32 %cond5, i32* %r2, align 4
+ %add = add nsw i32 %b, 1
+ %sub = add nsw i32 %d, -2
+ %cond10 = select i1 %tobool, i32 %add, i32 %sub
+ store i32 %cond10, i32* %r3, align 4
+ %add13 = add nsw i32 %b, 3
+ %sub15 = add nsw i32 %d, -5
+ %cond17 = select i1 %tobool, i32 %add13, i32 %sub15
+ store i32 %cond17, i32* %r4, align 4
+ ret void
+}
+
+; Make sure that we don't schedule all of the isels together; they should be
+; intermixed with the adds because each isel starts a new dispatch group.
+; CHECK-LABEL: @foo
+; CHECK: isel
+; CHECK: addi
+; CHECK: isel
+; CHECK: blr
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/pip-inner.ll b/test/CodeGen/PowerPC/pip-inner.ll
new file mode 100644
index 000000000000..930f0d371472
--- /dev/null
+++ b/test/CodeGen/PowerPC/pip-inner.ll
@@ -0,0 +1,52 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @foo(double* %x, double* nocapture readonly %y) #0 {
+entry:
+ br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.end, %entry
+ %i.015 = phi i32 [ 0, %entry ], [ %inc7, %for.end ]
+ br label %for.body3
+
+for.body3: ; preds = %for.body3, %for.cond1.preheader
+ %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
+ %0 = load double, double* %arrayidx, align 8
+ %add = fadd double %0, 1.000000e+00
+ %arrayidx5 = getelementptr inbounds double, double* %x, i64 %indvars.iv
+ store double %add, double* %arrayidx5, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 16000
+ br i1 %exitcond, label %for.end, label %for.body3
+
+for.end: ; preds = %for.body3
+ tail call void @bar(double* %x) #2
+ %inc7 = add nuw nsw i32 %i.015, 1
+ %exitcond16 = icmp eq i32 %inc7, 1000
+ br i1 %exitcond16, label %for.end8, label %for.cond1.preheader
+
+for.end8: ; preds = %for.end
+ ret void
+
+; CHECK-LABEL: @foo
+
+; CHECK: lfdu [[REG1:[0-9]+]], 8({{[0-9]+}})
+; CHECK: fadd [[REG2:[0-9]+]], [[REG1]], {{[0-9]+}}
+; CHECK: stfdu [[REG2]], 8({{[0-9]+}})
+; CHECK: bdnz
+
+; CHECK: bl bar
+; CHECK-NEXT: nop
+
+; CHECK: blr
+}
+
+declare void @bar(double*) #1
+
+attributes #0 = { nounwind "target-cpu"="a2" }
+attributes #1 = { "target-cpu"="a2" }
+attributes #2 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/post-ra-ec.ll b/test/CodeGen/PowerPC/post-ra-ec.ll
index 9c61677650ba..e32441be90fd 100644
--- a/test/CodeGen/PowerPC/post-ra-ec.ll
+++ b/test/CodeGen/PowerPC/post-ra-ec.ll
@@ -16,12 +16,12 @@ entry:
br i1 undef, label %if.end, label %if.then
if.then: ; preds = %entry
- %0 = load i64* undef, align 8
+ %0 = load i64, i64* undef, align 8
%conv.i = trunc i64 %0 to i32
- %1 = load i32* null, align 4
+ %1 = load i32, i32* null, align 4
%add = add i32 %1, %conv.i
store i32 %add, i32* null, align 4
- %counter.i.i = getelementptr inbounds %struct.task_struct.4.16.124* %call1.i, i64 0, i32 1, i32 0
+ %counter.i.i = getelementptr inbounds %struct.task_struct.4.16.124, %struct.task_struct.4.16.124* %call1.i, i64 0, i32 1, i32 0
%2 = tail call i32 asm sideeffect "\09lwsync\0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0A\09sync\0A", "=&r,r,~{cr0},~{xer},~{memory}"(i32* %counter.i.i) #0
%cmp.i = icmp eq i32 %2, 0
br i1 %cmp.i, label %if.then.i, label %if.end
diff --git a/test/CodeGen/PowerPC/ppc-crbits-onoff.ll b/test/CodeGen/PowerPC/ppc-crbits-onoff.ll
new file mode 100644
index 000000000000..88648df5fa36
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc-crbits-onoff.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define signext i32 @crbitsoff(i32 signext %v1, i32 signext %v2) #0 {
+entry:
+ %tobool = icmp ne i32 %v1, 0
+ %lnot = icmp eq i32 %v2, 0
+ %and3 = and i1 %tobool, %lnot
+ %and = zext i1 %and3 to i32
+ ret i32 %and
+
+; CHECK-LABEL: @crbitsoff
+; CHECK-DAG: cmplwi {{[0-9]+}}, 3, 0
+; CHECK-DAG: li [[REG2:[0-9]+]], 1
+; CHECK-DAG: cntlzw [[REG3:[0-9]+]],
+; CHECK: isel 3, 0, [[REG2]]
+; CHECK: and 3, 3, [[REG3]]
+; CHECK: blr
+}
+
+define signext i32 @crbitson(i32 signext %v1, i32 signext %v2) #1 {
+entry:
+ %tobool = icmp ne i32 %v1, 0
+ %lnot = icmp eq i32 %v2, 0
+ %and3 = and i1 %tobool, %lnot
+ %and = zext i1 %and3 to i32
+ ret i32 %and
+
+; CHECK-LABEL: @crbitson
+; CHECK-DAG: cmpwi {{[0-9]+}}, 3, 0
+; CHECK-DAG: cmpwi {{[0-9]+}}, 4, 0
+; CHECK-DAG: li [[REG2:[0-9]+]], 1
+; CHECK-DAG: crorc [[REG3:[0-9]+]],
+; CHECK: isel 3, 0, [[REG2]], [[REG3]]
+; CHECK: blr
+}
+
+
+attributes #0 = { nounwind readnone "target-features"="-crbits" }
+attributes #1 = { nounwind readnone }
+
diff --git a/test/CodeGen/PowerPC/ppc-empty-fs.ll b/test/CodeGen/PowerPC/ppc-empty-fs.ll
new file mode 100644
index 000000000000..8c0a2fb1dc06
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc-empty-fs.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s | FileCheck %s
+; This guarantees that we add the default set of features to the current feature
+; string. We won't successfully legalize the types here without +64bit being
+; silently added.
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.fab = type { float, float }
+
+; Function Attrs: nounwind
+define void @func_fab(%struct.fab* noalias sret %agg.result, i64 %x.coerce) #0 {
+entry:
+ %x = alloca %struct.fab, align 8
+ %0 = bitcast %struct.fab* %x to i64*
+ store i64 %x.coerce, i64* %0, align 1
+ %1 = bitcast %struct.fab* %agg.result to i8*
+ %2 = bitcast %struct.fab* %x to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 8, i32 4, i1 false)
+ ret void
+}
+
+; CHECK: func_fab
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "target-features"="" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.7.0 (trunk 233227) (llvm/trunk 233226)"}
diff --git a/test/CodeGen/PowerPC/ppc-prologue.ll b/test/CodeGen/PowerPC/ppc-prologue.ll
index 553837121a36..c84e6fbd4b60 100644
--- a/test/CodeGen/PowerPC/ppc-prologue.ll
+++ b/test/CodeGen/PowerPC/ppc-prologue.ll
@@ -14,12 +14,12 @@ entry:
store i32 %a, i32* %a_addr
%1 = call i32 @_Z3barPi(i32* %a_addr) ; <i32> [#uses=1]
store i32 %1, i32* %0, align 4
- %2 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
store i32 %2, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
diff --git a/test/CodeGen/PowerPC/ppc32-cyclecounter.ll b/test/CodeGen/PowerPC/ppc32-cyclecounter.ll
index 9e2cd0b12880..ea50a1bdbdc1 100644
--- a/test/CodeGen/PowerPC/ppc32-cyclecounter.ll
+++ b/test/CodeGen/PowerPC/ppc32-cyclecounter.ll
@@ -13,8 +13,8 @@ entry:
; CHECK: mfspr 3, 269
; CHECK: mfspr 4, 268
; CHECK: mfspr [[REG:[0-9]+]], 269
-; CHECK: cmpw [[CR:[0-9]+]], 3, [[REG]]
-; CHECK: bne [[CR]], .LBB
+; CHECK: cmpw 3, [[REG]]
+; CHECK: bne 0, .LBB
declare i64 @llvm.readcyclecounter()
diff --git a/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll b/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll
index 6e0aec27b7c1..ad8ed38da7fa 100644
--- a/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll
+++ b/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll
@@ -5,7 +5,7 @@ target triple = "powerpc-unknown-linux-gnu"
declare void @printf(i8*, ...)
define void @main() {
- call void (i8*, ...)* @printf(i8* undef, i1 false)
+ call void (i8*, ...) @printf(i8* undef, i1 false)
ret void
}
diff --git a/test/CodeGen/PowerPC/ppc32-lshrti3.ll b/test/CodeGen/PowerPC/ppc32-lshrti3.ll
index f773cce81be3..a2a280f5168e 100644
--- a/test/CodeGen/PowerPC/ppc32-lshrti3.ll
+++ b/test/CodeGen/PowerPC/ppc32-lshrti3.ll
@@ -9,7 +9,7 @@ target triple = "powerpc--netbsd"
; Function Attrs: nounwind uwtable
define i32 @fn1() #0 {
entry:
- %.promoted = load i72* inttoptr (i32 1 to i72*), align 4
+ %.promoted = load i72, i72* inttoptr (i32 1 to i72*), align 4
br label %while.cond
while.cond: ; preds = %while.cond, %entry
diff --git a/test/CodeGen/PowerPC/ppc32-pic-large.ll b/test/CodeGen/PowerPC/ppc32-pic-large.ll
index bb906ec78d11..6a069451a410 100644
--- a/test/CodeGen/PowerPC/ppc32-pic-large.ll
+++ b/test/CodeGen/PowerPC/ppc32-pic-large.ll
@@ -5,8 +5,8 @@ declare i32 @call_foo(i32, ...)
define i32 @foo() {
entry:
- %0 = load i32* @bar, align 4
- %call = call i32 (i32, ...)* @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
+ %0 = load i32, i32* @bar, align 4
+ %call = call i32 (i32, ...) @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
ret i32 %0
}
diff --git a/test/CodeGen/PowerPC/ppc32-pic.ll b/test/CodeGen/PowerPC/ppc32-pic.ll
index abc136757177..63f521cbea38 100644
--- a/test/CodeGen/PowerPC/ppc32-pic.ll
+++ b/test/CodeGen/PowerPC/ppc32-pic.ll
@@ -5,8 +5,8 @@ declare i32 @call_foo(i32, ...)
define i32 @foo() {
entry:
- %0 = load i32* @bar, align 4
- %call = call i32 (i32, ...)* @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
+ %0 = load i32, i32* @bar, align 4
+ %call = call i32 (i32, ...) @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
ret i32 0
}
diff --git a/test/CodeGen/PowerPC/ppc440-fp-basic.ll b/test/CodeGen/PowerPC/ppc440-fp-basic.ll
index 77b726c5ae38..95773570d33c 100644
--- a/test/CodeGen/PowerPC/ppc440-fp-basic.ll
+++ b/test/CodeGen/PowerPC/ppc440-fp-basic.ll
@@ -4,28 +4,28 @@
define void @maybe_an_fma(%0* sret %agg.result, %0* byval %a, %0* byval %b, %0* byval %c) nounwind {
entry:
- %a.realp = getelementptr inbounds %0* %a, i32 0, i32 0
- %a.real = load double* %a.realp
- %a.imagp = getelementptr inbounds %0* %a, i32 0, i32 1
- %a.imag = load double* %a.imagp
- %b.realp = getelementptr inbounds %0* %b, i32 0, i32 0
- %b.real = load double* %b.realp
- %b.imagp = getelementptr inbounds %0* %b, i32 0, i32 1
- %b.imag = load double* %b.imagp
+ %a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0
+ %a.real = load double, double* %a.realp
+ %a.imagp = getelementptr inbounds %0, %0* %a, i32 0, i32 1
+ %a.imag = load double, double* %a.imagp
+ %b.realp = getelementptr inbounds %0, %0* %b, i32 0, i32 0
+ %b.real = load double, double* %b.realp
+ %b.imagp = getelementptr inbounds %0, %0* %b, i32 0, i32 1
+ %b.imag = load double, double* %b.imagp
%mul.rl = fmul double %a.real, %b.real
%mul.rr = fmul double %a.imag, %b.imag
%mul.r = fsub double %mul.rl, %mul.rr
%mul.il = fmul double %a.imag, %b.real
%mul.ir = fmul double %a.real, %b.imag
%mul.i = fadd double %mul.il, %mul.ir
- %c.realp = getelementptr inbounds %0* %c, i32 0, i32 0
- %c.real = load double* %c.realp
- %c.imagp = getelementptr inbounds %0* %c, i32 0, i32 1
- %c.imag = load double* %c.imagp
+ %c.realp = getelementptr inbounds %0, %0* %c, i32 0, i32 0
+ %c.real = load double, double* %c.realp
+ %c.imagp = getelementptr inbounds %0, %0* %c, i32 0, i32 1
+ %c.imag = load double, double* %c.imagp
%add.r = fadd double %mul.r, %c.real
%add.i = fadd double %mul.i, %c.imag
- %real = getelementptr inbounds %0* %agg.result, i32 0, i32 0
- %imag = getelementptr inbounds %0* %agg.result, i32 0, i32 1
+ %real = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 0
+ %imag = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 1
store double %add.r, double* %real
store double %add.i, double* %imag
ret void
diff --git a/test/CodeGen/PowerPC/ppc64-abi-extend.ll b/test/CodeGen/PowerPC/ppc64-abi-extend.ll
index 8baf1c613e78..f8e72e3a108e 100644
--- a/test/CodeGen/PowerPC/ppc64-abi-extend.ll
+++ b/test/CodeGen/PowerPC/ppc64-abi-extend.ll
@@ -15,7 +15,7 @@ declare zeroext i32 @ret_ui()
define void @pass_arg_si() nounwind {
entry:
- %0 = load i32* @si, align 4
+ %0 = load i32, i32* @si, align 4
tail call void @arg_si(i32 signext %0) nounwind
ret void
}
@@ -25,7 +25,7 @@ entry:
define void @pass_arg_ui() nounwind {
entry:
- %0 = load i32* @ui, align 4
+ %0 = load i32, i32* @ui, align 4
tail call void @arg_ui(i32 zeroext %0) nounwind
ret void
}
@@ -53,7 +53,7 @@ entry:
define signext i32 @pass_ret_si() nounwind readonly {
entry:
- %0 = load i32* @si, align 4
+ %0 = load i32, i32* @si, align 4
ret i32 %0
}
; CHECK: @pass_ret_si
@@ -62,7 +62,7 @@ entry:
define zeroext i32 @pass_ret_ui() nounwind readonly {
entry:
- %0 = load i32* @ui, align 4
+ %0 = load i32, i32* @ui, align 4
ret i32 %0
}
; CHECK: @pass_ret_ui
diff --git a/test/CodeGen/PowerPC/ppc64-align-long-double.ll b/test/CodeGen/PowerPC/ppc64-align-long-double.ll
index 5ed029cc9702..b9fd6707f041 100644
--- a/test/CodeGen/PowerPC/ppc64-align-long-double.ll
+++ b/test/CodeGen/PowerPC/ppc64-align-long-double.ll
@@ -13,8 +13,8 @@ target triple = "powerpc64-unknown-linux-gnu"
define ppc_fp128 @test(%struct.S* byval %x) nounwind {
entry:
- %b = getelementptr inbounds %struct.S* %x, i32 0, i32 1
- %0 = load ppc_fp128* %b, align 16
+ %b = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1
+ %0 = load ppc_fp128, ppc_fp128* %b, align 16
ret ppc_fp128 %0
}
diff --git a/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll b/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
index 479c7a7af25f..dfa6ec058b92 100644
--- a/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
+++ b/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
@@ -8,7 +8,7 @@ define i64 @anyreglimit(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i6
i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
i64 %v25, i64 %v26, i64 %v27, i64 %v28, i64 %v29, i64 %v30, i64 %v31, i64 %v32) {
entry:
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 12, i32 15, i8* inttoptr (i64 0 to i8*), i32 32,
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 15, i8* inttoptr (i64 0 to i8*), i32 32,
i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8,
i64 %v9, i64 %v10, i64 %v11, i64 %v12, i64 %v13, i64 %v14, i64 %v15, i64 %v16,
i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
diff --git a/test/CodeGen/PowerPC/ppc64-anyregcc.ll b/test/CodeGen/PowerPC/ppc64-anyregcc.ll
index 7cd3c4b38200..66f6a2c790c6 100644
--- a/test/CodeGen/PowerPC/ppc64-anyregcc.ll
+++ b/test/CodeGen/PowerPC/ppc64-anyregcc.ll
@@ -2,6 +2,31 @@
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
+; CHECK-LABEL: test:
+; CHECK: {{^}}.L[[test_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: property_access1:
+; CHECK: {{^}}.L[[property_access1_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: property_access2:
+; CHECK: {{^}}.L[[property_access2_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: property_access3:
+; CHECK: {{^}}.L[[property_access3_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: anyreg_test1:
+; CHECK: {{^}}.L[[anyreg_test1_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: anyreg_test2:
+; CHECK: {{^}}.L[[anyreg_test2_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: patchpoint_spilldef:
+; CHECK: {{^}}.L[[patchpoint_spilldef_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: patchpoint_spillargs:
+; CHECK: {{^}}.L[[patchpoint_spillargs_BEGIN:.*]]:{{$}}
+
+
; Stackmap Header: no constants - 6 callsites
; CHECK-LABEL: .section .llvm_stackmaps
; CHECK-NEXT: __LLVM_StackMaps:
@@ -26,9 +51,9 @@ target triple = "powerpc64-unknown-linux-gnu"
; CHECK-NEXT: .quad property_access3
; CHECK-NEXT: .quad 128
; CHECK-NEXT: .quad anyreg_test1
-; CHECK-NEXT: .quad 160
+; CHECK-NEXT: .quad 144
; CHECK-NEXT: .quad anyreg_test2
-; CHECK-NEXT: .quad 160
+; CHECK-NEXT: .quad 144
; CHECK-NEXT: .quad patchpoint_spilldef
; CHECK-NEXT: .quad 256
; CHECK-NEXT: .quad patchpoint_spillargs
@@ -36,7 +61,7 @@ target triple = "powerpc64-unknown-linux-gnu"
; test
-; CHECK-LABEL: .long .L{{.*}}-.L.test
+; CHECK: .long .L{{.*}}-.L[[test_BEGIN]]
; CHECK-NEXT: .short 0
; 3 locations
; CHECK-NEXT: .short 3
@@ -57,12 +82,12 @@ target triple = "powerpc64-unknown-linux-gnu"
; CHECK-NEXT: .long 3
define i64 @test() nounwind ssp uwtable {
entry:
- call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 0, i32 24, i8* null, i32 2, i32 1, i32 2, i64 3)
+ call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 24, i8* null, i32 2, i32 1, i32 2, i64 3)
ret i64 0
}
; property access 1 - %obj is an anyreg call argument and should therefore be in a register
-; CHECK-LABEL: .long .L{{.*}}-.L.property_access1
+; CHECK: .long .L{{.*}}-.L[[property_access1_BEGIN]]
; CHECK-NEXT: .short 0
; 2 locations
; CHECK-NEXT: .short 2
@@ -79,12 +104,12 @@ entry:
define i64 @property_access1(i8* %obj) nounwind ssp uwtable {
entry:
%f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 1, i32 24, i8* %f, i32 1, i8* %obj)
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 1, i32 24, i8* %f, i32 1, i8* %obj)
ret i64 %ret
}
; property access 2 - %obj is an anyreg call argument and should therefore be in a register
-; CHECK-LABEL: .long .L{{.*}}-.L.property_access2
+; CHECK: .long .L{{.*}}-.L[[property_access2_BEGIN]]
; CHECK-NEXT: .short 0
; 2 locations
; CHECK-NEXT: .short 2
@@ -102,12 +127,12 @@ define i64 @property_access2() nounwind ssp uwtable {
entry:
%obj = alloca i64, align 8
%f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 24, i8* %f, i32 1, i64* %obj)
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 24, i8* %f, i32 1, i64* %obj)
ret i64 %ret
}
; property access 3 - %obj is a frame index
-; CHECK-LABEL: .long .L{{.*}}-.L.property_access3
+; CHECK: .long .L{{.*}}-.L[[property_access3_BEGIN]]
; CHECK-NEXT: .short 0
; 2 locations
; CHECK-NEXT: .short 2
@@ -125,12 +150,12 @@ define i64 @property_access3() nounwind ssp uwtable {
entry:
%obj = alloca i64, align 8
%f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 3, i32 24, i8* %f, i32 0, i64* %obj)
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 3, i32 24, i8* %f, i32 0, i64* %obj)
ret i64 %ret
}
; anyreg_test1
-; CHECK-LABEL: .long .L{{.*}}-.L.anyreg_test1
+; CHECK: .long .L{{.*}}-.L[[anyreg_test1_BEGIN]]
; CHECK-NEXT: .short 0
; 14 locations
; CHECK-NEXT: .short 14
@@ -207,12 +232,12 @@ entry:
define i64 @anyreg_test1(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
entry:
%f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 4, i32 24, i8* %f, i32 13, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 4, i32 24, i8* %f, i32 13, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
ret i64 %ret
}
; anyreg_test2
-; CHECK-LABEL: .long .L{{.*}}-.L.anyreg_test2
+; CHECK: .long .L{{.*}}-.L[[anyreg_test2_BEGIN]]
; CHECK-NEXT: .short 0
; 14 locations
; CHECK-NEXT: .short 14
@@ -289,7 +314,7 @@ entry:
define i64 @anyreg_test2(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
entry:
%f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 24, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 24, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
ret i64 %ret
}
@@ -297,7 +322,7 @@ entry:
;
; <rdar://problem/15432754> [JS] Assertion: "Folded a def to a non-store!"
;
-; CHECK-LABEL: .long .L{{.*}}-.L.patchpoint_spilldef
+; CHECK: .long .L{{.*}}-.L[[patchpoint_spilldef_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 3
; Loc 0: Register (some register that will be spilled to the stack)
@@ -317,7 +342,7 @@ entry:
; CHECK-NEXT: .long 0
define i64 @patchpoint_spilldef(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 12, i32 24, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 24, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
  tail call void asm sideeffect "nop", "~{r0},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31}"() nounwind
ret i64 %result
@@ -327,7 +352,7 @@ entry:
;
; <rdar://problem/15487687> [JS] AnyRegCC argument ends up being spilled
;
-; CHECK-LABEL: .long .L{{.*}}-.L.patchpoint_spillargs
+; CHECK: .long .L{{.*}}-.L[[patchpoint_spillargs_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 5
; Loc 0: Return a register
@@ -359,7 +384,7 @@ define i64 @patchpoint_spillargs(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
  tail call void asm sideeffect "nop", "~{r0},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31}"() nounwind
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 13, i32 24, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 13, i32 24, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
ret i64 %result
}
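
The switch above from matching the fixed .L.test style labels to captured [[..._BEGIN]] variables relies on FileCheck pattern variables: [[NAME:regex]] at a function's first local label captures whatever label the compiler actually emitted, and a later [[NAME]] reuses that exact string, so the stackmap offset checks no longer depend on a particular label spelling. A minimal sketch of the idiom (the function and variable names are illustrative only):

    ; CHECK-LABEL: myfunc:
    ; CHECK: {{^}}.L[[myfunc_BEGIN:.*]]:{{$}}
    ; ...
    ; CHECK: .long .L{{.*}}-.L[[myfunc_BEGIN]]
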
diff --git a/test/CodeGen/PowerPC/ppc64-byval-align.ll b/test/CodeGen/PowerPC/ppc64-byval-align.ll
index 0e73cf2b0e05..7170f5906581 100644
--- a/test/CodeGen/PowerPC/ppc64-byval-align.ll
+++ b/test/CodeGen/PowerPC/ppc64-byval-align.ll
@@ -30,8 +30,8 @@ entry:
define i64 @callee2(%struct.pad* byval nocapture readnone %x, i32 signext %y, %struct.test* byval align 16 nocapture readonly %z) {
entry:
- %x1 = getelementptr inbounds %struct.test* %z, i64 0, i32 0
- %0 = load i64* %x1, align 16
+ %x1 = getelementptr inbounds %struct.test, %struct.test* %z, i64 0, i32 0
+ %0 = load i64, i64* %x1, align 16
ret i64 %0
}
; CHECK-LABEL: @callee2
@@ -43,14 +43,16 @@ declare i64 @test2(%struct.pad* byval, i32 signext, %struct.test* byval align 16
define void @caller2(i64 %z) {
entry:
%tmp = alloca %struct.test, align 16
- %.compoundliteral.sroa.0.0..sroa_idx = getelementptr inbounds %struct.test* %tmp, i64 0, i32 0
+ %.compoundliteral.sroa.0.0..sroa_idx = getelementptr inbounds %struct.test, %struct.test* %tmp, i64 0, i32 0
store i64 %z, i64* %.compoundliteral.sroa.0.0..sroa_idx, align 16
%call = call i64 @test2(%struct.pad* byval @gp, i32 signext 0, %struct.test* byval align 16 %tmp)
ret void
}
; CHECK-LABEL: @caller2
; CHECK: std 3, [[OFF:[0-9]+]](1)
-; CHECK: ld [[REG:[0-9]+]], [[OFF]](1)
-; CHECK: std [[REG]], 128(1)
+; CHECK: addi [[REG1:[0-9]+]], 1, [[OFF]]
+; CHECK: lxvw4x [[REG2:[0-9]+]], 0, [[REG1]]
+; CHECK: li [[REG3:[0-9]+]], 128
+; CHECK: stxvw4x 0, 1, [[REG3]]
; CHECK: bl test2
diff --git a/test/CodeGen/PowerPC/ppc64-calls.ll b/test/CodeGen/PowerPC/ppc64-calls.ll
index 707ba95235f2..23a14e6687d6 100644
--- a/test/CodeGen/PowerPC/ppc64-calls.ll
+++ b/test/CodeGen/PowerPC/ppc64-calls.ll
@@ -73,7 +73,7 @@ define double @test_external(double %x) nounwind {
@g = external global void ()*
declare void @h(i64)
define void @test_indir_toc_reload(i64 %x) {
- %1 = load void ()** @g
+ %1 = load void ()*, void ()** @g
call void %1()
call void @h(i64 %x)
ret void
diff --git a/test/CodeGen/PowerPC/ppc64-elf-abi.ll b/test/CodeGen/PowerPC/ppc64-elf-abi.ll
index d82122d58ee5..53443376e4d5 100644
--- a/test/CodeGen/PowerPC/ppc64-elf-abi.ll
+++ b/test/CodeGen/PowerPC/ppc64-elf-abi.ll
@@ -1,9 +1,9 @@
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s -check-prefix=CHECK-ELFv1
-; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mattr=+elfv1 < %s | FileCheck %s -check-prefix=CHECK-ELFv1
-; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mattr=+elfv2 < %s | FileCheck %s -check-prefix=CHECK-ELFv2
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -target-abi elfv1 < %s | FileCheck %s -check-prefix=CHECK-ELFv1
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -target-abi elfv2 < %s | FileCheck %s -check-prefix=CHECK-ELFv2
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s -check-prefix=CHECK-ELFv2
-; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mattr=+elfv1 < %s | FileCheck %s -check-prefix=CHECK-ELFv1
-; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mattr=+elfv2 < %s | FileCheck %s -check-prefix=CHECK-ELFv2
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -target-abi elfv1 < %s | FileCheck %s -check-prefix=CHECK-ELFv1
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -target-abi elfv2 < %s | FileCheck %s -check-prefix=CHECK-ELFv2
; CHECK-ELFv2: .abiversion 2
; CHECK-ELFv1-NOT: .abiversion 2
diff --git a/test/CodeGen/PowerPC/ppc64-fastcc-fast-isel.ll b/test/CodeGen/PowerPC/ppc64-fastcc-fast-isel.ll
new file mode 100644
index 000000000000..f90519836c25
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-fastcc-fast-isel.ll
@@ -0,0 +1,56 @@
+; RUN: llc -mcpu=pwr7 -mattr=-vsx -fast-isel -fast-isel-abort=1 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define fastcc i64 @g1(i64 %g1, double %f1, i64 %g2, double %f2, i64 %g3, double %f3, i64 %g4, double %f4) #0 {
+ ret i64 %g1
+
+; CHECK-LABEL: @g1
+; CHECK-NOT: mr 3,
+; CHECK: blr
+}
+
+define fastcc i64 @g2(i64 %g1, double %f1, i64 %g2, double %f2, i64 %g3, double %f3, i64 %g4, double %f4) #0 {
+ ret i64 %g2
+
+; CHECK-LABEL: @g2
+; CHECK: mr 3, 4
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g3(i64 %g1, double %f1, i64 %g2, double %f2, i64 %g3, double %f3, i64 %g4, double %f4) #0 {
+ ret i64 %g3
+
+; CHECK-LABEL: @g3
+; CHECK: mr 3, 5
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f2(i64 %g1, double %f1, i64 %g2, double %f2, i64 %g3, double %f3, i64 %g4, double %f4) #0 {
+ ret double %f2
+
+; CHECK-LABEL: @f2
+; CHECK: fmr 1, 2
+; CHECK-NEXT: blr
+}
+
+define void @cg2(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, i64 %v, double 0.0, i64 0, double 0.0, i64 0, double 0.0)
+ ret void
+
+; CHECK-LABEL: @cg2
+; CHECK: mr 4, 3
+; CHECK: blr
+}
+
+define void @cf2(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, i64 0, double %v, i64 0, double 0.0, i64 0, double 0.0)
+ ret void
+
+; CHECK-LABEL: @cf2
+; CHECK: mr 2, 1
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/ppc64-fastcc.ll b/test/CodeGen/PowerPC/ppc64-fastcc.ll
new file mode 100644
index 000000000000..bb1365a3b675
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-fastcc.ll
@@ -0,0 +1,540 @@
+; RUN: llc -mcpu=pwr7 -mattr=-vsx < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define fastcc i64 @g1(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g1
+
+; CHECK-LABEL: @g1
+; CHECK-NOT: mr 3,
+; CHECK: blr
+}
+
+define fastcc i64 @g2(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g2
+
+; CHECK-LABEL: @g2
+; CHECK: mr 3, 4
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g3(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g3
+
+; CHECK-LABEL: @g3
+; CHECK: mr 3, 5
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g4(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g4
+
+; CHECK-LABEL: @g4
+; CHECK: mr 3, 6
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g5(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g5
+
+; CHECK-LABEL: @g5
+; CHECK: mr 3, 7
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g6(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g6
+
+; CHECK-LABEL: @g6
+; CHECK: mr 3, 8
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g7(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g7
+
+; CHECK-LABEL: @g7
+; CHECK: mr 3, 9
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g8(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g8
+
+; CHECK-LABEL: @g8
+; CHECK: mr 3, 10
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g9(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g9
+
+; CHECK-LABEL: @g9
+; CHECK: ld 3, 48(1)
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g10(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g10
+
+; CHECK-LABEL: @g10
+; CHECK: ld 3, 56(1)
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g11(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g11
+
+; CHECK-LABEL: @g11
+; CHECK: ld 3, 64(1)
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f1(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f1
+
+; CHECK-LABEL: @f1
+; CHECK-NOT: fmr 1,
+; CHECK: blr
+}
+
+define fastcc double @f2(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f2
+
+; CHECK-LABEL: @f2
+; CHECK: fmr 1, 2
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f3(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f3
+
+; CHECK-LABEL: @f3
+; CHECK: fmr 1, 3
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f4(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f4
+
+; CHECK-LABEL: @f4
+; CHECK: fmr 1, 4
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f5(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f5
+
+; CHECK-LABEL: @f5
+; CHECK: fmr 1, 5
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f6(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f6
+
+; CHECK-LABEL: @f6
+; CHECK: fmr 1, 6
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f7(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f7
+
+; CHECK-LABEL: @f7
+; CHECK: fmr 1, 7
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f8(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f8
+
+; CHECK-LABEL: @f8
+; CHECK: fmr 1, 8
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f9(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f9
+
+; CHECK-LABEL: @f9
+; CHECK: fmr 1, 9
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f10(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f10
+
+; CHECK-LABEL: @f10
+; CHECK: fmr 1, 10
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f11(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f11
+
+; CHECK-LABEL: @f11
+; CHECK: fmr 1, 11
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f12(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f12
+
+; CHECK-LABEL: @f12
+; CHECK: fmr 1, 12
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f13(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f13
+
+; CHECK-LABEL: @f13
+; CHECK: fmr 1, 13
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f14(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f14
+
+; CHECK-LABEL: @f14
+; CHECK: lfd 1, 120(1)
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f15(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f15
+
+; CHECK-LABEL: @f15
+; CHECK: lfd 1, 152(1)
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v1(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v1
+
+; CHECK-LABEL: @v1
+; CHECK-NOT: vor 2,
+; CHECK: blr
+}
+
+define fastcc <4 x i32> @v2(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v2
+
+; CHECK-LABEL: @v2
+; CHECK: vor 2, 3, 3
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v3(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v3
+
+; CHECK-LABEL: @v3
+; CHECK: vor 2, 4, 4
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v4(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v4
+
+; CHECK-LABEL: @v4
+; CHECK: vor 2, 5, 5
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v5(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v5
+
+; CHECK-LABEL: @v5
+; CHECK: vor 2, 6, 6
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v6(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v6
+
+; CHECK-LABEL: @v6
+; CHECK: vor 2, 7, 7
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v7(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v7
+
+; CHECK-LABEL: @v7
+; CHECK: vor 2, 8, 8
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v8(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v8
+
+; CHECK-LABEL: @v8
+; CHECK: vor 2, 9, 9
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v9(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v9
+
+; CHECK-LABEL: @v9
+; CHECK: vor 2, 10, 10
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v10(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v10
+
+; CHECK-LABEL: @v10
+; CHECK: vor 2, 11, 11
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v11(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v11
+
+; CHECK-LABEL: @v11
+; CHECK: vor 2, 12, 12
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v12(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v12
+
+; CHECK-LABEL: @v12
+; CHECK: vor 2, 13, 13
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v13(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v13
+
+; CHECK-LABEL: @v13
+; CHECK: addi [[REG1:[0-9]+]], 1, 96
+; CHECK-NEXT: lvx 2, 0, [[REG1]]
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v14(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v14
+
+; CHECK-LABEL: @v14
+; CHECK: addi [[REG1:[0-9]+]], 1, 128
+; CHECK-NEXT: lvx 2, 0, [[REG1]]
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v15(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v15
+
+; CHECK-LABEL: @v15
+; CHECK: addi [[REG1:[0-9]+]], 1, 160
+; CHECK-NEXT: lvx 2, 0, [[REG1]]
+; CHECK-NEXT: blr
+}
+
+define void @cg1(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg1
+; CHECK-NOT: {{^[ \t]*}}mr 3,
+; CHECK: blr
+}
+
+define void @cg2(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg2
+; CHECK: mr 4, 3
+; CHECK: blr
+}
+
+define void @cg3(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg3
+; CHECK: mr 5, 3
+; CHECK: blr
+}
+
+define void @cg4(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg4
+; CHECK: mr 6, 3
+; CHECK: blr
+}
+
+define void @cg5(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg5
+; CHECK: mr 7, 3
+; CHECK: blr
+}
+
+define void @cg6(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg6
+; CHECK: mr 8, 3
+; CHECK: blr
+}
+
+define void @cg7(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg7
+; CHECK: mr 9, 3
+; CHECK: blr
+}
+
+define void @cg8(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg8
+; CHECK: mr 10, 3
+; CHECK: blr
+}
+
+define void @cg9(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg9
+; CHECK: mr [[REG1:[0-9]+]], 3
+; CHECK: std [[REG1]], 48(1)
+; CHECK: blr
+}
+
+define void @cg10(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg10
+; CHECK: mr [[REG1:[0-9]+]], 3
+; CHECK: std [[REG1]], 56(1)
+; CHECK: blr
+}
+
+define void @cg11(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg11
+; CHECK: mr [[REG1:[0-9]+]], 3
+; CHECK: std [[REG1]], 64(1)
+; CHECK: blr
+}
+
+define void @cf1(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf1
+; CHECK-NOT: fmr 1,
+; CHECK: blr
+}
+
+define void @cf2(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf2
+; CHECK: fmr 2, 1
+; CHECK: blr
+}
+
+define void @cf3(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf3
+; CHECK: fmr 3, 1
+; CHECK: blr
+}
+
+define void @cf4(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf4
+; CHECK: fmr 4, 1
+; CHECK: blr
+}
+
+define void @cf5(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf5
+; CHECK: fmr 5, 1
+; CHECK: blr
+}
+
+define void @cf14(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf14
+; CHECK: stfd 1, 120(1)
+; CHECK: blr
+}
+
+define void @cf15(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf15
+; CHECK: stfd 1, 152(1)
+; CHECK: blr
+}
+
+define void @cv2(<4 x i32> %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> %v, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cv2
+; CHECK: vor 3, 2, 2
+; CHECK: blr
+}
+
+define void @cv3(<4 x i32> %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> %v, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cv3
+; CHECK: vor 4, 2, 2
+; CHECK: blr
+}
+
+define void @cv13(<4 x i32> %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> %v, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cv13
+; CHECK: li [[REG1:[0-9]+]], 96
+; CHECK: stvx 2, 1, [[REG1]]
+; CHECK: blr
+}
+
+define void @cv14(<4 x i32> %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> %v, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cv14
+; CHECK: li [[REG1:[0-9]+]], 128
+; CHECK: stvx 2, 1, [[REG1]]
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll b/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll
new file mode 100644
index 000000000000..57577f90109c
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll
@@ -0,0 +1,47 @@
+; RUN: llc -mcpu=a2 < %s | FileCheck %s -check-prefix=INVFUNCDESC
+; RUN: llc -mcpu=a2 -mattr=-invariant-function-descriptors < %s | FileCheck %s -check-prefix=NONINVFUNCDESC
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @bar(void (...)* nocapture %x) #0 {
+entry:
+ %callee.knr.cast = bitcast void (...)* %x to void ()*
+ br label %for.body
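+; Under the 64-bit ELFv1 ABI an indirect call goes through a function
+; descriptor: three doublewords holding the entry point, TOC pointer, and
+; environment pointer. When descriptors may be treated as invariant (the default
+; configuration in the first RUN line), the three loads can be hoisted out of
+; the loop and only the mtctr/mr/bctrl sequence remains in %for.body; with
+; -mattr=-invariant-function-descriptors the loads stay inside the loop, as the
+; two check prefixes below verify.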
+
+; INVFUNCDESC-LABEL: @bar
+; INVFUNCDESC-DAG: ld [[REG1:[0-9]+]], 8(3)
+; INVFUNCDESC-DAG: ld [[REG2:[0-9]+]], 16(3)
+; INVFUNCDESC-DAG: ld [[REG3:[0-9]+]], 0(3)
+
+; INVFUNCDESC: %for.body
+; INVFUNCDESC: std 2, 40(1)
+; INVFUNCDESC-DAG: mtctr [[REG3]]
+; INVFUNCDESC-DAG: mr 11, [[REG2]]
+; INVFUNCDESC-DAG: mr 2, [[REG1]]
+; INVFUNCDESC: bctrl
+; INVFUNCDESC-NEXT: ld 2, 40(1)
+
+; NONINVFUNCDESC-LABEL: @bar
+; NONINVFUNCDESC: %for.body
+; NONINVFUNCDESC: std 2, 40(1)
+; NONINVFUNCDESC-DAG: ld 3, 0(30)
+; NONINVFUNCDESC-DAG: ld 11, 16(30)
+; NONINVFUNCDESC-DAG: ld 2, 8(30)
+; NONINVFUNCDESC: mtctr 3
+; NONINVFUNCDESC: bctrl
+; NONINVFUNCDESC-NEXT: ld 2, 40(1)
+
+for.body: ; preds = %for.body, %entry
+ %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ tail call void %callee.knr.cast() #0
+ %inc = add nuw nsw i32 %i.02, 1
+ %exitcond = icmp eq i32 %inc, 1600000000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/ppc64-gep-opt.ll b/test/CodeGen/PowerPC/ppc64-gep-opt.ll
index 14cf9a7e8382..f238908fcaf2 100644
--- a/test/CodeGen/PowerPC/ppc64-gep-opt.ll
+++ b/test/CodeGen/PowerPC/ppc64-gep-opt.ll
@@ -14,14 +14,14 @@ target triple = "powerpc64-unknown-linux-gnu"
; Check that when two complex GEPs are used in two basic blocks, LLVM can
; eliminate the common subexpression for the second use.
define void @test_GEP_CSE([240 x %struct]* %string, i32* %adj, i32 %lib, i64 %idxprom) {
- %liberties = getelementptr [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3
- %1 = load i32* %liberties, align 4
+ %liberties = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3
+ %1 = load i32, i32* %liberties, align 4
%cmp = icmp eq i32 %1, %lib
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- %origin = getelementptr [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2
- %2 = load i32* %origin, align 4
+ %origin = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2
+ %2 = load i32, i32* %origin, align 4
store i32 %2, i32* %adj, align 4
br label %if.end
@@ -44,11 +44,11 @@ if.end: ; preds = %if.then, %entry
; CHECK-UseAA-LABEL: @test_GEP_CSE(
; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = bitcast [240 x %struct]* %string to i8*
; CHECK-UseAA: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8* [[PTR0]], i64 [[IDX]]
-; CHECK-UseAA: getelementptr i8* [[PTR1]], i64 23052
+; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, i8* [[PTR0]], i64 [[IDX]]
+; CHECK-UseAA: getelementptr i8, i8* [[PTR1]], i64 23052
; CHECK-UseAA: bitcast
; CHECK-UseAA: if.then:
-; CHECK-UseAA: getelementptr i8* [[PTR1]], i64 23048
+; CHECK-UseAA: getelementptr i8, i8* [[PTR1]], i64 23048
; CHECK-UseAA: bitcast
%class.my = type { i32, [128 x i32], i32, [256 x %struct.pt]}
@@ -59,10 +59,10 @@ if.end: ; preds = %if.then, %entry
; calculation and code gen can generate a better addressing mode for the second
; use.
define void @test_GEP_across_BB(%class.my* %this, i64 %idx) {
- %1 = getelementptr %class.my* %this, i64 0, i32 3, i64 %idx, i32 1
- %2 = load i32* %1, align 4
- %3 = getelementptr %class.my* %this, i64 0, i32 3, i64 %idx, i32 2
- %4 = load i32* %3, align 4
+ %1 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 1
+ %2 = load i32, i32* %1, align 4
+ %3 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 2
+ %4 = load i32, i32* %3, align 4
%5 = icmp eq i32 %2, %4
br i1 %5, label %if.true, label %exit
@@ -90,12 +90,12 @@ exit:
; CHECK-UseAA-LABEL: test_GEP_across_BB(
; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = getelementptr
-; CHECK-UseAA: getelementptr i8* [[PTR0]], i64 528
-; CHECK-UseAA: getelementptr i8* [[PTR0]], i64 532
+; CHECK-UseAA: getelementptr i8, i8* [[PTR0]], i64 528
+; CHECK-UseAA: getelementptr i8, i8* [[PTR0]], i64 532
; CHECK-UseAA: if.true:
-; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8* [[PTR0]], i64 532
+; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, i8* [[PTR0]], i64 532
; CHECK-UseAA: exit:
-; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8* [[PTR0]], i64 528
+; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, i8* [[PTR0]], i64 528
%struct.S = type { float, double }
@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
@@ -109,7 +109,7 @@ define double* @test-struct_1(i32 %i) {
entry:
%add = add nsw i32 %i, 5
%idxprom = sext i32 %add to i64
- %p = getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
+ %p = getelementptr [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
ret double* %p
}
; CHECK-NoAA-LABEL: @test-struct_1(
@@ -117,7 +117,7 @@ entry:
; CHECK-NoAA: add i64 %{{[a-zA-Z0-9]+}}, 88
; CHECK-UseAA-LABEL: @test-struct_1(
-; CHECK-UseAA: getelementptr i8* %{{[a-zA-Z0-9]+}}, i64 88
+; CHECK-UseAA: getelementptr i8, i8* %{{[a-zA-Z0-9]+}}, i64 88
%struct3 = type { i64, i32 }
%struct2 = type { %struct3, i32 }
@@ -131,7 +131,7 @@ entry:
define %struct2* @test-struct_2(%struct0* %ptr, i64 %idx) {
entry:
%arrayidx = add nsw i64 %idx, -2
- %ptr2 = getelementptr %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
+ %ptr2 = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
ret %struct2* %ptr2
}
; CHECK-NoAA-LABEL: @test-struct_2(
@@ -139,14 +139,14 @@ entry:
; CHECK-NoAA: add i64 %{{[a-zA-Z0-9]+}}, -40
; CHECK-UseAA-LABEL: @test-struct_2(
-; CHECK-UseAA: getelementptr i8* %{{[a-zA-Z0-9]+}}, i64 -40
+; CHECK-UseAA: getelementptr i8, i8* %{{[a-zA-Z0-9]+}}, i64 -40
; Test that when an index is computed by adding two constants, the
; SeparateConstOffsetFromGEP pass does not generate an incorrect result.
define void @test_const_add([3 x i32]* %in) {
%inc = add nsw i32 2, 1
%idxprom = sext i32 %inc to i64
- %arrayidx = getelementptr [3 x i32]* %in, i64 %idxprom, i64 2
+ %arrayidx = getelementptr [3 x i32], [3 x i32]* %in, i64 %idxprom, i64 2
store i32 0, i32* %arrayidx, align 4
ret void
}
diff --git a/test/CodeGen/PowerPC/ppc64-i128-abi.ll b/test/CodeGen/PowerPC/ppc64-i128-abi.ll
new file mode 100644
index 000000000000..993aec24c8fb
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-i128-abi.ll
@@ -0,0 +1,274 @@
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s -check-prefix=CHECK-LE
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s -check-prefix=CHECK-BE
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s -check-prefix=CHECK-NOVSX
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s -check-prefix=CHECK-NOVSX
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s -check-prefix=CHECK-BE-NOVSX
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s -check-prefix=CHECK-LE-NOVSX
+
+@x = common global <1 x i128> zeroinitializer, align 16
+@y = common global <1 x i128> zeroinitializer, align 16
+@a = common global i128 zeroinitializer, align 16
+@b = common global i128 zeroinitializer, align 16
+
+; VSX:
+; %a is passed in register 34
+; The value of 1 is stored in the TOC.
+; On LE, ensure the value of 1 is swapped before being used (using xxswapd).
+; VMX (no VSX):
+; %a is passed in register 2
+; The value of 1 is stored in the TOC.
+; No swaps are necessary when using P8 Vector instructions on LE
+define <1 x i128> @v1i128_increment_by_one(<1 x i128> %a) nounwind {
+ %tmp = add <1 x i128> %a, <i128 1>
+ ret <1 x i128> %tmp
+
+; FIXME: It seems a 128-bit literal is materialized by loading it from the TOC.
+; There should be a better way of doing this.
+
+; CHECK-LE-LABEL: @v1i128_increment_by_one
+; CHECK-LE: lxvd2x [[VAL:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+; CHECK-LE: xxswapd 35, [[VAL]]
+; CHECK-LE: vadduqm 2, 2, 3
+; CHECK-LE: blr
+
+; CHECK-BE-LABEL: @v1i128_increment_by_one
+; CHECK-BE: lxvd2x 35, {{[0-9]+}}, {{[0-9]+}}
+; CHECK-BE-NOT: xxswapd
+; CHECK-BE: vadduqm 2, 2, 3
+; CHECK-BE-NOT: xxswapd 34, {{[0-9]+}}
+; CHECK-BE: blr
+
+; CHECK-NOVSX-LABEL: @v1i128_increment_by_one
+; CHECK-NOVSX-NOT: xxswapd {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOVSX-NOT: stxvd2x {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOVSX: lvx [[VAL:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOVSX-NOT: lxvd2x {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOVSX-NOT: xxswapd {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOVSX: vadduqm 2, 2, [[VAL]]
+; CHECK-NOVSX: blr
+}
+
+; VSX:
+; %a is passed in register 34
+; %b is passed in register 35
+; No swaps are necessary when using P8 Vector instructions on LE
+; VMX (no VSX):
+; %a is passed in register 2
+; %b is passed in register 3
+; On LE, there is no need to swap the contents of registers 2 and 3 because the
+; lvx/stvx instructions do not swap elements
+define <1 x i128> @v1i128_increment_by_val(<1 x i128> %a, <1 x i128> %b) nounwind {
+ %tmp = add <1 x i128> %a, %b
+ ret <1 x i128> %tmp
+
+; CHECK-LE-LABEL: @v1i128_increment_by_val
+; CHECK-LE-NOT: xxswapd
+; CHECK-LE: adduqm 2, 2, 3
+; CHECK-LE: blr
+
+; CHECK-BE-LABEL: @v1i128_increment_by_val
+; CHECK-BE-NOT: xxswapd {{[0-9]+}}, 34
+; CHECK-BE-NOT: xxswapd {{[0-9]+}}, 35
+; CHECK-BE-NOT: xxswapd 34, [[RESULT]]
+; CHECK-BE: adduqm 2, 2, 3
+; CHECK-BE: blr
+
+; CHECK-NOVSX-LABEL: @v1i128_increment_by_val
+; CHECK-NOVSX-NOT: xxswapd 34, [[RESULT]]
+; CHECK-NOVSX: adduqm 2, 2, 3
+; CHECK-NOVSX: blr
+}
+
+; Little Endian (VSX and VMX):
+; Lower 64-bits of %a are passed in register 3
+; Upper 64-bits of %a are passed in register 4
+; Increment lower 64-bits using addic (immediate value of 1)
+; Increment upper 64-bits using add zero extended
+; Results are placed in registers 3 and 4
+; Big Endian (VSX and VMX):
+; Lower 64-bits of %a are passed in register 4
+; Upper 64-bits of %a are passed in register 3
+; Increment lower 64-bits using addic (immediate value of 1)
+; Increment upper 64-bits using add zero extended
+; Results are placed in registers 3 and 4
+define i128 @i128_increment_by_one(i128 %a) nounwind {
+ %tmp = add i128 %a, 1
+ ret i128 %tmp
+; CHECK-LE-LABEL: @i128_increment_by_one
+; CHECK-LE: addic 3, 3, 1
+; CHECK-LE-NEXT: addze 4, 4
+; CHECK-LE: blr
+
+; CHECK-BE-LABEL: @i128_increment_by_one
+; CHECK-BE: addic 4, 4, 1
+; CHECK-BE-NEXT: addze 3, 3
+; CHECK-BE: blr
+
+; CHECK-LE-NOVSX-LABEL: @i128_increment_by_one
+; CHECK-LE-NOVSX: addic 3, 3, 1
+; CHECK-LE-NOVSX-NEXT: addze 4, 4
+; CHECK-LE-NOVSX: blr
+
+; CHECK-BE-NOVSX-LABEL: @i128_increment_by_one
+; CHECK-BE-NOVSX: addic 4, 4, 1
+; CHECK-BE-NOVSX-NEXT: addze 3, 3
+; CHECK-BE-NOVSX: blr
+}
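+; A worked example for the LE case above (illustration only, not checked):
+; incrementing %a = 2^64-1 places 0xFFFFFFFFFFFFFFFF in r3 and 0 in r4;
+; "addic 3, 3, 1" wraps r3 to 0 and records the carry in CA (which a plain addi
+; would not do), and "addze 4, 4" adds that carry so r4 becomes 1, giving 2^64.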
+
+; Little Endian (VSX and VMX):
+; Lower 64-bits of %a are passed in register 3
+; Upper 64-bits of %a are passed in register 4
+; Lower 64-bits of %b are passed in register 5
+; Upper 64-bits of %b are passed in register 6
+; Add the lower 64-bits using addc on registers 3 and 5
+; Add the upper 64-bits using adde on registers 4 and 6
+; Registers 3 and 4 should hold the result
+; Big Endian (VSX and VMX):
+; Upper 64-bits of %a are passed in register 3
+; Lower 64-bits of %a are passed in register 4
+; Upper 64-bits of %b are passed in register 5
+; Lower 64-bits of %b are passed in register 6
+; Add the lower 64-bits using addc on registers 4 and 6
+; Add the upper 64-bits using adde on registers 3 and 5
+; Registers 3 and 4 should hold the result
+define i128 @i128_increment_by_val(i128 %a, i128 %b) nounwind {
+ %tmp = add i128 %a, %b
+ ret i128 %tmp
+; CHECK-LE-LABEL: @i128_increment_by_val
+; CHECK-LE: addc 3, 3, 5
+; CHECK-LE-NEXT: adde 4, 4, 6
+; CHECK-LE: blr
+
+; CHECK-BE-LABEL: @i128_increment_by_val
+; CHECK-BE: addc 4, 4, 6
+; CHECK-BE-NEXT: adde 3, 3, 5
+; CHECK-BE: blr
+
+; CHECK-LE-NOVSX-LABEL: @i128_increment_by_val
+; CHECK-LE-NOVSX: addc 3, 3, 5
+; CHECK-LE-NOVSX-NEXT: adde 4, 4, 6
+; CHECK-LE-NOVSX: blr
+
+; CHECK-BE-NOVSX-LABEL: @i128_increment_by_val
+; CHECK-BE-NOVSX: addc 4, 4, 6
+; CHECK-BE-NOVSX-NEXT: adde 3, 3, 5
+; CHECK-BE-NOVSX: blr
+}
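+; The addc/adde pairs above form the usual two-limb carry chain for a 128-bit
+; add: addc sums the low doublewords and writes the carry out into CA, and adde
+; folds CA into the sum of the high doublewords. The BE sequences are identical
+; except that the register pairs are swapped.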
+
+
+; Callsites for the routines defined above.
+; Ensure the parameters are loaded in the same order that is expected by the
+; callee. See comments for individual functions above for details on registers
+; used for parameters.
+define <1 x i128> @call_v1i128_increment_by_one() nounwind {
+ %tmp = load <1 x i128>, <1 x i128>* @x, align 16
+ %ret = call <1 x i128> @v1i128_increment_by_one(<1 x i128> %tmp)
+ ret <1 x i128> %ret
+
+; CHECK-LE-LABEL: @call_v1i128_increment_by_one
+; CHECK-LE: lxvd2x [[PARAM:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+; CHECK-LE: xxswapd 34, [[PARAM]]
+; CHECK-LE: bl v1i128_increment_by_one
+; CHECK-LE: blr
+
+; CHECK-BE-LABEL: @call_v1i128_increment_by_one
+; CHECK-BE: lxvw4x 34, {{[0-9]+}}, {{[0-9]+}}
+; CHECK-BE-NOT: xxswapd 34, {{[0-9]+}}
+; CHECK-BE: bl v1i128_increment_by_one
+; CHECK-BE: blr
+
+; CHECK-NOVSX-LABEL: @call_v1i128_increment_by_one
+; CHECK-NOVSX: lvx 2, {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOVSX-NOT: xxswapd {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOVSX: bl v1i128_increment_by_one
+; CHECK-NOVSX: blr
+}
+
+define <1 x i128> @call_v1i128_increment_by_val() nounwind {
+ %tmp = load <1 x i128>, <1 x i128>* @x, align 16
+ %tmp2 = load <1 x i128>, <1 x i128>* @y, align 16
+ %ret = call <1 x i128> @v1i128_increment_by_val(<1 x i128> %tmp, <1 x i128> %tmp2)
+ ret <1 x i128> %ret
+
+; CHECK-LE-LABEL: @call_v1i128_increment_by_val
+; CHECK-LE: lxvd2x [[PARAM1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+; CHECK-LE: lxvd2x [[PARAM2:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+; CHECK-LE-DAG: xxswapd 34, [[PARAM1]]
+; CHECK-LE-DAG: xxswapd 35, [[PARAM2]]
+; CHECK-LE: bl v1i128_increment_by_val
+; CHECK-LE: blr
+
+; CHECK-BE-LABEL: @call_v1i128_increment_by_val
+
+
+; CHECK-BE-DAG: lxvw4x 35, {{[0-9]+}}, {{[0-9]+}}
+; CHECK-BE-NOT: xxswapd 34, {{[0-9]+}}
+; CHECK-BE-NOT: xxswapd 35, {{[0-9]+}}
+; CHECK-BE: bl v1i128_increment_by_val
+; CHECK-BE: blr
+
+; CHECK-NOVSX-LABEL: @call_v1i128_increment_by_val
+; CHECK-NOVSX-DAG: lvx 2, {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOVSX-DAG: lvx 3, {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOVSX-NOT: xxswapd 34, {{[0-9]+}}
+; CHECK-NOVSX-NOT: xxswapd 35, {{[0-9]+}}
+; CHECK-NOVSX: bl v1i128_increment_by_val
+; CHECK-NOVSX: blr
+
+}
+
+define i128 @call_i128_increment_by_one() nounwind {
+ %tmp = load i128, i128* @a, align 16
+ %ret = call i128 @i128_increment_by_one(i128 %tmp)
+ ret i128 %ret
+; CHECK-LE-LABEL: @call_i128_increment_by_one
+; CHECK-LE-DAG: ld 3, 0([[BASEREG:[0-9]+]])
+; CHECK-LE-DAG: ld 4, 8([[BASEREG]])
+; CHECK-LE: bl i128_increment_by_one
+; CHECK-LE: blr
+
+; CHECK-BE-LABEL: @call_i128_increment_by_one
+; CHECK-BE-DAG: ld 3, 0([[BASEREG:[0-9]+]])
+; CHECK-BE-DAG: ld 4, 8([[BASEREG]])
+; CHECK-BE: bl i128_increment_by_one
+; CHECK-BE: blr
+
+; CHECK-NOVSX-LABEL: @call_i128_increment_by_one
+; CHECK-NOVSX-DAG: ld 3, 0([[BASEREG:[0-9]+]])
+; CHECK-NOVSX-DAG: ld 4, 8([[BASEREG]])
+; CHECK-NOVSX: bl i128_increment_by_one
+; CHECK-NOVSX: blr
+}
+
+define i128 @call_i128_increment_by_val() nounwind {
+ %tmp = load i128, i128* @a, align 16
+ %tmp2 = load i128, i128* @b, align 16
+ %ret = call i128 @i128_increment_by_val(i128 %tmp, i128 %tmp2)
+ ret i128 %ret
+; CHECK-LE-LABEL: @call_i128_increment_by_val
+; CHECK-LE-DAG: ld 3, 0([[P1BASEREG:[0-9]+]])
+; CHECK-LE-DAG: ld 4, 8([[P1BASEREG]])
+; CHECK-LE-DAG: ld 5, 0([[P2BASEREG:[0-9]+]])
+; CHECK-LE-DAG: ld 6, 8([[P2BASEREG]])
+; CHECK-LE: bl i128_increment_by_val
+; CHECK-LE: blr
+
+; CHECK-BE-LABEL: @call_i128_increment_by_val
+; CHECK-BE-DAG: ld 3, 0([[P1BASEREG:[0-9]+]])
+; CHECK-BE-DAG: ld 4, 8([[P1BASEREG]])
+; CHECK-BE-DAG: ld 5, 0([[P2BASEREG:[0-9]+]])
+; CHECK-BE-DAG: ld 6, 8([[P2BASEREG]])
+; CHECK-BE: bl i128_increment_by_val
+; CHECK-BE: blr
+
+; CHECK-NOVSX-LABEL: @call_i128_increment_by_val
+; CHECK-NOVSX-DAG: ld 3, 0([[P1BASEREG:[0-9]+]])
+; CHECK-NOVSX-DAG: ld 4, 8([[P1BASEREG]])
+; CHECK-NOVSX-DAG: ld 5, 0([[P2BASEREG:[0-9]+]])
+; CHECK-NOVSX-DAG: ld 6, 8([[P2BASEREG]])
+; CHECK-NOVSX: bl i128_increment_by_val
+; CHECK-NOVSX: blr
+}
+
+
diff --git a/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll b/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll
new file mode 100644
index 000000000000..e8617ccfc8a5
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll
@@ -0,0 +1,19 @@
+; Test that the ICBT instruction is not emitted on POWER7
+; Based on the ppc64-prefetch.ll test
+; RUN: not llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s 2>&1 | FileCheck %s
+
+declare void @llvm.prefetch(i8*, i32, i32, i32)
+
+define void @test(i8* %a, ...) nounwind {
+entry:
+ call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 0)
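+  ; The arguments are address, rw=0 (read), locality=3, cache type=0
+  ; (instruction cache); pwr7 has no icbt, so this node cannot be selected.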
+ ret void
+
+; FIXME: Crashing is not really the correct behavior here; we should really just emit nothing
+; CHECK: Cannot select: 0x{{[0-9,a-f]+}}: ch = Prefetch
+; CHECK: 0x{{[0-9,a-f]+}}: i32 = Constant<0>
+; CHECK-NEXT: 0x{{[0-9,a-f]+}}: i32 = Constant<3>
+; CHECK-NEXT: 0x{{[0-9,a-f]+}}: i32 = Constant<0>
+
+}
+
diff --git a/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll b/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll
new file mode 100644
index 000000000000..a0f084a6bf96
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll
@@ -0,0 +1,16 @@
+; Test the ICBT instruction on POWER8
+; Copied from the ppc64-prefetch.ll test
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+
+declare void @llvm.prefetch(i8*, i32, i32, i32)
+
+define void @test(i8* %a, ...) nounwind {
+entry:
+ call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 0)
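+  ; The trailing cache-type argument of 0 requests an instruction-cache
+  ; prefetch, which POWER8 implements with icbt.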
+ ret void
+
+; CHECK-LABEL: @test
+; CHECK: icbt
+}
+
+
diff --git a/test/CodeGen/PowerPC/ppc64-linux-func-size.ll b/test/CodeGen/PowerPC/ppc64-linux-func-size.ll
index e1d50bac51a2..fb017bc224ba 100644
--- a/test/CodeGen/PowerPC/ppc64-linux-func-size.ll
+++ b/test/CodeGen/PowerPC/ppc64-linux-func-size.ll
@@ -3,11 +3,11 @@
; CHECK: .section .opd,"aw",@progbits
; CHECK-NEXT: test1:
; CHECK-NEXT: .align 3
-; CHECK-NEXT: .quad .L.test1
+; CHECK-NEXT: .quad .L[[BEGIN:.*]]
; CHECK-NEXT: .quad .TOC.@tocbase
; CHECK-NEXT: .quad 0
; CHECK-NEXT: .text
-; CHECK-NEXT: .L.test1:
+; CHECK-NEXT: .L[[BEGIN]]:
define i32 @test1(i32 %a) nounwind {
entry:
@@ -19,4 +19,4 @@ entry:
; however, using this directive with recent binutils will result in the error:
; .size expression for XXX does not evaluate to a constant
; so we must use the label which actually tags the start of the function.
-; CHECK: .size test1, .Ltmp0-.L.test1
+; CHECK: .size test1, .Lfunc_end0-.L[[BEGIN]]
diff --git a/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll b/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll
index 9f56f0102b7c..b1d3f39e2f89 100644
--- a/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll
+++ b/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=ppc64 < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/ppc64-patchpoint.ll b/test/CodeGen/PowerPC/ppc64-patchpoint.ll
index 5e58fdab2168..67b26268a3a3 100644
--- a/test/CodeGen/PowerPC/ppc64-patchpoint.ll
+++ b/test/CodeGen/PowerPC/ppc64-patchpoint.ll
@@ -1,6 +1,8 @@
-; RUN: llc < %s | FileCheck %s
-; RUN: llc -fast-isel -fast-isel-abort < %s | FileCheck %s
-target datalayout = "E-m:e-i64:64-n32:64"
+; RUN: llc < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
+; RUN: llc -fast-isel -fast-isel-abort=1 < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -fast-isel -fast-isel-abort=1 < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+
target triple = "powerpc64-unknown-linux-gnu"
; Trivial patchpoint codegen
@@ -9,26 +11,26 @@ define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: trivial_patchpoint_codegen:
-; CHECK: li 11, -8531
-; CHECK-NEXT: rldic 11, 11, 32, 16
-; CHECK-NEXT: oris 11, 11, 48879
-; CHECK-NEXT: ori 11, 11, 51966
-; CHECK-NEXT: mtctr 11
+; CHECK: li 12, -8531
+; CHECK-NEXT: rldic 12, 12, 32, 16
+; CHECK-NEXT: oris 12, 12, 48879
+; CHECK-NEXT: ori 12, 12, 51966
+; CHECK-NEXT: mtctr 12
; CHECK-NEXT: bctrl
-; CHECK: li 11, -8531
-; CHECK-NEXT: rldic 11, 11, 32, 16
-; CHECK-NEXT: oris 11, 11, 48879
-; CHECK-NEXT: ori 11, 11, 51967
-; CHECK-NEXT: mtctr 11
+; CHECK: li 12, -8531
+; CHECK-NEXT: rldic 12, 12, 32, 16
+; CHECK-NEXT: oris 12, 12, 48879
+; CHECK-NEXT: ori 12, 12, 51967
+; CHECK-NEXT: mtctr 12
; CHECK-NEXT: bctrl
; CHECK: blr
%resolveCall2 = inttoptr i64 244837814094590 to i8*
- %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 24, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 24, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
%resolveCall3 = inttoptr i64 244837814094591 to i8*
- tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 24, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
+ tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 24, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
ret i64 %result
}
@@ -36,9 +38,11 @@ entry:
; as a leaf function.
;
; CHECK-LABEL: caller_meta_leaf
-; CHECK: stdu 1, -80(1)
+; CHECK-BE: stdu 1, -80(1)
+; CHECK-LE: stdu 1, -64(1)
; CHECK: Ltmp
-; CHECK: addi 1, 1, 80
+; CHECK-BE: addi 1, 1, 80
+; CHECK-LE: addi 1, 1, 64
; CHECK: blr
define void @caller_meta_leaf() {
@@ -47,7 +51,7 @@ entry:
store i64 11, i64* %metadata
store i64 12, i64* %metadata
store i64 13, i64* %metadata
- call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
ret void
}
@@ -59,15 +63,15 @@ define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64
entry:
%tmp80 = add i64 %tmp79, -16
%tmp81 = inttoptr i64 %tmp80 to i64*
- %tmp82 = load i64* %tmp81, align 8
- tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
- tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
- %tmp83 = load i64* %tmp33, align 8
+ %tmp82 = load i64, i64* %tmp81, align 8
+ tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
+ tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
+ %tmp83 = load i64, i64* %tmp33, align 8
%tmp84 = add i64 %tmp83, -24
%tmp85 = inttoptr i64 %tmp84 to i64*
- %tmp86 = load i64* %tmp85, align 8
- tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
- tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
+ %tmp86 = load i64, i64* %tmp85, align 8
+ tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
+ tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
ret i64 10
}
@@ -83,7 +87,7 @@ entry:
; CHECK-NEXT: nop
; CHECK-NOT: nop
; CHECK: blr
- %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
+ %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
ret void
}
diff --git a/test/CodeGen/PowerPC/ppc64-r2-alloc.ll b/test/CodeGen/PowerPC/ppc64-r2-alloc.ll
new file mode 100644
index 000000000000..87292d821294
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-r2-alloc.ll
@@ -0,0 +1,81 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define signext i32 @foo(i32 signext %a, i32 signext %d) #0 {
+entry:
+ %div = sdiv i32 %a, %d
+ %div1 = sdiv i32 %div, %d
+ %div2 = sdiv i32 %div1, %d
+ %div3 = sdiv i32 %div2, %d
+ %div4 = sdiv i32 %div3, %d
+ %div5 = sdiv i32 %div4, %d
+ %div6 = sdiv i32 %div5, %d
+ %div7 = sdiv i32 %div6, %d
+ %div8 = sdiv i32 %div7, %d
+ %div9 = sdiv i32 %div8, %d
+ %div10 = sdiv i32 %div9, %d
+ %div11 = sdiv i32 %div10, %d
+ %div12 = sdiv i32 %div11, %d
+ %div13 = sdiv i32 %div12, %d
+ %div14 = sdiv i32 %div13, %d
+ %div15 = sdiv i32 %div14, %d
+ %div16 = sdiv i32 %div15, %d
+ %div17 = sdiv i32 %div16, %d
+ %div18 = sdiv i32 %div17, %d
+ %div19 = sdiv i32 %div18, %d
+ %div20 = sdiv i32 %div19, %d
+ %div21 = sdiv i32 %div20, %d
+ %div22 = sdiv i32 %div21, %d
+ %div23 = sdiv i32 %div22, %d
+ %div24 = sdiv i32 %div23, %d
+ %div25 = sdiv i32 %div24, %d
+ %div26 = sdiv i32 %div25, %d
+ %div27 = sdiv i32 %div26, %d
+ %div28 = sdiv i32 %div27, %d
+ %div29 = sdiv i32 %div28, %d
+ %div30 = sdiv i32 %div29, %d
+ %div31 = sdiv i32 %div30, %d
+ %div32 = sdiv i32 %div31, %d
+ %div33 = sdiv i32 %div32, %div31
+ %div34 = sdiv i32 %div33, %div30
+ %div35 = sdiv i32 %div34, %div29
+ %div36 = sdiv i32 %div35, %div28
+ %div37 = sdiv i32 %div36, %div27
+ %div38 = sdiv i32 %div37, %div26
+ %div39 = sdiv i32 %div38, %div25
+ %div40 = sdiv i32 %div39, %div24
+ %div41 = sdiv i32 %div40, %div23
+ %div42 = sdiv i32 %div41, %div22
+ %div43 = sdiv i32 %div42, %div21
+ %div44 = sdiv i32 %div43, %div20
+ %div45 = sdiv i32 %div44, %div19
+ %div46 = sdiv i32 %div45, %div18
+ %div47 = sdiv i32 %div46, %div17
+ %div48 = sdiv i32 %div47, %div16
+ %div49 = sdiv i32 %div48, %div15
+ %div50 = sdiv i32 %div49, %div14
+ %div51 = sdiv i32 %div50, %div13
+ %div52 = sdiv i32 %div51, %div12
+ %div53 = sdiv i32 %div52, %div11
+ %div54 = sdiv i32 %div53, %div10
+ %div55 = sdiv i32 %div54, %div9
+ %div56 = sdiv i32 %div55, %div8
+ %div57 = sdiv i32 %div56, %div7
+ %div58 = sdiv i32 %div57, %div6
+ %div59 = sdiv i32 %div58, %div5
+ %div60 = sdiv i32 %div59, %div4
+ %div61 = sdiv i32 %div60, %div3
+ %div62 = sdiv i32 %div61, %div2
+ %div63 = sdiv i32 %div62, %div1
+ %div64 = sdiv i32 %div63, %div
+ ret i32 %div64
+}
+
+; This function needs to use all non-reserved GPRs (and then some); make sure
+; that r2 is among them.
+; CHECK-LABEL: @foo
+; CHECK: std 2,
+; CHECK: ld 2,
+; CHECK: blr
+
diff --git a/test/CodeGen/PowerPC/ppc64-smallarg.ll b/test/CodeGen/PowerPC/ppc64-smallarg.ll
index 0d5b078e217a..27aca1077cd6 100644
--- a/test/CodeGen/PowerPC/ppc64-smallarg.ll
+++ b/test/CodeGen/PowerPC/ppc64-smallarg.ll
@@ -17,7 +17,7 @@ define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %str
entry:
%0 = bitcast %struct.small_arg* %x to i32*
%1 = bitcast %struct.small_arg* %agg.result to i32*
- %2 = load i32* %0, align 2
+ %2 = load i32, i32* %0, align 2
store i32 %2, i32* %1, align 2
ret void
}
@@ -47,7 +47,7 @@ entry:
define void @caller2() {
entry:
- %0 = load float* @gf, align 4
+ %0 = load float, float* @gf, align 4
%call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
ret void
}
diff --git a/test/CodeGen/PowerPC/ppc64-stackmap-nops.ll b/test/CodeGen/PowerPC/ppc64-stackmap-nops.ll
index 368ddc5c8335..19d65b983b0a 100644
--- a/test/CodeGen/PowerPC/ppc64-stackmap-nops.ll
+++ b/test/CodeGen/PowerPC/ppc64-stackmap-nops.ll
@@ -16,7 +16,7 @@ entry:
; CHECK: mtlr [[REG1]]
; CHECK: blr
- tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 0, i32 32)
+ tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 0, i32 32)
ret void
}
diff --git a/test/CodeGen/PowerPC/ppc64-stackmap.ll b/test/CodeGen/PowerPC/ppc64-stackmap.ll
index 9be8d0c8ad44..917fa7422512 100644
--- a/test/CodeGen/PowerPC/ppc64-stackmap.ll
+++ b/test/CodeGen/PowerPC/ppc64-stackmap.ll
@@ -7,6 +7,40 @@
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
+; CHECK-LABEL: constantargs:
+; CHECK: {{^}}.L[[constantargs_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: osrinline:
+; CHECK: {{^}}.L[[osrinline_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: osrcold:
+; CHECK: {{^}}.L[[osrcold_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: propertyRead:
+; CHECK: {{^}}.L[[propertyRead_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: propertyWrite:
+; CHECK: {{^}}.L[[propertyWrite_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: jsVoidCall:
+; CHECK: {{^}}.L[[jsVoidCall_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: jsIntCall:
+; CHECK: {{^}}.L[[jsIntCall_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: spilledValue:
+; CHECK: {{^}}.L[[spilledValue_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: spilledStackMapValue:
+; CHECK: {{^}}.L[[spilledStackMapValue_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: liveConstant:
+; CHECK: {{^}}.L[[liveConstant_BEGIN:.*]]:{{$}}
+
+; CHECK-LABEL: clobberLR:
+; CHECK: {{^}}.L[[clobberLR_BEGIN:.*]]:{{$}}
+
+
; CHECK-LABEL: .section .llvm_stackmaps
; CHECK-NEXT: __LLVM_StackMaps:
; Header
@@ -36,7 +70,7 @@ target triple = "powerpc64-unknown-linux-gnu"
; CHECK-NEXT: .quad jsIntCall
; CHECK-NEXT: .quad 128
; CHECK-NEXT: .quad spilledValue
-; CHECK-NEXT: .quad 320
+; CHECK-NEXT: .quad 304
; CHECK-NEXT: .quad spilledStackMapValue
; CHECK-NEXT: .quad 224
; CHECK-NEXT: .quad liveConstant
@@ -51,7 +85,7 @@ target triple = "powerpc64-unknown-linux-gnu"
; Constant arguments
;
; CHECK-NEXT: .quad 1
-; CHECK-NEXT: .long .L{{.*}}-.L.constantargs
+; CHECK-NEXT: .long .L{{.*}}-.L[[constantargs_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 4
; SmallConstant
@@ -78,13 +112,13 @@ target triple = "powerpc64-unknown-linux-gnu"
define void @constantargs() {
entry:
%0 = inttoptr i64 244837814094590 to i8*
- tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 1, i32 24, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296)
+ tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 24, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296)
ret void
}
; Inline OSR Exit
;
-; CHECK-LABEL: .long .L{{.*}}-.L.osrinline
+; CHECK: .long .L{{.*}}-.L[[osrinline_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 2
; CHECK-NEXT: .byte 1
@@ -100,7 +134,7 @@ entry:
; Runtime void->void call.
call void inttoptr (i64 244837814094590 to void ()*)()
; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
- call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
ret void
}
@@ -108,7 +142,7 @@ entry:
;
; 2 live variables in register.
;
-; CHECK-LABEL: .long .L{{.*}}-.L.osrcold
+; CHECK: .long .L{{.*}}-.L[[osrcold_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 2
; CHECK-NEXT: .byte 1
@@ -126,14 +160,14 @@ entry:
cold:
; OSR patchpoint with 12-byte nop-slide and 2 live vars.
%thunk = inttoptr i64 244837814094590 to i8*
- call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 4, i32 24, i8* %thunk, i32 0, i64 %a, i64 %b)
+ call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 24, i8* %thunk, i32 0, i64 %a, i64 %b)
unreachable
ret:
ret void
}
; Property Read
-; CHECK-LABEL: .long .L{{.*}}-.L.propertyRead
+; CHECK: .long .L{{.*}}-.L[[propertyRead_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 0
;
@@ -142,13 +176,13 @@ ret:
define i64 @propertyRead(i64* %obj) {
entry:
%resolveRead = inttoptr i64 244837814094590 to i8*
- %result = call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 24, i8* %resolveRead, i32 1, i64* %obj)
+ %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 24, i8* %resolveRead, i32 1, i64* %obj)
%add = add i64 %result, 3
ret i64 %add
}
; Property Write
-; CHECK-LABEL: .long .L{{.*}}-.L.propertyWrite
+; CHECK: .long .L{{.*}}-.L[[propertyWrite_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 2
; CHECK-NEXT: .byte 1
@@ -162,7 +196,7 @@ entry:
define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
entry:
%resolveWrite = inttoptr i64 244837814094590 to i8*
- call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 6, i32 24, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+ call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 24, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
ret void
}
@@ -170,7 +204,7 @@ entry:
;
; 2 live variables in registers.
;
-; CHECK-LABEL: .long .L{{.*}}-.L.jsVoidCall
+; CHECK: .long .L{{.*}}-.L[[jsVoidCall_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 2
; CHECK-NEXT: .byte 1
@@ -184,7 +218,7 @@ entry:
define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
%resolveCall = inttoptr i64 244837814094590 to i8*
- call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 7, i32 24, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 24, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
ret void
}
@@ -192,7 +226,7 @@ entry:
;
; 2 live variables in registers.
;
-; CHECK-LABEL: .long .L{{.*}}-.L.jsIntCall
+; CHECK: .long .L{{.*}}-.L[[jsIntCall_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 2
; CHECK-NEXT: .byte 1
@@ -206,7 +240,7 @@ entry:
define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
%resolveCall = inttoptr i64 244837814094590 to i8*
- %result = call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 8, i32 24, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 24, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
%add = add i64 %result, 3
ret i64 %add
}
@@ -215,7 +249,7 @@ entry:
;
; Verify 28 stack map entries.
;
-; CHECK-LABEL: .long .L{{.*}}-.L.spilledValue
+; CHECK: .long .L{{.*}}-.L[[spilledValue_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 28
;
@@ -226,7 +260,7 @@ entry:
; CHECK-NEXT: .short 31
define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) {
entry:
- call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 11, i32 24, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
+ call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 24, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
ret void
}
@@ -234,7 +268,7 @@ entry:
;
; Verify 30 stack map entries.
;
-; CHECK-LABEL: .long .L{{.*}}-.L.spilledStackMapValue
+; CHECK: .long .L{{.*}}-.L[[spilledStackMapValue_BEGIN]]
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 30
;
@@ -245,14 +279,14 @@ entry:
; CHECK-NEXT: .short 31
define webkit_jscc void @spilledStackMapValue(i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27, i64 %l28, i64 %l29) {
entry:
- call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 12, i32 16, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27, i64 %l28, i64 %l29)
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 12, i32 16, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27, i64 %l28, i64 %l29)
ret void
}
; Map a constant value.
;
-; CHECK-LABEL: .long .L{{.*}}-.L.liveConstant
+; CHECK: .long .L{{.*}}-.L[[liveConstant_BEGIN]]
; CHECK-NEXT: .short 0
; 1 location
; CHECK-NEXT: .short 1
@@ -263,13 +297,13 @@ entry:
; CHECK-NEXT: .long 33
define void @liveConstant() {
- tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 8, i32 33)
+ tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 15, i32 8, i32 33)
ret void
}
; Map a value when LR is the only free register.
;
-; CHECK-LABEL: .long .L{{.*}}-.L.clobberLR
+; CHECK: .long .L{{.*}}-.L[[clobberLR_BEGIN]]
; CHECK-NEXT: .short 0
; 1 location
; CHECK-NEXT: .short 1
@@ -280,7 +314,7 @@ define void @liveConstant() {
; CHECK-NEXT: .long {{[0-9]+}}
define void @clobberLR(i32 %a) {
tail call void asm sideeffect "nop", "~{r0},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31}"() nounwind
- tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 8, i32 %a)
+ tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 16, i32 8, i32 %a)
ret void
}
diff --git a/test/CodeGen/PowerPC/ppc64-toc.ll b/test/CodeGen/PowerPC/ppc64-toc.ll
index f349919b7e99..7500ed606636 100644
--- a/test/CodeGen/PowerPC/ppc64-toc.ll
+++ b/test/CodeGen/PowerPC/ppc64-toc.ll
@@ -10,11 +10,12 @@ define i64 @access_int64(i64 %a) nounwind readonly {
entry:
; CHECK-LABEL: access_int64:
; CHECK-NEXT: .align 3
-; CHECK-NEXT: .quad .L.access_int64
+; CHECK-NEXT: .quad .L[[BEGIN:.*]]
; CHECK-NEXT: .quad .TOC.@tocbase
; CHECK-NEXT: .quad 0
; CHECK-NEXT: .text
- %0 = load i64* @number64, align 8
+; CHECK-NEXT: .L[[BEGIN]]:
+ %0 = load i64, i64* @number64, align 8
; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
%cmp = icmp eq i64 %0, %a
%conv1 = zext i1 %cmp to i64
@@ -25,7 +26,7 @@ define i64 @internal_static_var(i64 %a) nounwind {
entry:
; CHECK-LABEL: internal_static_var:
; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
- %0 = load i64* @internal_static_var.x, align 8
+ %0 = load i64, i64* @internal_static_var.x, align 8
%cmp = icmp eq i64 %0, %a
%conv1 = zext i1 %cmp to i64
ret i64 %conv1
@@ -45,8 +46,8 @@ define i32 @access_double_array(double %a, i32 %i) nounwind readonly {
entry:
; CHECK-LABEL: access_double_array:
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds [32 x double]* @double_array, i64 0, i64 %idxprom
- %0 = load double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds [32 x double], [32 x double]* @double_array, i64 0, i64 %idxprom
+ %0 = load double, double* %arrayidx, align 8
; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
%cmp = fcmp oeq double %0, %a
%conv = zext i1 %cmp to i32
diff --git a/test/CodeGen/PowerPC/ppc64-zext.ll b/test/CodeGen/PowerPC/ppc64-zext.ll
index eb55445cc6c9..bbd4856babde 100644
--- a/test/CodeGen/PowerPC/ppc64-zext.ll
+++ b/test/CodeGen/PowerPC/ppc64-zext.ll
@@ -4,7 +4,7 @@ target triple = "powerpc64-unknown-linux"
define i64 @fun(i32 %arg32) nounwind {
entry:
-; CHECK: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 32
+; CHECK: clrldi {{[0-9]+}}, {{[0-9]+}}, 32
%o = zext i32 %arg32 to i64
ret i64 %o
}
diff --git a/test/CodeGen/PowerPC/ppc64le-aggregates.ll b/test/CodeGen/PowerPC/ppc64le-aggregates.ll
index 4edd8d59e526..a4bec759050b 100644
--- a/test/CodeGen/PowerPC/ppc64le-aggregates.ll
+++ b/test/CodeGen/PowerPC/ppc64le-aggregates.ll
@@ -1,8 +1,11 @@
; RUN: llc < %s -march=ppc64le -mcpu=pwr8 -mattr=+altivec -mattr=-vsx | FileCheck %s
+; RUN: llc < %s -march=ppc64le -mattr=+altivec -mattr=-vsx | FileCheck %s
; Currently VSX support is disabled for this test because we generate lxsdx
; instead of lfd, and stxsdx instead of stfd. That is a poor choice when we
; have reg+imm addressing, and is on the list of things to be fixed.
+; The second RUN line is there to ensure that -march=ppc64le alone selects the
+; same feature set as -mcpu=pwr8, since that is the baseline for ppc64le.
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
@@ -254,33 +257,33 @@ entry:
define void @caller2() {
entry:
- %0 = load [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
- %1 = load [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
- %2 = load [2 x float]* getelementptr inbounds (%struct.float2* @g2, i64 0, i32 0), align 4
+ %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8, %struct.float8* @g8, i64 0, i32 0), align 4
+ %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5, %struct.float5* @g5, i64 0, i32 0), align 4
+ %2 = load [2 x float], [2 x float]* getelementptr inbounds (%struct.float2, %struct.float2* @g2, i64 0, i32 0), align 4
tail call void @test2([8 x float] %0, [5 x float] %1, [2 x float] %2)
ret void
}
; CHECK-LABEL: @caller2
-; CHECK: ld [[REG:[0-9]+]], .LC
-; CHECK-DAG: lfs 1, 0([[REG]])
-; CHECK-DAG: lfs 2, 4([[REG]])
-; CHECK-DAG: lfs 3, 8([[REG]])
-; CHECK-DAG: lfs 4, 12([[REG]])
-; CHECK-DAG: lfs 5, 16([[REG]])
-; CHECK-DAG: lfs 6, 20([[REG]])
-; CHECK-DAG: lfs 7, 24([[REG]])
-; CHECK-DAG: lfs 8, 28([[REG]])
-; CHECK: ld [[REG:[0-9]+]], .LC
-; CHECK-DAG: lfs 9, 0([[REG]])
-; CHECK-DAG: lfs 10, 4([[REG]])
-; CHECK-DAG: lfs 11, 8([[REG]])
-; CHECK-DAG: lfs 12, 12([[REG]])
-; CHECK-DAG: lfs 13, 16([[REG]])
-; CHECK: ld [[REG:[0-9]+]], .LC
-; CHECK-DAG: lwz [[REG0:[0-9]+]], 0([[REG]])
-; CHECK-DAG: lwz [[REG1:[0-9]+]], 4([[REG]])
-; CHECK-DAG: sldi [[REG1]], [[REG1]], 32
-; CHECK-DAG: or 10, [[REG0]], [[REG1]]
+; CHECK: ld {{[0-9]+}}, .LC
+; CHECK-DAG: lfs 1, 0({{[0-9]+}})
+; CHECK-DAG: lfs 2, 4({{[0-9]+}})
+; CHECK-DAG: lfs 3, 8({{[0-9]+}})
+; CHECK-DAG: lfs 4, 12({{[0-9]+}})
+; CHECK-DAG: lfs 5, 16({{[0-9]+}})
+; CHECK-DAG: lfs 6, 20({{[0-9]+}})
+; CHECK-DAG: lfs 7, 24({{[0-9]+}})
+; CHECK-DAG: lfs 8, 28({{[0-9]+}})
+
+; CHECK-DAG: lfs 9, 0({{[0-9]+}})
+; CHECK-DAG: lfs 10, 4({{[0-9]+}})
+; CHECK-DAG: lfs 11, 8({{[0-9]+}})
+; CHECK-DAG: lfs 12, 12({{[0-9]+}})
+; CHECK-DAG: lfs 13, 16({{[0-9]+}})
+
+; CHECK-DAG: lwz [[REG0:[0-9]+]], 0({{[0-9]+}})
+; CHECK-DAG: lwz [[REG1:[0-9]+]], 4({{[0-9]+}})
+; CHECK-DAG: sldi [[REG2:[0-9]+]], [[REG1]], 32
+; CHECK-DAG: or 10, [[REG0]], [[REG2]]
; CHECK: bl test2
declare void @test2([8 x float], [5 x float], [2 x float])
@@ -296,8 +299,8 @@ entry:
define void @caller3(double %d) {
entry:
- %0 = load [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
- %1 = load [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
+ %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8, %struct.float8* @g8, i64 0, i32 0), align 4
+ %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5, %struct.float5* @g5, i64 0, i32 0), align 4
tail call void @test3([8 x float] %0, [5 x float] %1, double %d)
ret void
}
@@ -319,8 +322,8 @@ entry:
define void @caller4(float %f) {
entry:
- %0 = load [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
- %1 = load [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
+ %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8, %struct.float8* @g8, i64 0, i32 0), align 4
+ %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5, %struct.float5* @g5, i64 0, i32 0), align 4
tail call void @test4([8 x float] %0, [5 x float] %1, float %f)
ret void
}
diff --git a/test/CodeGen/PowerPC/ppc64le-calls.ll b/test/CodeGen/PowerPC/ppc64le-calls.ll
index 0d667dde96b4..b65b9549b6b1 100644
--- a/test/CodeGen/PowerPC/ppc64le-calls.ll
+++ b/test/CodeGen/PowerPC/ppc64le-calls.ll
@@ -1,4 +1,8 @@
; RUN: llc -march=ppc64le -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -march=ppc64le < %s | FileCheck %s
+
+; The second run of the test case is to ensure the behaviour is the same
+; without specifying -mcpu=pwr8 as that is now the baseline for ppc64le.
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/ppc64le-localentry.ll b/test/CodeGen/PowerPC/ppc64le-localentry.ll
index 4676ce8eadc6..be64f1151769 100644
--- a/test/CodeGen/PowerPC/ppc64le-localentry.ll
+++ b/test/CodeGen/PowerPC/ppc64le-localentry.ll
@@ -1,5 +1,10 @@
; RUN: llc -march=ppc64le -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -march=ppc64le -mcpu=pwr8 -O0 < %s | FileCheck %s
+; RUN: llc -march=ppc64le < %s | FileCheck %s
+; RUN: llc -march=ppc64le -O0 < %s | FileCheck %s
+
+; The second run of the test case is to ensure the behaviour is the same
+; without specifying -mcpu=pwr8 as that is now the baseline for ppc64le.
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
@@ -11,13 +16,14 @@ target triple = "powerpc64le-unknown-linux-gnu"
define i64 @use_toc(i64 %a) nounwind {
entry:
; CHECK-LABEL: @use_toc
+; CHECK-NEXT: .L{{.*}}:
; CHECK-NEXT: .Ltmp[[TMP1:[0-9]+]]:
; CHECK-NEXT: addis 2, 12, .TOC.-.Ltmp[[TMP1]]@ha
; CHECK-NEXT: addi 2, 2, .TOC.-.Ltmp[[TMP1]]@l
; CHECK-NEXT: .Ltmp[[TMP2:[0-9]+]]:
; CHECK-NEXT: .localentry use_toc, .Ltmp[[TMP2]]-.Ltmp[[TMP1]]
; CHECK-NEXT: %entry
- %0 = load i64* @number64, align 8
+ %0 = load i64, i64* @number64, align 8
%cmp = icmp eq i64 %0, %a
%conv1 = zext i1 %cmp to i64
ret i64 %conv1
@@ -27,6 +33,7 @@ declare void @callee()
define void @use_toc_implicit() nounwind {
entry:
; CHECK-LABEL: @use_toc_implicit
+; CHECK-NEXT: .L{{.*}}:
; CHECK-NEXT: .Ltmp[[TMP1:[0-9]+]]:
; CHECK-NEXT: addis 2, 12, .TOC.-.Ltmp[[TMP1]]@ha
; CHECK-NEXT: addi 2, 2, .TOC.-.Ltmp[[TMP1]]@l
@@ -40,6 +47,7 @@ entry:
define i64 @no_toc(i64 %a) nounwind {
entry:
; CHECK-LABEL: @no_toc
+; CHECK-NEXT: .L{{.*}}:
; CHECK-NEXT: %entry
ret i64 %a
}
diff --git a/test/CodeGen/PowerPC/ppc64le-smallarg.ll b/test/CodeGen/PowerPC/ppc64le-smallarg.ll
index 120c14039f99..070a617ffe4f 100644
--- a/test/CodeGen/PowerPC/ppc64le-smallarg.ll
+++ b/test/CodeGen/PowerPC/ppc64le-smallarg.ll
@@ -17,7 +17,7 @@ define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %str
entry:
%0 = bitcast %struct.small_arg* %x to i32*
%1 = bitcast %struct.small_arg* %agg.result to i32*
- %2 = load i32* %0, align 2
+ %2 = load i32, i32* %0, align 2
store i32 %2, i32* %1, align 2
ret void
}
@@ -42,17 +42,19 @@ entry:
ret float %x
}
; CHECK: @callee2
-; CHECK: lfs {{[0-9]+}}, 136(1)
+; CHECK: addi [[TOCREG:[0-9]+]], 1, 136
+; CHECK: lxsspx {{[0-9]+}}, {{[0-9]+}}, [[TOCREG]]
; CHECK: blr
define void @caller2() {
entry:
- %0 = load float* @gf, align 4
+ %0 = load float, float* @gf, align 4
%call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
ret void
}
; CHECK: @caller2
-; CHECK: stfs {{[0-9]+}}, 136(1)
+; CHECK: li [[TOCOFF:[0-9]+]], 136
+; CHECK: stxsspx {{[0-9]+}}, 1, [[TOCOFF]]
; CHECK: bl test2
declare float @test2(float, float, float, float, float, float, float, float, float, float, float, float, float, float)
diff --git a/test/CodeGen/PowerPC/ppcf128-1.ll b/test/CodeGen/PowerPC/ppcf128-1.ll
index 2cec934c66fd..f0e58f61a867 100644
--- a/test/CodeGen/PowerPC/ppcf128-1.ll
+++ b/test/CodeGen/PowerPC/ppcf128-1.ll
@@ -12,16 +12,16 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store ppc_fp128 %x, ppc_fp128* %x_addr
store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp3 = fadd ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
br label %return
return: ; preds = %entry
- %retval5 = load ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
+ %retval5 = load ppc_fp128, ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %retval5
}
@@ -34,16 +34,16 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store ppc_fp128 %x, ppc_fp128* %x_addr
store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp3 = fsub ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
br label %return
return: ; preds = %entry
- %retval5 = load ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
+ %retval5 = load ppc_fp128, ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %retval5
}
@@ -56,16 +56,16 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store ppc_fp128 %x, ppc_fp128* %x_addr
store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp3 = fmul ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
br label %return
return: ; preds = %entry
- %retval5 = load ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
+ %retval5 = load ppc_fp128, ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %retval5
}
@@ -78,15 +78,15 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store ppc_fp128 %x, ppc_fp128* %x_addr
store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp3 = fdiv ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
br label %return
return: ; preds = %entry
- %retval5 = load ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
+ %retval5 = load ppc_fp128, ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %retval5
}
diff --git a/test/CodeGen/PowerPC/ppcf128-3.ll b/test/CodeGen/PowerPC/ppcf128-3.ll
index 5043b622584b..fe3b4188d11c 100644
--- a/test/CodeGen/PowerPC/ppcf128-3.ll
+++ b/test/CodeGen/PowerPC/ppcf128-3.ll
@@ -4,28 +4,28 @@
define i32 @stp_sequence_set_short_data(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
entry:
%tmp1112 = sitofp i16 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp13 = call i32 (...)* @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
+ %tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
ret i32 0
}
define i32 @stp_sequence_set_short_data2(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
entry:
%tmp1112 = sitofp i8 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp13 = call i32 (...)* @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
+ %tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
ret i32 0
}
define i32 @stp_sequence_set_short_data3(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
entry:
%tmp1112 = uitofp i16 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp13 = call i32 (...)* @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
+ %tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
ret i32 0
}
define i32 @stp_sequence_set_short_data4(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
entry:
%tmp1112 = uitofp i8 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp13 = call i32 (...)* @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
+ %tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
ret i32 0
}
diff --git a/test/CodeGen/PowerPC/ppcf128-endian.ll b/test/CodeGen/PowerPC/ppcf128-endian.ll
index 180fedf5c9f4..ee314c1db58b 100644
--- a/test/CodeGen/PowerPC/ppcf128-endian.ll
+++ b/test/CodeGen/PowerPC/ppcf128-endian.ll
@@ -9,7 +9,7 @@ define void @callee(ppc_fp128 %x) {
entry:
%x.addr = alloca ppc_fp128, align 16
store ppc_fp128 %x, ppc_fp128* %x.addr, align 16
- %0 = load ppc_fp128* %x.addr, align 16
+ %0 = load ppc_fp128, ppc_fp128* %x.addr, align 16
store ppc_fp128 %0, ppc_fp128* @g, align 16
ret void
}
@@ -21,7 +21,7 @@ entry:
define void @caller() {
entry:
- %0 = load ppc_fp128* @g, align 16
+ %0 = load ppc_fp128, ppc_fp128* @g, align 16
call void @test(ppc_fp128 %0)
ret void
}
@@ -51,7 +51,7 @@ entry:
define ppc_fp128 @result() {
entry:
- %0 = load ppc_fp128* @g, align 16
+ %0 = load ppc_fp128, ppc_fp128* @g, align 16
ret ppc_fp128 %0
}
; CHECK: @result
diff --git a/test/CodeGen/PowerPC/pr13891.ll b/test/CodeGen/PowerPC/pr13891.ll
index 4be65dd43d6a..5b695eb9f0df 100644
--- a/test/CodeGen/PowerPC/pr13891.ll
+++ b/test/CodeGen/PowerPC/pr13891.ll
@@ -10,7 +10,7 @@ define void @_Z5check3foos(%struct.foo* nocapture byval %f, i16 signext %i) noin
; CHECK: lha {{[0-9]+}}, {{[0-9]+}}(1)
entry:
%0 = bitcast %struct.foo* %f to i16*
- %1 = load i16* %0, align 2
+ %1 = load i16, i16* %0, align 2
%bf.val.sext = ashr i16 %1, 8
%cmp = icmp eq i16 %bf.val.sext, %i
br i1 %cmp, label %if.end, label %if.then
diff --git a/test/CodeGen/PowerPC/pr15031.ll b/test/CodeGen/PowerPC/pr15031.ll
index e58ad80e139b..d1b9932ca22b 100644
--- a/test/CodeGen/PowerPC/pr15031.ll
+++ b/test/CodeGen/PowerPC/pr15031.ll
@@ -298,50 +298,50 @@ declare zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(%"class.llvm::MCRegi
define void @_ZN4llvm14MachineOperand12substPhysRegEjRKNS_18TargetRegisterInfoE(%"class.llvm::MachineOperand"* %this, i32 zeroext %Reg, %"class.llvm::TargetRegisterInfo"* %TRI) align 2 {
entry:
- %SubReg_TargetFlags.i = getelementptr inbounds %"class.llvm::MachineOperand"* %this, i64 0, i32 1
+ %SubReg_TargetFlags.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 1
%0 = bitcast [3 x i8]* %SubReg_TargetFlags.i to i24*
- %bf.load.i = load i24* %0, align 1
+ %bf.load.i = load i24, i24* %0, align 1
%bf.lshr.i = lshr i24 %bf.load.i, 12
%tobool = icmp eq i24 %bf.lshr.i, 0
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
%bf.cast.i = zext i24 %bf.lshr.i to i32
- %add.ptr = getelementptr inbounds %"class.llvm::TargetRegisterInfo"* %TRI, i64 0, i32 1
+ %add.ptr = getelementptr inbounds %"class.llvm::TargetRegisterInfo", %"class.llvm::TargetRegisterInfo"* %TRI, i64 0, i32 1
%call3 = tail call zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(%"class.llvm::MCRegisterInfo"* %add.ptr, i32 zeroext %Reg, i32 zeroext %bf.cast.i)
- %bf.load.i10 = load i24* %0, align 1
+ %bf.load.i10 = load i24, i24* %0, align 1
%bf.clear.i = and i24 %bf.load.i10, 4095
store i24 %bf.clear.i, i24* %0, align 1
br label %if.end
if.end: ; preds = %entry, %if.then
%Reg.addr.0 = phi i32 [ %call3, %if.then ], [ %Reg, %entry ]
- %RegNo.i.i = getelementptr inbounds %"class.llvm::MachineOperand"* %this, i64 0, i32 2, i32 0
- %1 = load i32* %RegNo.i.i, align 4
+ %RegNo.i.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 2, i32 0
+ %1 = load i32, i32* %RegNo.i.i, align 4
%cmp.i = icmp eq i32 %1, %Reg.addr.0
br i1 %cmp.i, label %_ZN4llvm14MachineOperand6setRegEj.exit, label %if.end.i
if.end.i: ; preds = %if.end
- %ParentMI.i.i = getelementptr inbounds %"class.llvm::MachineOperand"* %this, i64 0, i32 3
- %2 = load %"class.llvm::MachineInstr"** %ParentMI.i.i, align 8
+ %ParentMI.i.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 3
+ %2 = load %"class.llvm::MachineInstr"*, %"class.llvm::MachineInstr"** %ParentMI.i.i, align 8
%tobool.i = icmp eq %"class.llvm::MachineInstr"* %2, null
br i1 %tobool.i, label %if.end13.i, label %if.then3.i
if.then3.i: ; preds = %if.end.i
- %Parent.i.i = getelementptr inbounds %"class.llvm::MachineInstr"* %2, i64 0, i32 2
- %3 = load %"class.llvm::MachineBasicBlock"** %Parent.i.i, align 8
+ %Parent.i.i = getelementptr inbounds %"class.llvm::MachineInstr", %"class.llvm::MachineInstr"* %2, i64 0, i32 2
+ %3 = load %"class.llvm::MachineBasicBlock"*, %"class.llvm::MachineBasicBlock"** %Parent.i.i, align 8
%tobool5.i = icmp eq %"class.llvm::MachineBasicBlock"* %3, null
br i1 %tobool5.i, label %if.end13.i, label %if.then6.i
if.then6.i: ; preds = %if.then3.i
- %xParent.i.i = getelementptr inbounds %"class.llvm::MachineBasicBlock"* %3, i64 0, i32 4
- %4 = load %"class.llvm::MachineFunction"** %xParent.i.i, align 8
+ %xParent.i.i = getelementptr inbounds %"class.llvm::MachineBasicBlock", %"class.llvm::MachineBasicBlock"* %3, i64 0, i32 4
+ %4 = load %"class.llvm::MachineFunction"*, %"class.llvm::MachineFunction"** %xParent.i.i, align 8
%tobool8.i = icmp eq %"class.llvm::MachineFunction"* %4, null
br i1 %tobool8.i, label %if.end13.i, label %if.then9.i
if.then9.i: ; preds = %if.then6.i
- %RegInfo.i.i = getelementptr inbounds %"class.llvm::MachineFunction"* %4, i64 0, i32 5
- %5 = load %"class.llvm::MachineRegisterInfo"** %RegInfo.i.i, align 8
+ %RegInfo.i.i = getelementptr inbounds %"class.llvm::MachineFunction", %"class.llvm::MachineFunction"* %4, i64 0, i32 5
+ %5 = load %"class.llvm::MachineRegisterInfo"*, %"class.llvm::MachineRegisterInfo"** %RegInfo.i.i, align 8
tail call void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)
store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4
tail call void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)
diff --git a/test/CodeGen/PowerPC/pr15630.ll b/test/CodeGen/PowerPC/pr15630.ll
index 3c1b604f0090..54a1b36868e9 100644
--- a/test/CodeGen/PowerPC/pr15630.ll
+++ b/test/CodeGen/PowerPC/pr15630.ll
@@ -8,7 +8,7 @@ entry:
%newval = alloca i8
%ordering = alloca i32, align 4
store i8 %newval_arg, i8* %newval
- %tmp = load i8* %newval
+ %tmp = load i8, i8* %newval
store atomic volatile i8 %tmp, i8* %val_arg seq_cst, align 1
ret void
}
diff --git a/test/CodeGen/PowerPC/pr16556-2.ll b/test/CodeGen/PowerPC/pr16556-2.ll
index e2dae4573c72..9155ed5926f5 100644
--- a/test/CodeGen/PowerPC/pr16556-2.ll
+++ b/test/CodeGen/PowerPC/pr16556-2.ll
@@ -11,7 +11,7 @@ target triple = "powerpc-unknown-linux-gnu"
@_D4core4time12TickDuration11ticksPerSecyl = global i64 0
@.str5 = internal unnamed_addr constant [40 x i8] c"..\5Cldc\5Cruntime\5Cdruntime\5Csrc\5Ccore\5Ctime.d\00"
@.str83 = internal constant [10 x i8] c"null this\00"
-@.modulefilename = internal constant { i32, i8* } { i32 39, i8* getelementptr inbounds ([40 x i8]* @.str5, i32 0, i32 0) }
+@.modulefilename = internal constant { i32, i8* } { i32 39, i8* getelementptr inbounds ([40 x i8], [40 x i8]* @.str5, i32 0, i32 0) }
declare i8* @_d_assert_msg({ i32, i8* }, { i32, i8* }, i32)
@@ -23,15 +23,15 @@ entry:
br i1 %tmp, label %noassert, label %assert
assert: ; preds = %entry
- %tmp1 = load { i32, i8* }* @.modulefilename
- %0 = call i8* @_d_assert_msg({ i32, i8* } { i32 9, i8* getelementptr inbounds ([10 x i8]* @.str83, i32 0, i32 0) }, { i32, i8* } %tmp1, i32 1586)
+ %tmp1 = load { i32, i8* }, { i32, i8* }* @.modulefilename
+ %0 = call i8* @_d_assert_msg({ i32, i8* } { i32 9, i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str83, i32 0, i32 0) }, { i32, i8* } %tmp1, i32 1586)
unreachable
noassert: ; preds = %entry
- %tmp2 = getelementptr %core.time.TickDuration* %.this_arg, i32 0, i32 0
- %tmp3 = load i64* %tmp2
+ %tmp2 = getelementptr %core.time.TickDuration, %core.time.TickDuration* %.this_arg, i32 0, i32 0
+ %tmp3 = load i64, i64* %tmp2
%tmp4 = sitofp i64 %tmp3 to ppc_fp128
- %tmp5 = load i64* @_D4core4time12TickDuration11ticksPerSecyl
+ %tmp5 = load i64, i64* @_D4core4time12TickDuration11ticksPerSecyl
%tmp6 = sitofp i64 %tmp5 to ppc_fp128
%tmp7 = fdiv ppc_fp128 %tmp6, 0xM80000000000000000000000000000000
%tmp8 = fdiv ppc_fp128 %tmp4, %tmp7
diff --git a/test/CodeGen/PowerPC/pr17168.ll b/test/CodeGen/PowerPC/pr17168.ll
index 62a9ede0200b..096895491381 100644
--- a/test/CodeGen/PowerPC/pr17168.ll
+++ b/test/CodeGen/PowerPC/pr17168.ll
@@ -24,8 +24,8 @@ for.cond968.preheader: ; preds = %for.cond968.prehead
for.end1042: ; preds = %for.cond968.preheader, %for.cond964.preheader, %entry
%0 = phi i32 [ undef, %for.cond964.preheader ], [ undef, %for.cond968.preheader ], [ undef, %entry ]
- %1 = load i32* getelementptr inbounds ([3 x i32]* @grid_points, i64 0, i64 0), align 4, !dbg !443, !tbaa !444
- tail call void @llvm.dbg.value(metadata i32 1, i64 0, metadata !119, metadata !{!"0x102"}), !dbg !448
+ %1 = load i32, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @grid_points, i64 0, i64 0), align 4, !dbg !443, !tbaa !444
+ tail call void @llvm.dbg.value(metadata i32 1, i64 0, metadata !119, metadata !DIExpression()), !dbg !448
%sub10454270 = add nsw i32 %0, -1, !dbg !448
%cmp10464271 = icmp sgt i32 %sub10454270, 1, !dbg !448
%sub11134263 = add nsw i32 %1, -1, !dbg !450
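; Sketch (hypothetical placeholder values, not from the imported tests): the
; metadata hunk below rewrites the old string-encoded debug nodes into the
; specialized DI* nodes while preserving the same fields, e.g. for an auto
; variable:
;   old: !N = !{!"0x100\00i\0042\000", !<scope>, !<file>, !<type>} ; [ DW_TAG_auto_variable ] [i] [line 42]
;   new: !N = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 42, scope: !<scope>, file: !<file>, type: !<type>)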
@@ -54,468 +54,468 @@ attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!438, !464}
-!0 = !{!"0x11\0012\00clang version 3.4 (trunk 190311)\001\00\000\00\000", !1, !2, !2, !3, !298, !2} ; [ DW_TAG_compile_unit ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c] [DW_LANG_C99]
-!1 = !{!"bt.c", !"/home/hfinkel/src/NPB2.3-omp-C/BT"}
+!0 = !DICompileUnit(language: DW_LANG_C99, producer: "clang version 3.4 (trunk 190311)", isOptimized: true, emissionKind: 0, file: !1, enums: !2, retainedTypes: !2, subprograms: !3, globals: !298, imports: !2)
+!1 = !DIFile(filename: "bt.c", directory: "/home/hfinkel/src/NPB2.3-omp-C/BT")
!2 = !{}
!3 = !{!4, !82, !102, !114, !132, !145, !154, !155, !162, !183, !200, !201, !207, !208, !215, !221, !230, !238, !246, !255, !260, !261, !268, !274, !279, !280, !287, !293}
-!4 = !{!"0x2e\00main\00main\00\0074\000\001\000\006\00256\001\0074", !1, !5, !6, null, null, null, null, !12} ; [ DW_TAG_subprogram ] [line 74] [def] [main]
-!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !DISubprogram(name: "main", line: 74, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 74, file: !1, scope: !5, type: !6, variables: !12)
+!5 = !DIFile(filename: "bt.c", directory: "/home/hfinkel/src/NPB2.3-omp-C/BT")
+!6 = !DISubroutineType(types: !7)
!7 = !{!8, !8, !9}
-!8 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!9 = !{!"0xf\00\000\0064\0064\000\000", null, null, !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
-!10 = !{!"0xf\00\000\0064\0064\000\000", null, null, !11} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from char]
-!11 = !{!"0x24\00char\000\008\008\000\000\008", null, null} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_unsigned_char]
+!8 = !DIBasicType(tag: DW_TAG_base_type, name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!9 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !10)
+!10 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !11)
+!11 = !DIBasicType(tag: DW_TAG_base_type, name: "char", size: 8, align: 8, encoding: DW_ATE_unsigned_char)
!12 = !{!13, !14, !15, !16, !17, !18, !19, !21, !22, !23, !25, !26}
-!13 = !{!"0x101\00argc\0016777290\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [argc] [line 74]
-!14 = !{!"0x101\00argv\0033554506\000", !4, !5, !9} ; [ DW_TAG_arg_variable ] [argv] [line 74]
-!15 = !{!"0x100\00niter\0076\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [niter] [line 76]
-!16 = !{!"0x100\00step\0076\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [step] [line 76]
-!17 = !{!"0x100\00n3\0076\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [n3] [line 76]
-!18 = !{!"0x100\00nthreads\0077\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [nthreads] [line 77]
-!19 = !{!"0x100\00navg\0078\000", !4, !5, !20} ; [ DW_TAG_auto_variable ] [navg] [line 78]
-!20 = !{!"0x24\00double\000\0064\0064\000\000\004", null, null} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float]
-!21 = !{!"0x100\00mflops\0078\000", !4, !5, !20} ; [ DW_TAG_auto_variable ] [mflops] [line 78]
-!22 = !{!"0x100\00tmax\0080\000", !4, !5, !20} ; [ DW_TAG_auto_variable ] [tmax] [line 80]
-!23 = !{!"0x100\00verified\0081\000", !4, !5, !24} ; [ DW_TAG_auto_variable ] [verified] [line 81]
-!24 = !{!"0x16\00boolean\0012\000\000\000\000", !1, null, !8} ; [ DW_TAG_typedef ] [boolean] [line 12, size 0, align 0, offset 0] [from int]
-!25 = !{!"0x100\00class\0082\000", !4, !5, !11} ; [ DW_TAG_auto_variable ] [class] [line 82]
-!26 = !{!"0x100\00fp\0083\000", !4, !5, !27} ; [ DW_TAG_auto_variable ] [fp] [line 83]
-!27 = !{!"0xf\00\000\0064\0064\000\000", null, null, !28} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from FILE]
-!28 = !{!"0x16\00FILE\0049\000\000\000\000", !1, null, !29} ; [ DW_TAG_typedef ] [FILE] [line 49, size 0, align 0, offset 0] [from _IO_FILE]
-!29 = !{!"0x13\00_IO_FILE\00271\001728\0064\000\000\000", !30, null, null, !31, null, null, null} ; [ DW_TAG_structure_type ] [_IO_FILE] [line 271, size 1728, align 64, offset 0] [def] [from ]
-!30 = !{!"/usr/include/libio.h", !"/home/hfinkel/src/NPB2.3-omp-C/BT"}
+!13 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "argc", line: 74, arg: 1, scope: !4, file: !5, type: !8)
+!14 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "argv", line: 74, arg: 2, scope: !4, file: !5, type: !9)
+!15 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "niter", line: 76, scope: !4, file: !5, type: !8)
+!16 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "step", line: 76, scope: !4, file: !5, type: !8)
+!17 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "n3", line: 76, scope: !4, file: !5, type: !8)
+!18 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "nthreads", line: 77, scope: !4, file: !5, type: !8)
+!19 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "navg", line: 78, scope: !4, file: !5, type: !20)
+!20 = !DIBasicType(tag: DW_TAG_base_type, name: "double", size: 64, align: 64, encoding: DW_ATE_float)
+!21 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "mflops", line: 78, scope: !4, file: !5, type: !20)
+!22 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "tmax", line: 80, scope: !4, file: !5, type: !20)
+!23 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "verified", line: 81, scope: !4, file: !5, type: !24)
+!24 = !DIDerivedType(tag: DW_TAG_typedef, name: "boolean", line: 12, file: !1, baseType: !8)
+!25 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "class", line: 82, scope: !4, file: !5, type: !11)
+!26 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "fp", line: 83, scope: !4, file: !5, type: !27)
+!27 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !28)
+!28 = !DIDerivedType(tag: DW_TAG_typedef, name: "FILE", line: 49, file: !1, baseType: !29)
+!29 = !DICompositeType(tag: DW_TAG_structure_type, name: "_IO_FILE", line: 271, size: 1728, align: 64, file: !30, elements: !31)
+!30 = !DIFile(filename: "/usr/include/libio.h", directory: "/home/hfinkel/src/NPB2.3-omp-C/BT")
!31 = !{!32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42, !43, !44, !52, !53, !54, !55, !58, !60, !62, !66, !68, !70, !71, !72, !73, !74, !77, !78}
-!32 = !{!"0xd\00_flags\00272\0032\0032\000\000", !30, !29, !8} ; [ DW_TAG_member ] [_flags] [line 272, size 32, align 32, offset 0] [from int]
-!33 = !{!"0xd\00_IO_read_ptr\00277\0064\0064\0064\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_read_ptr] [line 277, size 64, align 64, offset 64] [from ]
-!34 = !{!"0xd\00_IO_read_end\00278\0064\0064\00128\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_read_end] [line 278, size 64, align 64, offset 128] [from ]
-!35 = !{!"0xd\00_IO_read_base\00279\0064\0064\00192\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_read_base] [line 279, size 64, align 64, offset 192] [from ]
-!36 = !{!"0xd\00_IO_write_base\00280\0064\0064\00256\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_write_base] [line 280, size 64, align 64, offset 256] [from ]
-!37 = !{!"0xd\00_IO_write_ptr\00281\0064\0064\00320\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_write_ptr] [line 281, size 64, align 64, offset 320] [from ]
-!38 = !{!"0xd\00_IO_write_end\00282\0064\0064\00384\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_write_end] [line 282, size 64, align 64, offset 384] [from ]
-!39 = !{!"0xd\00_IO_buf_base\00283\0064\0064\00448\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_buf_base] [line 283, size 64, align 64, offset 448] [from ]
-!40 = !{!"0xd\00_IO_buf_end\00284\0064\0064\00512\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_buf_end] [line 284, size 64, align 64, offset 512] [from ]
-!41 = !{!"0xd\00_IO_save_base\00286\0064\0064\00576\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_save_base] [line 286, size 64, align 64, offset 576] [from ]
-!42 = !{!"0xd\00_IO_backup_base\00287\0064\0064\00640\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_backup_base] [line 287, size 64, align 64, offset 640] [from ]
-!43 = !{!"0xd\00_IO_save_end\00288\0064\0064\00704\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_save_end] [line 288, size 64, align 64, offset 704] [from ]
-!44 = !{!"0xd\00_markers\00290\0064\0064\00768\000", !30, !29, !45} ; [ DW_TAG_member ] [_markers] [line 290, size 64, align 64, offset 768] [from ]
-!45 = !{!"0xf\00\000\0064\0064\000\000", null, null, !46} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _IO_marker]
-!46 = !{!"0x13\00_IO_marker\00186\00192\0064\000\000\000", !30, null, null, !47, null, null, null} ; [ DW_TAG_structure_type ] [_IO_marker] [line 186, size 192, align 64, offset 0] [def] [from ]
+!32 = !DIDerivedType(tag: DW_TAG_member, name: "_flags", line: 272, size: 32, align: 32, file: !30, scope: !29, baseType: !8)
+!33 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_read_ptr", line: 277, size: 64, align: 64, offset: 64, file: !30, scope: !29, baseType: !10)
+!34 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_read_end", line: 278, size: 64, align: 64, offset: 128, file: !30, scope: !29, baseType: !10)
+!35 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_read_base", line: 279, size: 64, align: 64, offset: 192, file: !30, scope: !29, baseType: !10)
+!36 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_write_base", line: 280, size: 64, align: 64, offset: 256, file: !30, scope: !29, baseType: !10)
+!37 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_write_ptr", line: 281, size: 64, align: 64, offset: 320, file: !30, scope: !29, baseType: !10)
+!38 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_write_end", line: 282, size: 64, align: 64, offset: 384, file: !30, scope: !29, baseType: !10)
+!39 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_buf_base", line: 283, size: 64, align: 64, offset: 448, file: !30, scope: !29, baseType: !10)
+!40 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_buf_end", line: 284, size: 64, align: 64, offset: 512, file: !30, scope: !29, baseType: !10)
+!41 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_save_base", line: 286, size: 64, align: 64, offset: 576, file: !30, scope: !29, baseType: !10)
+!42 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_backup_base", line: 287, size: 64, align: 64, offset: 640, file: !30, scope: !29, baseType: !10)
+!43 = !DIDerivedType(tag: DW_TAG_member, name: "_IO_save_end", line: 288, size: 64, align: 64, offset: 704, file: !30, scope: !29, baseType: !10)
+!44 = !DIDerivedType(tag: DW_TAG_member, name: "_markers", line: 290, size: 64, align: 64, offset: 768, file: !30, scope: !29, baseType: !45)
+!45 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !46)
+!46 = !DICompositeType(tag: DW_TAG_structure_type, name: "_IO_marker", line: 186, size: 192, align: 64, file: !30, elements: !47)
!47 = !{!48, !49, !51}
-!48 = !{!"0xd\00_next\00187\0064\0064\000\000", !30, !46, !45} ; [ DW_TAG_member ] [_next] [line 187, size 64, align 64, offset 0] [from ]
-!49 = !{!"0xd\00_sbuf\00188\0064\0064\0064\000", !30, !46, !50} ; [ DW_TAG_member ] [_sbuf] [line 188, size 64, align 64, offset 64] [from ]
-!50 = !{!"0xf\00\000\0064\0064\000\000", null, null, !29} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _IO_FILE]
-!51 = !{!"0xd\00_pos\00192\0032\0032\00128\000", !30, !46, !8} ; [ DW_TAG_member ] [_pos] [line 192, size 32, align 32, offset 128] [from int]
-!52 = !{!"0xd\00_chain\00292\0064\0064\00832\000", !30, !29, !50} ; [ DW_TAG_member ] [_chain] [line 292, size 64, align 64, offset 832] [from ]
-!53 = !{!"0xd\00_fileno\00294\0032\0032\00896\000", !30, !29, !8} ; [ DW_TAG_member ] [_fileno] [line 294, size 32, align 32, offset 896] [from int]
-!54 = !{!"0xd\00_flags2\00298\0032\0032\00928\000", !30, !29, !8} ; [ DW_TAG_member ] [_flags2] [line 298, size 32, align 32, offset 928] [from int]
-!55 = !{!"0xd\00_old_offset\00300\0064\0064\00960\000", !30, !29, !56} ; [ DW_TAG_member ] [_old_offset] [line 300, size 64, align 64, offset 960] [from __off_t]
-!56 = !{!"0x16\00__off_t\00141\000\000\000\000", !30, null, !57} ; [ DW_TAG_typedef ] [__off_t] [line 141, size 0, align 0, offset 0] [from long int]
-!57 = !{!"0x24\00long int\000\0064\0064\000\000\005", null, null} ; [ DW_TAG_base_type ] [long int] [line 0, size 64, align 64, offset 0, enc DW_ATE_signed]
-!58 = !{!"0xd\00_cur_column\00304\0016\0016\001024\000", !30, !29, !59} ; [ DW_TAG_member ] [_cur_column] [line 304, size 16, align 16, offset 1024] [from unsigned short]
-!59 = !{!"0x24\00unsigned short\000\0016\0016\000\000\007", null, null} ; [ DW_TAG_base_type ] [unsigned short] [line 0, size 16, align 16, offset 0, enc DW_ATE_unsigned]
-!60 = !{!"0xd\00_vtable_offset\00305\008\008\001040\000", !30, !29, !61} ; [ DW_TAG_member ] [_vtable_offset] [line 305, size 8, align 8, offset 1040] [from signed char]
-!61 = !{!"0x24\00signed char\000\008\008\000\000\006", null, null} ; [ DW_TAG_base_type ] [signed char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
-!62 = !{!"0xd\00_shortbuf\00306\008\008\001048\000", !30, !29, !63} ; [ DW_TAG_member ] [_shortbuf] [line 306, size 8, align 8, offset 1048] [from ]
-!63 = !{!"0x1\00\000\008\008\000\000", null, null, !11, !64, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 8, align 8, offset 0] [from char]
+!48 = !DIDerivedType(tag: DW_TAG_member, name: "_next", line: 187, size: 64, align: 64, file: !30, scope: !46, baseType: !45)
+!49 = !DIDerivedType(tag: DW_TAG_member, name: "_sbuf", line: 188, size: 64, align: 64, offset: 64, file: !30, scope: !46, baseType: !50)
+!50 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !29)
+!51 = !DIDerivedType(tag: DW_TAG_member, name: "_pos", line: 192, size: 32, align: 32, offset: 128, file: !30, scope: !46, baseType: !8)
+!52 = !DIDerivedType(tag: DW_TAG_member, name: "_chain", line: 292, size: 64, align: 64, offset: 832, file: !30, scope: !29, baseType: !50)
+!53 = !DIDerivedType(tag: DW_TAG_member, name: "_fileno", line: 294, size: 32, align: 32, offset: 896, file: !30, scope: !29, baseType: !8)
+!54 = !DIDerivedType(tag: DW_TAG_member, name: "_flags2", line: 298, size: 32, align: 32, offset: 928, file: !30, scope: !29, baseType: !8)
+!55 = !DIDerivedType(tag: DW_TAG_member, name: "_old_offset", line: 300, size: 64, align: 64, offset: 960, file: !30, scope: !29, baseType: !56)
+!56 = !DIDerivedType(tag: DW_TAG_typedef, name: "__off_t", line: 141, file: !30, baseType: !57)
+!57 = !DIBasicType(tag: DW_TAG_base_type, name: "long int", size: 64, align: 64, encoding: DW_ATE_signed)
+!58 = !DIDerivedType(tag: DW_TAG_member, name: "_cur_column", line: 304, size: 16, align: 16, offset: 1024, file: !30, scope: !29, baseType: !59)
+!59 = !DIBasicType(tag: DW_TAG_base_type, name: "unsigned short", size: 16, align: 16, encoding: DW_ATE_unsigned)
+!60 = !DIDerivedType(tag: DW_TAG_member, name: "_vtable_offset", line: 305, size: 8, align: 8, offset: 1040, file: !30, scope: !29, baseType: !61)
+!61 = !DIBasicType(tag: DW_TAG_base_type, name: "signed char", size: 8, align: 8, encoding: DW_ATE_signed_char)
+!62 = !DIDerivedType(tag: DW_TAG_member, name: "_shortbuf", line: 306, size: 8, align: 8, offset: 1048, file: !30, scope: !29, baseType: !63)
+!63 = !DICompositeType(tag: DW_TAG_array_type, size: 8, align: 8, baseType: !11, elements: !64)
!64 = !{!65}
-!65 = !{!"0x21\000\001"} ; [ DW_TAG_subrange_type ] [0, 0]
-!66 = !{!"0xd\00_lock\00310\0064\0064\001088\000", !30, !29, !67} ; [ DW_TAG_member ] [_lock] [line 310, size 64, align 64, offset 1088] [from ]
-!67 = !{!"0xf\00\000\0064\0064\000\000", null, null, null} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
-!68 = !{!"0xd\00_offset\00319\0064\0064\001152\000", !30, !29, !69} ; [ DW_TAG_member ] [_offset] [line 319, size 64, align 64, offset 1152] [from __off64_t]
-!69 = !{!"0x16\00__off64_t\00142\000\000\000\000", !30, null, !57} ; [ DW_TAG_typedef ] [__off64_t] [line 142, size 0, align 0, offset 0] [from long int]
-!70 = !{!"0xd\00__pad1\00328\0064\0064\001216\000", !30, !29, !67} ; [ DW_TAG_member ] [__pad1] [line 328, size 64, align 64, offset 1216] [from ]
-!71 = !{!"0xd\00__pad2\00329\0064\0064\001280\000", !30, !29, !67} ; [ DW_TAG_member ] [__pad2] [line 329, size 64, align 64, offset 1280] [from ]
-!72 = !{!"0xd\00__pad3\00330\0064\0064\001344\000", !30, !29, !67} ; [ DW_TAG_member ] [__pad3] [line 330, size 64, align 64, offset 1344] [from ]
-!73 = !{!"0xd\00__pad4\00331\0064\0064\001408\000", !30, !29, !67} ; [ DW_TAG_member ] [__pad4] [line 331, size 64, align 64, offset 1408] [from ]
-!74 = !{!"0xd\00__pad5\00332\0064\0064\001472\000", !30, !29, !75} ; [ DW_TAG_member ] [__pad5] [line 332, size 64, align 64, offset 1472] [from size_t]
-!75 = !{!"0x16\00size_t\0042\000\000\000\000", !30, null, !76} ; [ DW_TAG_typedef ] [size_t] [line 42, size 0, align 0, offset 0] [from long unsigned int]
-!76 = !{!"0x24\00long unsigned int\000\0064\0064\000\000\007", null, null} ; [ DW_TAG_base_type ] [long unsigned int] [line 0, size 64, align 64, offset 0, enc DW_ATE_unsigned]
-!77 = !{!"0xd\00_mode\00334\0032\0032\001536\000", !30, !29, !8} ; [ DW_TAG_member ] [_mode] [line 334, size 32, align 32, offset 1536] [from int]
-!78 = !{!"0xd\00_unused2\00336\00160\008\001568\000", !30, !29, !79} ; [ DW_TAG_member ] [_unused2] [line 336, size 160, align 8, offset 1568] [from ]
-!79 = !{!"0x1\00\000\00160\008\000\000", null, null, !11, !80, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 160, align 8, offset 0] [from char]
+!65 = !DISubrange(count: 1)
+!66 = !DIDerivedType(tag: DW_TAG_member, name: "_lock", line: 310, size: 64, align: 64, offset: 1088, file: !30, scope: !29, baseType: !67)
+!67 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: null)
+!68 = !DIDerivedType(tag: DW_TAG_member, name: "_offset", line: 319, size: 64, align: 64, offset: 1152, file: !30, scope: !29, baseType: !69)
+!69 = !DIDerivedType(tag: DW_TAG_typedef, name: "__off64_t", line: 142, file: !30, baseType: !57)
+!70 = !DIDerivedType(tag: DW_TAG_member, name: "__pad1", line: 328, size: 64, align: 64, offset: 1216, file: !30, scope: !29, baseType: !67)
+!71 = !DIDerivedType(tag: DW_TAG_member, name: "__pad2", line: 329, size: 64, align: 64, offset: 1280, file: !30, scope: !29, baseType: !67)
+!72 = !DIDerivedType(tag: DW_TAG_member, name: "__pad3", line: 330, size: 64, align: 64, offset: 1344, file: !30, scope: !29, baseType: !67)
+!73 = !DIDerivedType(tag: DW_TAG_member, name: "__pad4", line: 331, size: 64, align: 64, offset: 1408, file: !30, scope: !29, baseType: !67)
+!74 = !DIDerivedType(tag: DW_TAG_member, name: "__pad5", line: 332, size: 64, align: 64, offset: 1472, file: !30, scope: !29, baseType: !75)
+!75 = !DIDerivedType(tag: DW_TAG_typedef, name: "size_t", line: 42, file: !30, baseType: !76)
+!76 = !DIBasicType(tag: DW_TAG_base_type, name: "long unsigned int", size: 64, align: 64, encoding: DW_ATE_unsigned)
+!77 = !DIDerivedType(tag: DW_TAG_member, name: "_mode", line: 334, size: 32, align: 32, offset: 1536, file: !30, scope: !29, baseType: !8)
+!78 = !DIDerivedType(tag: DW_TAG_member, name: "_unused2", line: 336, size: 160, align: 8, offset: 1568, file: !30, scope: !29, baseType: !79)
+!79 = !DICompositeType(tag: DW_TAG_array_type, size: 160, align: 8, baseType: !11, elements: !80)
!80 = !{!81}
-!81 = !{!"0x21\000\0020"} ; [ DW_TAG_subrange_type ] [0, 19]
-!82 = !{!"0x2e\00verify\00verify\00\002388\001\001\000\006\00256\001\002388", !1, !5, !83, null, null, null, null, !86} ; [ DW_TAG_subprogram ] [line 2388] [local] [def] [verify]
-!83 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !84, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!81 = !DISubrange(count: 20)
+!82 = !DISubprogram(name: "verify", line: 2388, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 2388, file: !1, scope: !5, type: !83, variables: !86)
+!83 = !DISubroutineType(types: !84)
!84 = !{null, !8, !10, !85}
-!85 = !{!"0xf\00\000\0064\0064\000\000", null, null, !24} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from boolean]
+!85 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !24)
!86 = !{!87, !88, !89, !90, !94, !95, !96, !97, !98, !99, !100, !101}
-!87 = !{!"0x101\00no_time_steps\0016779604\000", !82, !5, !8} ; [ DW_TAG_arg_variable ] [no_time_steps] [line 2388]
-!88 = !{!"0x101\00class\0033556820\000", !82, !5, !10} ; [ DW_TAG_arg_variable ] [class] [line 2388]
-!89 = !{!"0x101\00verified\0050334036\000", !82, !5, !85} ; [ DW_TAG_arg_variable ] [verified] [line 2388]
-!90 = !{!"0x100\00xcrref\002397\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xcrref] [line 2397]
-!91 = !{!"0x1\00\000\00320\0064\000\000", null, null, !20, !92, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 320, align 64, offset 0] [from double]
+!87 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "no_time_steps", line: 2388, arg: 1, scope: !82, file: !5, type: !8)
+!88 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "class", line: 2388, arg: 2, scope: !82, file: !5, type: !10)
+!89 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "verified", line: 2388, arg: 3, scope: !82, file: !5, type: !85)
+!90 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "xcrref", line: 2397, scope: !82, file: !5, type: !91)
+!91 = !DICompositeType(tag: DW_TAG_array_type, size: 320, align: 64, baseType: !20, elements: !92)
!92 = !{!93}
-!93 = !{!"0x21\000\005"} ; [ DW_TAG_subrange_type ] [0, 4]
-!94 = !{!"0x100\00xceref\002397\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xceref] [line 2397]
-!95 = !{!"0x100\00xcrdif\002397\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xcrdif] [line 2397]
-!96 = !{!"0x100\00xcedif\002397\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xcedif] [line 2397]
-!97 = !{!"0x100\00epsilon\002398\000", !82, !5, !20} ; [ DW_TAG_auto_variable ] [epsilon] [line 2398]
-!98 = !{!"0x100\00xce\002398\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xce] [line 2398]
-!99 = !{!"0x100\00xcr\002398\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xcr] [line 2398]
-!100 = !{!"0x100\00dtref\002398\000", !82, !5, !20} ; [ DW_TAG_auto_variable ] [dtref] [line 2398]
-!101 = !{!"0x100\00m\002399\000", !82, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 2399]
-!102 = !{!"0x2e\00rhs_norm\00rhs_norm\00\00266\001\001\000\006\00256\001\00266", !1, !5, !103, null, null, null, null, !106} ; [ DW_TAG_subprogram ] [line 266] [local] [def] [rhs_norm]
-!103 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !104, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!93 = !DISubrange(count: 5)
+!94 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "xceref", line: 2397, scope: !82, file: !5, type: !91)
+!95 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "xcrdif", line: 2397, scope: !82, file: !5, type: !91)
+!96 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "xcedif", line: 2397, scope: !82, file: !5, type: !91)
+!97 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "epsilon", line: 2398, scope: !82, file: !5, type: !20)
+!98 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "xce", line: 2398, scope: !82, file: !5, type: !91)
+!99 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "xcr", line: 2398, scope: !82, file: !5, type: !91)
+!100 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "dtref", line: 2398, scope: !82, file: !5, type: !20)
+!101 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 2399, scope: !82, file: !5, type: !8)
+!102 = !DISubprogram(name: "rhs_norm", line: 266, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 266, file: !1, scope: !5, type: !103, variables: !106)
+!103 = !DISubroutineType(types: !104)
!104 = !{null, !105}
-!105 = !{!"0xf\00\000\0064\0064\000\000", null, null, !20} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from double]
+!105 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !20)
!106 = !{!107, !108, !109, !110, !111, !112, !113}
-!107 = !{!"0x101\00rms\0016777482\000", !102, !5, !105} ; [ DW_TAG_arg_variable ] [rms] [line 266]
-!108 = !{!"0x100\00i\00271\000", !102, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 271]
-!109 = !{!"0x100\00j\00271\000", !102, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 271]
-!110 = !{!"0x100\00k\00271\000", !102, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 271]
-!111 = !{!"0x100\00d\00271\000", !102, !5, !8} ; [ DW_TAG_auto_variable ] [d] [line 271]
-!112 = !{!"0x100\00m\00271\000", !102, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 271]
-!113 = !{!"0x100\00add\00272\000", !102, !5, !20} ; [ DW_TAG_auto_variable ] [add] [line 272]
-!114 = !{!"0x2e\00compute_rhs\00compute_rhs\00\001767\001\001\000\006\00256\001\001767", !1, !5, !115, null, void ()* @compute_rhs, null, null, !117} ; [ DW_TAG_subprogram ] [line 1767] [local] [def] [compute_rhs]
-!115 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !116, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!107 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "rms", line: 266, arg: 1, scope: !102, file: !5, type: !105)
+!108 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 271, scope: !102, file: !5, type: !8)
+!109 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 271, scope: !102, file: !5, type: !8)
+!110 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 271, scope: !102, file: !5, type: !8)
+!111 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "d", line: 271, scope: !102, file: !5, type: !8)
+!112 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 271, scope: !102, file: !5, type: !8)
+!113 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "add", line: 272, scope: !102, file: !5, type: !20)
+!114 = !DISubprogram(name: "compute_rhs", line: 1767, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 1767, file: !1, scope: !5, type: !115, function: void ()* @compute_rhs, variables: !117)
+!115 = !DISubroutineType(types: !116)
!116 = !{null}
!117 = !{!118, !119, !120, !121, !122, !123, !124, !125, !126, !127, !128, !129, !130, !131}
-!118 = !{!"0x100\00i\001769\000", !114, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 1769]
-!119 = !{!"0x100\00j\001769\000", !114, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 1769]
-!120 = !{!"0x100\00k\001769\000", !114, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 1769]
-!121 = !{!"0x100\00m\001769\000", !114, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 1769]
-!122 = !{!"0x100\00rho_inv\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [rho_inv] [line 1770]
-!123 = !{!"0x100\00uijk\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [uijk] [line 1770]
-!124 = !{!"0x100\00up1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [up1] [line 1770]
-!125 = !{!"0x100\00um1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [um1] [line 1770]
-!126 = !{!"0x100\00vijk\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [vijk] [line 1770]
-!127 = !{!"0x100\00vp1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [vp1] [line 1770]
-!128 = !{!"0x100\00vm1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [vm1] [line 1770]
-!129 = !{!"0x100\00wijk\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [wijk] [line 1770]
-!130 = !{!"0x100\00wp1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [wp1] [line 1770]
-!131 = !{!"0x100\00wm1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [wm1] [line 1770]
-!132 = !{!"0x2e\00error_norm\00error_norm\00\00225\001\001\000\006\00256\001\00225", !1, !5, !103, null, null, null, null, !133} ; [ DW_TAG_subprogram ] [line 225] [local] [def] [error_norm]
+!118 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 1769, scope: !114, file: !5, type: !8)
+!119 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 1769, scope: !114, file: !5, type: !8)
+!120 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 1769, scope: !114, file: !5, type: !8)
+!121 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 1769, scope: !114, file: !5, type: !8)
+!122 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "rho_inv", line: 1770, scope: !114, file: !5, type: !20)
+!123 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "uijk", line: 1770, scope: !114, file: !5, type: !20)
+!124 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "up1", line: 1770, scope: !114, file: !5, type: !20)
+!125 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "um1", line: 1770, scope: !114, file: !5, type: !20)
+!126 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "vijk", line: 1770, scope: !114, file: !5, type: !20)
+!127 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "vp1", line: 1770, scope: !114, file: !5, type: !20)
+!128 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "vm1", line: 1770, scope: !114, file: !5, type: !20)
+!129 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "wijk", line: 1770, scope: !114, file: !5, type: !20)
+!130 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "wp1", line: 1770, scope: !114, file: !5, type: !20)
+!131 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "wm1", line: 1770, scope: !114, file: !5, type: !20)
+!132 = !DISubprogram(name: "error_norm", line: 225, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 225, file: !1, scope: !5, type: !103, variables: !133)
!133 = !{!134, !135, !136, !137, !138, !139, !140, !141, !142, !143, !144}
-!134 = !{!"0x101\00rms\0016777441\000", !132, !5, !105} ; [ DW_TAG_arg_variable ] [rms] [line 225]
-!135 = !{!"0x100\00i\00232\000", !132, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 232]
-!136 = !{!"0x100\00j\00232\000", !132, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 232]
-!137 = !{!"0x100\00k\00232\000", !132, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 232]
-!138 = !{!"0x100\00m\00232\000", !132, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 232]
-!139 = !{!"0x100\00d\00232\000", !132, !5, !8} ; [ DW_TAG_auto_variable ] [d] [line 232]
-!140 = !{!"0x100\00xi\00233\000", !132, !5, !20} ; [ DW_TAG_auto_variable ] [xi] [line 233]
-!141 = !{!"0x100\00eta\00233\000", !132, !5, !20} ; [ DW_TAG_auto_variable ] [eta] [line 233]
-!142 = !{!"0x100\00zeta\00233\000", !132, !5, !20} ; [ DW_TAG_auto_variable ] [zeta] [line 233]
-!143 = !{!"0x100\00u_exact\00233\000", !132, !5, !91} ; [ DW_TAG_auto_variable ] [u_exact] [line 233]
-!144 = !{!"0x100\00add\00233\000", !132, !5, !20} ; [ DW_TAG_auto_variable ] [add] [line 233]
-!145 = !{!"0x2e\00exact_solution\00exact_solution\00\00643\001\001\000\006\00256\001\00644", !1, !5, !146, null, null, null, null, !148} ; [ DW_TAG_subprogram ] [line 643] [local] [def] [scope 644] [exact_solution]
-!146 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !147, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!134 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "rms", line: 225, arg: 1, scope: !132, file: !5, type: !105)
+!135 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 232, scope: !132, file: !5, type: !8)
+!136 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 232, scope: !132, file: !5, type: !8)
+!137 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 232, scope: !132, file: !5, type: !8)
+!138 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 232, scope: !132, file: !5, type: !8)
+!139 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "d", line: 232, scope: !132, file: !5, type: !8)
+!140 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "xi", line: 233, scope: !132, file: !5, type: !20)
+!141 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "eta", line: 233, scope: !132, file: !5, type: !20)
+!142 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "zeta", line: 233, scope: !132, file: !5, type: !20)
+!143 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "u_exact", line: 233, scope: !132, file: !5, type: !91)
+!144 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "add", line: 233, scope: !132, file: !5, type: !20)
+!145 = !DISubprogram(name: "exact_solution", line: 643, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 644, file: !1, scope: !5, type: !146, variables: !148)
+!146 = !DISubroutineType(types: !147)
!147 = !{null, !20, !20, !20, !105}
!148 = !{!149, !150, !151, !152, !153}
-!149 = !{!"0x101\00xi\0016777859\000", !145, !5, !20} ; [ DW_TAG_arg_variable ] [xi] [line 643]
-!150 = !{!"0x101\00eta\0033555075\000", !145, !5, !20} ; [ DW_TAG_arg_variable ] [eta] [line 643]
-!151 = !{!"0x101\00zeta\0050332291\000", !145, !5, !20} ; [ DW_TAG_arg_variable ] [zeta] [line 643]
-!152 = !{!"0x101\00dtemp\0067109508\000", !145, !5, !105} ; [ DW_TAG_arg_variable ] [dtemp] [line 644]
-!153 = !{!"0x100\00m\00653\000", !145, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 653]
-!154 = !{!"0x2e\00set_constants\00set_constants\00\002191\001\001\000\006\00256\001\002191", !1, !5, !115, null, null, null, null, !2} ; [ DW_TAG_subprogram ] [line 2191] [local] [def] [set_constants]
-!155 = !{!"0x2e\00lhsinit\00lhsinit\00\00855\001\001\000\006\00256\001\00855", !1, !5, !115, null, null, null, null, !156} ; [ DW_TAG_subprogram ] [line 855] [local] [def] [lhsinit]
+!149 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "xi", line: 643, arg: 1, scope: !145, file: !5, type: !20)
+!150 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "eta", line: 643, arg: 2, scope: !145, file: !5, type: !20)
+!151 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "zeta", line: 643, arg: 3, scope: !145, file: !5, type: !20)
+!152 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "dtemp", line: 644, arg: 4, scope: !145, file: !5, type: !105)
+!153 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 653, scope: !145, file: !5, type: !8)
+!154 = !DISubprogram(name: "set_constants", line: 2191, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 2191, file: !1, scope: !5, type: !115, variables: !2)
+!155 = !DISubprogram(name: "lhsinit", line: 855, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 855, file: !1, scope: !5, type: !115, variables: !156)
!156 = !{!157, !158, !159, !160, !161}
-!157 = !{!"0x100\00i\00857\000", !155, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 857]
-!158 = !{!"0x100\00j\00857\000", !155, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 857]
-!159 = !{!"0x100\00k\00857\000", !155, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 857]
-!160 = !{!"0x100\00m\00857\000", !155, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 857]
-!161 = !{!"0x100\00n\00857\000", !155, !5, !8} ; [ DW_TAG_auto_variable ] [n] [line 857]
-!162 = !{!"0x2e\00initialize\00initialize\00\00669\001\001\000\006\00256\001\00669", !1, !5, !115, null, null, null, null, !163} ; [ DW_TAG_subprogram ] [line 669] [local] [def] [initialize]
+!157 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 857, scope: !155, file: !5, type: !8)
+!158 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 857, scope: !155, file: !5, type: !8)
+!159 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 857, scope: !155, file: !5, type: !8)
+!160 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 857, scope: !155, file: !5, type: !8)
+!161 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "n", line: 857, scope: !155, file: !5, type: !8)
+!162 = !DISubprogram(name: "initialize", line: 669, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 669, file: !1, scope: !5, type: !115, variables: !163)
!163 = !{!164, !165, !166, !167, !168, !169, !170, !171, !172, !173, !174, !179, !180, !181, !182}
-!164 = !{!"0x100\00i\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 679]
-!165 = !{!"0x100\00j\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 679]
-!166 = !{!"0x100\00k\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 679]
-!167 = !{!"0x100\00m\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 679]
-!168 = !{!"0x100\00ix\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [ix] [line 679]
-!169 = !{!"0x100\00iy\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [iy] [line 679]
-!170 = !{!"0x100\00iz\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [iz] [line 679]
-!171 = !{!"0x100\00xi\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [xi] [line 680]
-!172 = !{!"0x100\00eta\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [eta] [line 680]
-!173 = !{!"0x100\00zeta\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [zeta] [line 680]
-!174 = !{!"0x100\00Pface\00680\000", !162, !5, !175} ; [ DW_TAG_auto_variable ] [Pface] [line 680]
-!175 = !{!"0x1\00\000\001920\0064\000\000", null, null, !20, !176, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 1920, align 64, offset 0] [from double]
+!164 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 679, scope: !162, file: !5, type: !8)
+!165 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 679, scope: !162, file: !5, type: !8)
+!166 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 679, scope: !162, file: !5, type: !8)
+!167 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 679, scope: !162, file: !5, type: !8)
+!168 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "ix", line: 679, scope: !162, file: !5, type: !8)
+!169 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "iy", line: 679, scope: !162, file: !5, type: !8)
+!170 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "iz", line: 679, scope: !162, file: !5, type: !8)
+!171 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "xi", line: 680, scope: !162, file: !5, type: !20)
+!172 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "eta", line: 680, scope: !162, file: !5, type: !20)
+!173 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "zeta", line: 680, scope: !162, file: !5, type: !20)
+!174 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "Pface", line: 680, scope: !162, file: !5, type: !175)
+!175 = !DICompositeType(tag: DW_TAG_array_type, size: 1920, align: 64, baseType: !20, elements: !176)
!176 = !{!177, !178, !93}
-!177 = !{!"0x21\000\002"} ; [ DW_TAG_subrange_type ] [0, 1]
-!178 = !{!"0x21\000\003"} ; [ DW_TAG_subrange_type ] [0, 2]
-!179 = !{!"0x100\00Pxi\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [Pxi] [line 680]
-!180 = !{!"0x100\00Peta\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [Peta] [line 680]
-!181 = !{!"0x100\00Pzeta\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [Pzeta] [line 680]
-!182 = !{!"0x100\00temp\00680\000", !162, !5, !91} ; [ DW_TAG_auto_variable ] [temp] [line 680]
-!183 = !{!"0x2e\00exact_rhs\00exact_rhs\00\00301\001\001\000\006\00256\001\00301", !1, !5, !115, null, null, null, null, !184} ; [ DW_TAG_subprogram ] [line 301] [local] [def] [exact_rhs]
+!177 = !DISubrange(count: 2)
+!178 = !DISubrange(count: 3)
+!179 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "Pxi", line: 680, scope: !162, file: !5, type: !20)
+!180 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "Peta", line: 680, scope: !162, file: !5, type: !20)
+!181 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "Pzeta", line: 680, scope: !162, file: !5, type: !20)
+!182 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "temp", line: 680, scope: !162, file: !5, type: !91)
+!183 = !DISubprogram(name: "exact_rhs", line: 301, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 301, file: !1, scope: !5, type: !115, variables: !184)
!184 = !{!185, !186, !187, !188, !189, !190, !191, !192, !193, !194, !195, !196, !197, !198, !199}
-!185 = !{!"0x100\00dtemp\00310\000", !183, !5, !91} ; [ DW_TAG_auto_variable ] [dtemp] [line 310]
-!186 = !{!"0x100\00xi\00310\000", !183, !5, !20} ; [ DW_TAG_auto_variable ] [xi] [line 310]
-!187 = !{!"0x100\00eta\00310\000", !183, !5, !20} ; [ DW_TAG_auto_variable ] [eta] [line 310]
-!188 = !{!"0x100\00zeta\00310\000", !183, !5, !20} ; [ DW_TAG_auto_variable ] [zeta] [line 310]
-!189 = !{!"0x100\00dtpp\00310\000", !183, !5, !20} ; [ DW_TAG_auto_variable ] [dtpp] [line 310]
-!190 = !{!"0x100\00m\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 311]
-!191 = !{!"0x100\00i\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 311]
-!192 = !{!"0x100\00j\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 311]
-!193 = !{!"0x100\00k\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 311]
-!194 = !{!"0x100\00ip1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [ip1] [line 311]
-!195 = !{!"0x100\00im1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [im1] [line 311]
-!196 = !{!"0x100\00jp1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [jp1] [line 311]
-!197 = !{!"0x100\00jm1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [jm1] [line 311]
-!198 = !{!"0x100\00km1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [km1] [line 311]
-!199 = !{!"0x100\00kp1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [kp1] [line 311]
-!200 = !{!"0x2e\00adi\00adi\00\00210\001\001\000\006\00256\001\00210", !1, !5, !115, null, null, null, null, !2} ; [ DW_TAG_subprogram ] [line 210] [local] [def] [adi]
-!201 = !{!"0x2e\00add\00add\00\00187\001\001\000\006\00256\001\00187", !1, !5, !115, null, null, null, null, !202} ; [ DW_TAG_subprogram ] [line 187] [local] [def] [add]
+!185 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "dtemp", line: 310, scope: !183, file: !5, type: !91)
+!186 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "xi", line: 310, scope: !183, file: !5, type: !20)
+!187 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "eta", line: 310, scope: !183, file: !5, type: !20)
+!188 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "zeta", line: 310, scope: !183, file: !5, type: !20)
+!189 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "dtpp", line: 310, scope: !183, file: !5, type: !20)
+!190 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 311, scope: !183, file: !5, type: !8)
+!191 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 311, scope: !183, file: !5, type: !8)
+!192 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 311, scope: !183, file: !5, type: !8)
+!193 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 311, scope: !183, file: !5, type: !8)
+!194 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "ip1", line: 311, scope: !183, file: !5, type: !8)
+!195 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "im1", line: 311, scope: !183, file: !5, type: !8)
+!196 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "jp1", line: 311, scope: !183, file: !5, type: !8)
+!197 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "jm1", line: 311, scope: !183, file: !5, type: !8)
+!198 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "km1", line: 311, scope: !183, file: !5, type: !8)
+!199 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "kp1", line: 311, scope: !183, file: !5, type: !8)
+!200 = !DISubprogram(name: "adi", line: 210, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 210, file: !1, scope: !5, type: !115, variables: !2)
+!201 = !DISubprogram(name: "add", line: 187, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 187, file: !1, scope: !5, type: !115, variables: !202)
!202 = !{!203, !204, !205, !206}
-!203 = !{!"0x100\00i\00193\000", !201, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 193]
-!204 = !{!"0x100\00j\00193\000", !201, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 193]
-!205 = !{!"0x100\00k\00193\000", !201, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 193]
-!206 = !{!"0x100\00m\00193\000", !201, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 193]
-!207 = !{!"0x2e\00z_solve\00z_solve\00\003457\001\001\000\006\00256\001\003457", !1, !5, !115, null, null, null, null, !2} ; [ DW_TAG_subprogram ] [line 3457] [local] [def] [z_solve]
-!208 = !{!"0x2e\00z_backsubstitute\00z_backsubstitute\00\003480\001\001\000\006\00256\001\003480", !1, !5, !115, null, null, null, null, !209} ; [ DW_TAG_subprogram ] [line 3480] [local] [def] [z_backsubstitute]
+!203 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 193, scope: !201, file: !5, type: !8)
+!204 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 193, scope: !201, file: !5, type: !8)
+!205 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 193, scope: !201, file: !5, type: !8)
+!206 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 193, scope: !201, file: !5, type: !8)
+!207 = !DISubprogram(name: "z_solve", line: 3457, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 3457, file: !1, scope: !5, type: !115, variables: !2)
+!208 = !DISubprogram(name: "z_backsubstitute", line: 3480, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 3480, file: !1, scope: !5, type: !115, variables: !209)
!209 = !{!210, !211, !212, !213, !214}
-!210 = !{!"0x100\00i\003492\000", !208, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 3492]
-!211 = !{!"0x100\00j\003492\000", !208, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 3492]
-!212 = !{!"0x100\00k\003492\000", !208, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 3492]
-!213 = !{!"0x100\00m\003492\000", !208, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 3492]
-!214 = !{!"0x100\00n\003492\000", !208, !5, !8} ; [ DW_TAG_auto_variable ] [n] [line 3492]
-!215 = !{!"0x2e\00z_solve_cell\00z_solve_cell\00\003512\001\001\000\006\00256\001\003512", !1, !5, !115, null, null, null, null, !216} ; [ DW_TAG_subprogram ] [line 3512] [local] [def] [z_solve_cell]
+!210 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 3492, scope: !208, file: !5, type: !8)
+!211 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 3492, scope: !208, file: !5, type: !8)
+!212 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 3492, scope: !208, file: !5, type: !8)
+!213 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 3492, scope: !208, file: !5, type: !8)
+!214 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "n", line: 3492, scope: !208, file: !5, type: !8)
+!215 = !DISubprogram(name: "z_solve_cell", line: 3512, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 3512, file: !1, scope: !5, type: !115, variables: !216)
!216 = !{!217, !218, !219, !220}
-!217 = !{!"0x100\00i\003527\000", !215, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 3527]
-!218 = !{!"0x100\00j\003527\000", !215, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 3527]
-!219 = !{!"0x100\00k\003527\000", !215, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 3527]
-!220 = !{!"0x100\00ksize\003527\000", !215, !5, !8} ; [ DW_TAG_auto_variable ] [ksize] [line 3527]
-!221 = !{!"0x2e\00binvrhs\00binvrhs\00\003154\001\001\000\006\00256\001\003154", !1, !5, !222, null, null, null, null, !225} ; [ DW_TAG_subprogram ] [line 3154] [local] [def] [binvrhs]
-!222 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !223, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!217 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 3527, scope: !215, file: !5, type: !8)
+!218 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 3527, scope: !215, file: !5, type: !8)
+!219 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 3527, scope: !215, file: !5, type: !8)
+!220 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "ksize", line: 3527, scope: !215, file: !5, type: !8)
+!221 = !DISubprogram(name: "binvrhs", line: 3154, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 3154, file: !1, scope: !5, type: !222, variables: !225)
+!222 = !DISubroutineType(types: !223)
!223 = !{null, !224, !105}
-!224 = !{!"0xf\00\000\0064\0064\000\000", null, null, !91} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!224 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !91)
!225 = !{!226, !227, !228, !229}
-!226 = !{!"0x101\00lhs\0016780370\000", !221, !5, !224} ; [ DW_TAG_arg_variable ] [lhs] [line 3154]
-!227 = !{!"0x101\00r\0033557586\000", !221, !5, !105} ; [ DW_TAG_arg_variable ] [r] [line 3154]
-!228 = !{!"0x100\00pivot\003159\000", !221, !5, !20} ; [ DW_TAG_auto_variable ] [pivot] [line 3159]
-!229 = !{!"0x100\00coeff\003159\000", !221, !5, !20} ; [ DW_TAG_auto_variable ] [coeff] [line 3159]
-!230 = !{!"0x2e\00matmul_sub\00matmul_sub\00\002841\001\001\000\006\00256\001\002842", !1, !5, !231, null, null, null, null, !233} ; [ DW_TAG_subprogram ] [line 2841] [local] [def] [scope 2842] [matmul_sub]
-!231 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !232, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!226 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "lhs", line: 3154, arg: 1, scope: !221, file: !5, type: !224)
+!227 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "r", line: 3154, arg: 2, scope: !221, file: !5, type: !105)
+!228 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "pivot", line: 3159, scope: !221, file: !5, type: !20)
+!229 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "coeff", line: 3159, scope: !221, file: !5, type: !20)
+!230 = !DISubprogram(name: "matmul_sub", line: 2841, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 2842, file: !1, scope: !5, type: !231, variables: !233)
+!231 = !DISubroutineType(types: !232)
!232 = !{null, !224, !224, !224}
!233 = !{!234, !235, !236, !237}
-!234 = !{!"0x101\00ablock\0016780057\000", !230, !5, !224} ; [ DW_TAG_arg_variable ] [ablock] [line 2841]
-!235 = !{!"0x101\00bblock\0033557273\000", !230, !5, !224} ; [ DW_TAG_arg_variable ] [bblock] [line 2841]
-!236 = !{!"0x101\00cblock\0050334490\000", !230, !5, !224} ; [ DW_TAG_arg_variable ] [cblock] [line 2842]
-!237 = !{!"0x100\00j\002851\000", !230, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 2851]
-!238 = !{!"0x2e\00matvec_sub\00matvec_sub\00\002814\001\001\000\006\00256\001\002814", !1, !5, !239, null, null, null, null, !241} ; [ DW_TAG_subprogram ] [line 2814] [local] [def] [matvec_sub]
-!239 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !240, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!234 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "ablock", line: 2841, arg: 1, scope: !230, file: !5, type: !224)
+!235 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "bblock", line: 2841, arg: 2, scope: !230, file: !5, type: !224)
+!236 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "cblock", line: 2842, arg: 3, scope: !230, file: !5, type: !224)
+!237 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 2851, scope: !230, file: !5, type: !8)
+!238 = !DISubprogram(name: "matvec_sub", line: 2814, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 2814, file: !1, scope: !5, type: !239, variables: !241)
+!239 = !DISubroutineType(types: !240)
!240 = !{null, !224, !105, !105}
!241 = !{!242, !243, !244, !245}
-!242 = !{!"0x101\00ablock\0016780030\000", !238, !5, !224} ; [ DW_TAG_arg_variable ] [ablock] [line 2814]
-!243 = !{!"0x101\00avec\0033557246\000", !238, !5, !105} ; [ DW_TAG_arg_variable ] [avec] [line 2814]
-!244 = !{!"0x101\00bvec\0050334462\000", !238, !5, !105} ; [ DW_TAG_arg_variable ] [bvec] [line 2814]
-!245 = !{!"0x100\00i\002823\000", !238, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 2823]
-!246 = !{!"0x2e\00binvcrhs\00binvcrhs\00\002885\001\001\000\006\00256\001\002885", !1, !5, !247, null, null, null, null, !249} ; [ DW_TAG_subprogram ] [line 2885] [local] [def] [binvcrhs]
-!247 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !248, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!242 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "ablock", line: 2814, arg: 1, scope: !238, file: !5, type: !224)
+!243 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "avec", line: 2814, arg: 2, scope: !238, file: !5, type: !105)
+!244 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "bvec", line: 2814, arg: 3, scope: !238, file: !5, type: !105)
+!245 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 2823, scope: !238, file: !5, type: !8)
+!246 = !DISubprogram(name: "binvcrhs", line: 2885, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 2885, file: !1, scope: !5, type: !247, variables: !249)
+!247 = !DISubroutineType(types: !248)
!248 = !{null, !224, !224, !105}
!249 = !{!250, !251, !252, !253, !254}
-!250 = !{!"0x101\00lhs\0016780101\000", !246, !5, !224} ; [ DW_TAG_arg_variable ] [lhs] [line 2885]
-!251 = !{!"0x101\00c\0033557317\000", !246, !5, !224} ; [ DW_TAG_arg_variable ] [c] [line 2885]
-!252 = !{!"0x101\00r\0050334533\000", !246, !5, !105} ; [ DW_TAG_arg_variable ] [r] [line 2885]
-!253 = !{!"0x100\00pivot\002890\000", !246, !5, !20} ; [ DW_TAG_auto_variable ] [pivot] [line 2890]
-!254 = !{!"0x100\00coeff\002890\000", !246, !5, !20} ; [ DW_TAG_auto_variable ] [coeff] [line 2890]
-!255 = !{!"0x2e\00lhsz\00lhsz\00\001475\001\001\000\006\00256\001\001475", !1, !5, !115, null, null, null, null, !256} ; [ DW_TAG_subprogram ] [line 1475] [local] [def] [lhsz]
+!250 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "lhs", line: 2885, arg: 1, scope: !246, file: !5, type: !224)
+!251 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "c", line: 2885, arg: 2, scope: !246, file: !5, type: !224)
+!252 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "r", line: 2885, arg: 3, scope: !246, file: !5, type: !105)
+!253 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "pivot", line: 2890, scope: !246, file: !5, type: !20)
+!254 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "coeff", line: 2890, scope: !246, file: !5, type: !20)
+!255 = !DISubprogram(name: "lhsz", line: 1475, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 1475, file: !1, scope: !5, type: !115, variables: !256)
!256 = !{!257, !258, !259}
-!257 = !{!"0x100\00i\001484\000", !255, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 1484]
-!258 = !{!"0x100\00j\001484\000", !255, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 1484]
-!259 = !{!"0x100\00k\001484\000", !255, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 1484]
-!260 = !{!"0x2e\00y_solve\00y_solve\00\003299\001\001\000\006\00256\001\003299", !1, !5, !115, null, null, null, null, !2} ; [ DW_TAG_subprogram ] [line 3299] [local] [def] [y_solve]
-!261 = !{!"0x2e\00y_backsubstitute\00y_backsubstitute\00\003323\001\001\000\006\00256\001\003323", !1, !5, !115, null, null, null, null, !262} ; [ DW_TAG_subprogram ] [line 3323] [local] [def] [y_backsubstitute]
+!257 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 1484, scope: !255, file: !5, type: !8)
+!258 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 1484, scope: !255, file: !5, type: !8)
+!259 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 1484, scope: !255, file: !5, type: !8)
+!260 = !DISubprogram(name: "y_solve", line: 3299, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 3299, file: !1, scope: !5, type: !115, variables: !2)
+!261 = !DISubprogram(name: "y_backsubstitute", line: 3323, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 3323, file: !1, scope: !5, type: !115, variables: !262)
!262 = !{!263, !264, !265, !266, !267}
-!263 = !{!"0x100\00i\003335\000", !261, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 3335]
-!264 = !{!"0x100\00j\003335\000", !261, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 3335]
-!265 = !{!"0x100\00k\003335\000", !261, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 3335]
-!266 = !{!"0x100\00m\003335\000", !261, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 3335]
-!267 = !{!"0x100\00n\003335\000", !261, !5, !8} ; [ DW_TAG_auto_variable ] [n] [line 3335]
-!268 = !{!"0x2e\00y_solve_cell\00y_solve_cell\00\003355\001\001\000\006\00256\001\003355", !1, !5, !115, null, null, null, null, !269} ; [ DW_TAG_subprogram ] [line 3355] [local] [def] [y_solve_cell]
+!263 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 3335, scope: !261, file: !5, type: !8)
+!264 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 3335, scope: !261, file: !5, type: !8)
+!265 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 3335, scope: !261, file: !5, type: !8)
+!266 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 3335, scope: !261, file: !5, type: !8)
+!267 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "n", line: 3335, scope: !261, file: !5, type: !8)
+!268 = !DISubprogram(name: "y_solve_cell", line: 3355, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 3355, file: !1, scope: !5, type: !115, variables: !269)
!269 = !{!270, !271, !272, !273}
-!270 = !{!"0x100\00i\003370\000", !268, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 3370]
-!271 = !{!"0x100\00j\003370\000", !268, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 3370]
-!272 = !{!"0x100\00k\003370\000", !268, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 3370]
-!273 = !{!"0x100\00jsize\003370\000", !268, !5, !8} ; [ DW_TAG_auto_variable ] [jsize] [line 3370]
-!274 = !{!"0x2e\00lhsy\00lhsy\00\001181\001\001\000\006\00256\001\001181", !1, !5, !115, null, null, null, null, !275} ; [ DW_TAG_subprogram ] [line 1181] [local] [def] [lhsy]
+!270 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 3370, scope: !268, file: !5, type: !8)
+!271 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 3370, scope: !268, file: !5, type: !8)
+!272 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 3370, scope: !268, file: !5, type: !8)
+!273 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "jsize", line: 3370, scope: !268, file: !5, type: !8)
+!274 = !DISubprogram(name: "lhsy", line: 1181, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 1181, file: !1, scope: !5, type: !115, variables: !275)
!275 = !{!276, !277, !278}
-!276 = !{!"0x100\00i\001190\000", !274, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 1190]
-!277 = !{!"0x100\00j\001190\000", !274, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 1190]
-!278 = !{!"0x100\00k\001190\000", !274, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 1190]
-!279 = !{!"0x2e\00x_solve\00x_solve\00\002658\001\001\000\006\00256\001\002658", !1, !5, !115, null, null, null, null, !2} ; [ DW_TAG_subprogram ] [line 2658] [local] [def] [x_solve]
-!280 = !{!"0x2e\00x_backsubstitute\00x_backsubstitute\00\002684\001\001\000\006\00256\001\002684", !1, !5, !115, null, null, null, null, !281} ; [ DW_TAG_subprogram ] [line 2684] [local] [def] [x_backsubstitute]
+!276 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 1190, scope: !274, file: !5, type: !8)
+!277 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 1190, scope: !274, file: !5, type: !8)
+!278 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 1190, scope: !274, file: !5, type: !8)
+!279 = !DISubprogram(name: "x_solve", line: 2658, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 2658, file: !1, scope: !5, type: !115, variables: !2)
+!280 = !DISubprogram(name: "x_backsubstitute", line: 2684, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 2684, file: !1, scope: !5, type: !115, variables: !281)
!281 = !{!282, !283, !284, !285, !286}
-!282 = !{!"0x100\00i\002696\000", !280, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 2696]
-!283 = !{!"0x100\00j\002696\000", !280, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 2696]
-!284 = !{!"0x100\00k\002696\000", !280, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 2696]
-!285 = !{!"0x100\00m\002696\000", !280, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 2696]
-!286 = !{!"0x100\00n\002696\000", !280, !5, !8} ; [ DW_TAG_auto_variable ] [n] [line 2696]
-!287 = !{!"0x2e\00x_solve_cell\00x_solve_cell\00\002716\001\001\000\006\00256\001\002716", !1, !5, !115, null, null, null, null, !288} ; [ DW_TAG_subprogram ] [line 2716] [local] [def] [x_solve_cell]
+!282 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 2696, scope: !280, file: !5, type: !8)
+!283 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 2696, scope: !280, file: !5, type: !8)
+!284 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 2696, scope: !280, file: !5, type: !8)
+!285 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "m", line: 2696, scope: !280, file: !5, type: !8)
+!286 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "n", line: 2696, scope: !280, file: !5, type: !8)
+!287 = !DISubprogram(name: "x_solve_cell", line: 2716, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 2716, file: !1, scope: !5, type: !115, variables: !288)
!288 = !{!289, !290, !291, !292}
-!289 = !{!"0x100\00i\002728\000", !287, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 2728]
-!290 = !{!"0x100\00j\002728\000", !287, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 2728]
-!291 = !{!"0x100\00k\002728\000", !287, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 2728]
-!292 = !{!"0x100\00isize\002728\000", !287, !5, !8} ; [ DW_TAG_auto_variable ] [isize] [line 2728]
-!293 = !{!"0x2e\00lhsx\00lhsx\00\00898\001\001\000\006\00256\001\00898", !1, !5, !115, null, null, null, null, !294} ; [ DW_TAG_subprogram ] [line 898] [local] [def] [lhsx]
+!289 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 2728, scope: !287, file: !5, type: !8)
+!290 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 2728, scope: !287, file: !5, type: !8)
+!291 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 2728, scope: !287, file: !5, type: !8)
+!292 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "isize", line: 2728, scope: !287, file: !5, type: !8)
+!293 = !DISubprogram(name: "lhsx", line: 898, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 898, file: !1, scope: !5, type: !115, variables: !294)
!294 = !{!295, !296, !297}
-!295 = !{!"0x100\00i\00907\000", !293, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 907]
-!296 = !{!"0x100\00j\00907\000", !293, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 907]
-!297 = !{!"0x100\00k\00907\000", !293, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 907]
+!295 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 907, scope: !293, file: !5, type: !8)
+!296 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "j", line: 907, scope: !293, file: !5, type: !8)
+!297 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "k", line: 907, scope: !293, file: !5, type: !8)
!298 = !{!299, !304, !305, !309, !310, !311, !312, !313, !314, !315, !316, !317, !318, !319, !320, !321, !322, !323, !324, !325, !326, !327, !328, !329, !330, !331, !332, !333, !334, !335, !336, !337, !338, !339, !340, !341, !342, !343, !347, !350, !351, !352, !353, !354, !355, !356, !360, !361, !362, !363, !364, !365, !366, !367, !368, !369, !370, !371, !372, !373, !374, !375, !376, !377, !378, !379, !380, !381, !382, !383, !384, !385, !386, !387, !388, !389, !390, !391, !392, !393, !394, !395, !396, !397, !398, !399, !400, !401, !402, !403, !404, !405, !406, !407, !408, !409, !410, !411, !412, !413, !414, !415, !416, !417, !418, !419, !422, !426, !427, !430, !431, !434, !435, !436, !437}
-!299 = !{!"0x34\00grid_points\00grid_points\00\0028\001\001", null, !300, !302, [3 x i32]* @grid_points, null} ; [ DW_TAG_variable ] [grid_points] [line 28] [local] [def]
-!300 = !{!"0x29", !301} ; [ DW_TAG_file_type ] [/home/hfinkel/src/NPB2.3-omp-C/BT/./header.h]
+!299 = !DIGlobalVariable(name: "grid_points", line: 28, isLocal: true, isDefinition: true, scope: null, file: !300, type: !302, variable: [3 x i32]* @grid_points)
+!300 = !DIFile(filename: "./header.h", directory: "/home/hfinkel/src/NPB2.3-omp-C/BT")
!301 = !{!"./header.h", !"/home/hfinkel/src/NPB2.3-omp-C/BT"}
-!302 = !{!"0x1\00\000\0096\0032\000\000", null, null, !8, !303, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 96, align 32, offset 0] [from int]
+!302 = !DICompositeType(tag: DW_TAG_array_type, size: 96, align: 32, baseType: !8, elements: !303)
!303 = !{!178}
-!304 = !{!"0x34\00dt\00dt\00\0035\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dt] [line 35] [local] [def]
-!305 = !{!"0x34\00rhs\00rhs\00\0068\001\001", null, !300, !306, null, null} ; [ DW_TAG_variable ] [rhs] [line 68] [local] [def]
-!306 = !{!"0x1\00\000\001385839040\0064\000\000", null, null, !20, !307, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 1385839040, align 64, offset 0] [from double]
+!304 = !DIGlobalVariable(name: "dt", line: 35, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!305 = !DIGlobalVariable(name: "rhs", line: 68, isLocal: true, isDefinition: true, scope: null, file: !300, type: !306)
+!306 = !DICompositeType(tag: DW_TAG_array_type, size: 1385839040, align: 64, baseType: !20, elements: !307)
!307 = !{!308, !308, !308, !93}
-!308 = !{!"0x21\000\00163"} ; [ DW_TAG_subrange_type ] [0, 162]
-!309 = !{!"0x34\00zzcon5\00zzcon5\00\0042\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [zzcon5] [line 42] [local] [def]
-!310 = !{!"0x34\00zzcon4\00zzcon4\00\0042\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [zzcon4] [line 42] [local] [def]
-!311 = !{!"0x34\00zzcon3\00zzcon3\00\0042\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [zzcon3] [line 42] [local] [def]
-!312 = !{!"0x34\00dz5tz1\00dz5tz1\00\0043\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz5tz1] [line 43] [local] [def]
-!313 = !{!"0x34\00dz4tz1\00dz4tz1\00\0043\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz4tz1] [line 43] [local] [def]
-!314 = !{!"0x34\00dz3tz1\00dz3tz1\00\0043\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz3tz1] [line 43] [local] [def]
-!315 = !{!"0x34\00zzcon2\00zzcon2\00\0042\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [zzcon2] [line 42] [local] [def]
-!316 = !{!"0x34\00dz2tz1\00dz2tz1\00\0043\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz2tz1] [line 43] [local] [def]
-!317 = !{!"0x34\00tz2\00tz2\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tz2] [line 31] [local] [def]
-!318 = !{!"0x34\00dz1tz1\00dz1tz1\00\0043\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz1tz1] [line 43] [local] [def]
-!319 = !{!"0x34\00yycon5\00yycon5\00\0040\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [yycon5] [line 40] [local] [def]
-!320 = !{!"0x34\00yycon4\00yycon4\00\0040\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [yycon4] [line 40] [local] [def]
-!321 = !{!"0x34\00yycon3\00yycon3\00\0040\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [yycon3] [line 40] [local] [def]
-!322 = !{!"0x34\00dy5ty1\00dy5ty1\00\0041\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy5ty1] [line 41] [local] [def]
-!323 = !{!"0x34\00dy4ty1\00dy4ty1\00\0041\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy4ty1] [line 41] [local] [def]
-!324 = !{!"0x34\00dy3ty1\00dy3ty1\00\0041\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy3ty1] [line 41] [local] [def]
-!325 = !{!"0x34\00yycon2\00yycon2\00\0040\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [yycon2] [line 40] [local] [def]
-!326 = !{!"0x34\00dy2ty1\00dy2ty1\00\0041\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy2ty1] [line 41] [local] [def]
-!327 = !{!"0x34\00ty2\00ty2\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [ty2] [line 31] [local] [def]
-!328 = !{!"0x34\00dy1ty1\00dy1ty1\00\0041\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy1ty1] [line 41] [local] [def]
-!329 = !{!"0x34\00dssp\00dssp\00\0035\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dssp] [line 35] [local] [def]
-!330 = !{!"0x34\00c1\00c1\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c1] [line 45] [local] [def]
-!331 = !{!"0x34\00xxcon5\00xxcon5\00\0038\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [xxcon5] [line 38] [local] [def]
-!332 = !{!"0x34\00xxcon4\00xxcon4\00\0038\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [xxcon4] [line 38] [local] [def]
-!333 = !{!"0x34\00xxcon3\00xxcon3\00\0038\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [xxcon3] [line 38] [local] [def]
-!334 = !{!"0x34\00dx5tx1\00dx5tx1\00\0039\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx5tx1] [line 39] [local] [def]
-!335 = !{!"0x34\00dx4tx1\00dx4tx1\00\0039\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx4tx1] [line 39] [local] [def]
-!336 = !{!"0x34\00dx3tx1\00dx3tx1\00\0039\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx3tx1] [line 39] [local] [def]
-!337 = !{!"0x34\00c2\00c2\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c2] [line 45] [local] [def]
-!338 = !{!"0x34\00con43\00con43\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [con43] [line 48] [local] [def]
-!339 = !{!"0x34\00xxcon2\00xxcon2\00\0038\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [xxcon2] [line 38] [local] [def]
-!340 = !{!"0x34\00dx2tx1\00dx2tx1\00\0039\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx2tx1] [line 39] [local] [def]
-!341 = !{!"0x34\00tx2\00tx2\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tx2] [line 31] [local] [def]
-!342 = !{!"0x34\00dx1tx1\00dx1tx1\00\0039\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx1tx1] [line 39] [local] [def]
-!343 = !{!"0x34\00forcing\00forcing\00\0066\001\001", null, !300, !344, null, null} ; [ DW_TAG_variable ] [forcing] [line 66] [local] [def]
-!344 = !{!"0x1\00\000\001663006848\0064\000\000", null, null, !20, !345, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 1663006848, align 64, offset 0] [from double]
+!308 = !DISubrange(count: 163)
+!309 = !DIGlobalVariable(name: "zzcon5", line: 42, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!310 = !DIGlobalVariable(name: "zzcon4", line: 42, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!311 = !DIGlobalVariable(name: "zzcon3", line: 42, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!312 = !DIGlobalVariable(name: "dz5tz1", line: 43, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!313 = !DIGlobalVariable(name: "dz4tz1", line: 43, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!314 = !DIGlobalVariable(name: "dz3tz1", line: 43, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!315 = !DIGlobalVariable(name: "zzcon2", line: 42, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!316 = !DIGlobalVariable(name: "dz2tz1", line: 43, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!317 = !DIGlobalVariable(name: "tz2", line: 31, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!318 = !DIGlobalVariable(name: "dz1tz1", line: 43, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!319 = !DIGlobalVariable(name: "yycon5", line: 40, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!320 = !DIGlobalVariable(name: "yycon4", line: 40, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!321 = !DIGlobalVariable(name: "yycon3", line: 40, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!322 = !DIGlobalVariable(name: "dy5ty1", line: 41, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!323 = !DIGlobalVariable(name: "dy4ty1", line: 41, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!324 = !DIGlobalVariable(name: "dy3ty1", line: 41, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!325 = !DIGlobalVariable(name: "yycon2", line: 40, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!326 = !DIGlobalVariable(name: "dy2ty1", line: 41, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!327 = !DIGlobalVariable(name: "ty2", line: 31, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!328 = !DIGlobalVariable(name: "dy1ty1", line: 41, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!329 = !DIGlobalVariable(name: "dssp", line: 35, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!330 = !DIGlobalVariable(name: "c1", line: 45, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!331 = !DIGlobalVariable(name: "xxcon5", line: 38, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!332 = !DIGlobalVariable(name: "xxcon4", line: 38, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!333 = !DIGlobalVariable(name: "xxcon3", line: 38, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!334 = !DIGlobalVariable(name: "dx5tx1", line: 39, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!335 = !DIGlobalVariable(name: "dx4tx1", line: 39, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!336 = !DIGlobalVariable(name: "dx3tx1", line: 39, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!337 = !DIGlobalVariable(name: "c2", line: 45, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!338 = !DIGlobalVariable(name: "con43", line: 48, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!339 = !DIGlobalVariable(name: "xxcon2", line: 38, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!340 = !DIGlobalVariable(name: "dx2tx1", line: 39, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!341 = !DIGlobalVariable(name: "tx2", line: 31, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!342 = !DIGlobalVariable(name: "dx1tx1", line: 39, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!343 = !DIGlobalVariable(name: "forcing", line: 66, isLocal: true, isDefinition: true, scope: null, file: !300, type: !344)
+!344 = !DICompositeType(tag: DW_TAG_array_type, size: 1663006848, align: 64, baseType: !20, elements: !345)
!345 = !{!308, !308, !308, !346}
-!346 = !{!"0x21\000\006"} ; [ DW_TAG_subrange_type ] [0, 5]
-!347 = !{!"0x34\00qs\00qs\00\0063\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [qs] [line 63] [local] [def]
-!348 = !{!"0x1\00\000\00277167808\0064\000\000", null, null, !20, !349, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 277167808, align 64, offset 0] [from double]
+!346 = !DISubrange(count: 6)
+!347 = !DIGlobalVariable(name: "qs", line: 63, isLocal: true, isDefinition: true, scope: null, file: !300, type: !348)
+!348 = !DICompositeType(tag: DW_TAG_array_type, size: 277167808, align: 64, baseType: !20, elements: !349)
!349 = !{!308, !308, !308}
-!350 = !{!"0x34\00square\00square\00\0065\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [square] [line 65] [local] [def]
-!351 = !{!"0x34\00ws\00ws\00\0062\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [ws] [line 62] [local] [def]
-!352 = !{!"0x34\00vs\00vs\00\0061\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [vs] [line 61] [local] [def]
-!353 = !{!"0x34\00us\00us\00\0060\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [us] [line 60] [local] [def]
-!354 = !{!"0x34\00rho_i\00rho_i\00\0064\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [rho_i] [line 64] [local] [def]
-!355 = !{!"0x34\00u\00u\00\0067\001\001", null, !300, !306, null, null} ; [ DW_TAG_variable ] [u] [line 67] [local] [def]
-!356 = !{!"0x34\00ce\00ce\00\0036\001\001", null, !300, !357, null, null} ; [ DW_TAG_variable ] [ce] [line 36] [local] [def]
-!357 = !{!"0x1\00\000\004160\0064\000\000", null, null, !20, !358, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 4160, align 64, offset 0] [from double]
+!350 = !DIGlobalVariable(name: "square", line: 65, isLocal: true, isDefinition: true, scope: null, file: !300, type: !348)
+!351 = !DIGlobalVariable(name: "ws", line: 62, isLocal: true, isDefinition: true, scope: null, file: !300, type: !348)
+!352 = !DIGlobalVariable(name: "vs", line: 61, isLocal: true, isDefinition: true, scope: null, file: !300, type: !348)
+!353 = !DIGlobalVariable(name: "us", line: 60, isLocal: true, isDefinition: true, scope: null, file: !300, type: !348)
+!354 = !DIGlobalVariable(name: "rho_i", line: 64, isLocal: true, isDefinition: true, scope: null, file: !300, type: !348)
+!355 = !DIGlobalVariable(name: "u", line: 67, isLocal: true, isDefinition: true, scope: null, file: !300, type: !306)
+!356 = !DIGlobalVariable(name: "ce", line: 36, isLocal: true, isDefinition: true, scope: null, file: !300, type: !357)
+!357 = !DICompositeType(tag: DW_TAG_array_type, size: 4160, align: 64, baseType: !20, elements: !358)
!358 = !{!93, !359}
-!359 = !{!"0x21\000\0013"} ; [ DW_TAG_subrange_type ] [0, 12]
-!360 = !{!"0x34\00dnzm1\00dnzm1\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dnzm1] [line 44] [local] [def]
-!361 = !{!"0x34\00dnym1\00dnym1\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dnym1] [line 44] [local] [def]
-!362 = !{!"0x34\00dnxm1\00dnxm1\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dnxm1] [line 44] [local] [def]
-!363 = !{!"0x34\00zzcon1\00zzcon1\00\0042\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [zzcon1] [line 42] [local] [def]
-!364 = !{!"0x34\00yycon1\00yycon1\00\0040\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [yycon1] [line 40] [local] [def]
-!365 = !{!"0x34\00xxcon1\00xxcon1\00\0038\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [xxcon1] [line 38] [local] [def]
-!366 = !{!"0x34\00con16\00con16\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [con16] [line 48] [local] [def]
-!367 = !{!"0x34\00c2iv\00c2iv\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c2iv] [line 48] [local] [def]
-!368 = !{!"0x34\00c3c4tz3\00c3c4tz3\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c3c4tz3] [line 48] [local] [def]
-!369 = !{!"0x34\00c3c4ty3\00c3c4ty3\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c3c4ty3] [line 48] [local] [def]
-!370 = !{!"0x34\00c3c4tx3\00c3c4tx3\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c3c4tx3] [line 48] [local] [def]
-!371 = !{!"0x34\00comz6\00comz6\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [comz6] [line 47] [local] [def]
-!372 = !{!"0x34\00comz5\00comz5\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [comz5] [line 47] [local] [def]
-!373 = !{!"0x34\00comz4\00comz4\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [comz4] [line 47] [local] [def]
-!374 = !{!"0x34\00comz1\00comz1\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [comz1] [line 47] [local] [def]
-!375 = !{!"0x34\00dtdssp\00dtdssp\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dtdssp] [line 45] [local] [def]
-!376 = !{!"0x34\00c2dttz1\00c2dttz1\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c2dttz1] [line 47] [local] [def]
-!377 = !{!"0x34\00c2dtty1\00c2dtty1\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c2dtty1] [line 47] [local] [def]
-!378 = !{!"0x34\00c2dttx1\00c2dttx1\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c2dttx1] [line 47] [local] [def]
-!379 = !{!"0x34\00dttz2\00dttz2\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dttz2] [line 46] [local] [def]
-!380 = !{!"0x34\00dttz1\00dttz1\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dttz1] [line 46] [local] [def]
-!381 = !{!"0x34\00dtty2\00dtty2\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dtty2] [line 46] [local] [def]
-!382 = !{!"0x34\00dtty1\00dtty1\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dtty1] [line 46] [local] [def]
-!383 = !{!"0x34\00dttx2\00dttx2\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dttx2] [line 46] [local] [def]
-!384 = !{!"0x34\00dttx1\00dttx1\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dttx1] [line 46] [local] [def]
-!385 = !{!"0x34\00c5dssp\00c5dssp\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c5dssp] [line 45] [local] [def]
-!386 = !{!"0x34\00c4dssp\00c4dssp\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c4dssp] [line 45] [local] [def]
-!387 = !{!"0x34\00dzmax\00dzmax\00\0037\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dzmax] [line 37] [local] [def]
-!388 = !{!"0x34\00dymax\00dymax\00\0037\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dymax] [line 37] [local] [def]
-!389 = !{!"0x34\00dxmax\00dxmax\00\0037\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dxmax] [line 37] [local] [def]
-!390 = !{!"0x34\00dz5\00dz5\00\0034\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz5] [line 34] [local] [def]
-!391 = !{!"0x34\00dz4\00dz4\00\0034\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz4] [line 34] [local] [def]
-!392 = !{!"0x34\00dz3\00dz3\00\0034\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz3] [line 34] [local] [def]
-!393 = !{!"0x34\00dz2\00dz2\00\0034\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz2] [line 34] [local] [def]
-!394 = !{!"0x34\00dz1\00dz1\00\0034\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz1] [line 34] [local] [def]
-!395 = !{!"0x34\00dy5\00dy5\00\0033\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy5] [line 33] [local] [def]
-!396 = !{!"0x34\00dy4\00dy4\00\0033\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy4] [line 33] [local] [def]
-!397 = !{!"0x34\00dy3\00dy3\00\0033\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy3] [line 33] [local] [def]
-!398 = !{!"0x34\00dy2\00dy2\00\0033\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy2] [line 33] [local] [def]
-!399 = !{!"0x34\00dy1\00dy1\00\0033\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy1] [line 33] [local] [def]
-!400 = !{!"0x34\00dx5\00dx5\00\0032\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx5] [line 32] [local] [def]
-!401 = !{!"0x34\00dx4\00dx4\00\0032\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx4] [line 32] [local] [def]
-!402 = !{!"0x34\00dx3\00dx3\00\0032\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx3] [line 32] [local] [def]
-!403 = !{!"0x34\00dx2\00dx2\00\0032\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx2] [line 32] [local] [def]
-!404 = !{!"0x34\00dx1\00dx1\00\0032\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx1] [line 32] [local] [def]
-!405 = !{!"0x34\00tz3\00tz3\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tz3] [line 31] [local] [def]
-!406 = !{!"0x34\00tz1\00tz1\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tz1] [line 31] [local] [def]
-!407 = !{!"0x34\00ty3\00ty3\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [ty3] [line 31] [local] [def]
-!408 = !{!"0x34\00ty1\00ty1\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [ty1] [line 31] [local] [def]
-!409 = !{!"0x34\00tx3\00tx3\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tx3] [line 31] [local] [def]
-!410 = !{!"0x34\00tx1\00tx1\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tx1] [line 31] [local] [def]
-!411 = !{!"0x34\00conz1\00conz1\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [conz1] [line 45] [local] [def]
-!412 = !{!"0x34\00c1345\00c1345\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c1345] [line 44] [local] [def]
-!413 = !{!"0x34\00c3c4\00c3c4\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c3c4] [line 44] [local] [def]
-!414 = !{!"0x34\00c1c5\00c1c5\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c1c5] [line 44] [local] [def]
-!415 = !{!"0x34\00c1c2\00c1c2\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c1c2] [line 44] [local] [def]
-!416 = !{!"0x34\00c5\00c5\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c5] [line 45] [local] [def]
-!417 = !{!"0x34\00c4\00c4\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c4] [line 45] [local] [def]
-!418 = !{!"0x34\00c3\00c3\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c3] [line 45] [local] [def]
-!419 = !{!"0x34\00lhs\00lhs\00\0069\001\001", null, !300, !420, null, null} ; [ DW_TAG_variable ] [lhs] [line 69] [local] [def]
-!420 = !{!"0x1\00\000\0020787585600\0064\000\000", null, null, !20, !421, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 20787585600, align 64, offset 0] [from double]
+!359 = !DISubrange(count: 13)
+!360 = !DIGlobalVariable(name: "dnzm1", line: 44, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!361 = !DIGlobalVariable(name: "dnym1", line: 44, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!362 = !DIGlobalVariable(name: "dnxm1", line: 44, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!363 = !DIGlobalVariable(name: "zzcon1", line: 42, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!364 = !DIGlobalVariable(name: "yycon1", line: 40, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!365 = !DIGlobalVariable(name: "xxcon1", line: 38, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!366 = !DIGlobalVariable(name: "con16", line: 48, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!367 = !DIGlobalVariable(name: "c2iv", line: 48, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!368 = !DIGlobalVariable(name: "c3c4tz3", line: 48, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!369 = !DIGlobalVariable(name: "c3c4ty3", line: 48, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!370 = !DIGlobalVariable(name: "c3c4tx3", line: 48, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!371 = !DIGlobalVariable(name: "comz6", line: 47, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!372 = !DIGlobalVariable(name: "comz5", line: 47, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!373 = !DIGlobalVariable(name: "comz4", line: 47, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!374 = !DIGlobalVariable(name: "comz1", line: 47, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!375 = !DIGlobalVariable(name: "dtdssp", line: 45, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!376 = !DIGlobalVariable(name: "c2dttz1", line: 47, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!377 = !DIGlobalVariable(name: "c2dtty1", line: 47, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!378 = !DIGlobalVariable(name: "c2dttx1", line: 47, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!379 = !DIGlobalVariable(name: "dttz2", line: 46, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!380 = !DIGlobalVariable(name: "dttz1", line: 46, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!381 = !DIGlobalVariable(name: "dtty2", line: 46, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!382 = !DIGlobalVariable(name: "dtty1", line: 46, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!383 = !DIGlobalVariable(name: "dttx2", line: 46, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!384 = !DIGlobalVariable(name: "dttx1", line: 46, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!385 = !DIGlobalVariable(name: "c5dssp", line: 45, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!386 = !DIGlobalVariable(name: "c4dssp", line: 45, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!387 = !DIGlobalVariable(name: "dzmax", line: 37, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!388 = !DIGlobalVariable(name: "dymax", line: 37, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!389 = !DIGlobalVariable(name: "dxmax", line: 37, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!390 = !DIGlobalVariable(name: "dz5", line: 34, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!391 = !DIGlobalVariable(name: "dz4", line: 34, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!392 = !DIGlobalVariable(name: "dz3", line: 34, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!393 = !DIGlobalVariable(name: "dz2", line: 34, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!394 = !DIGlobalVariable(name: "dz1", line: 34, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!395 = !DIGlobalVariable(name: "dy5", line: 33, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!396 = !DIGlobalVariable(name: "dy4", line: 33, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!397 = !DIGlobalVariable(name: "dy3", line: 33, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!398 = !DIGlobalVariable(name: "dy2", line: 33, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!399 = !DIGlobalVariable(name: "dy1", line: 33, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!400 = !DIGlobalVariable(name: "dx5", line: 32, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!401 = !DIGlobalVariable(name: "dx4", line: 32, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!402 = !DIGlobalVariable(name: "dx3", line: 32, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!403 = !DIGlobalVariable(name: "dx2", line: 32, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!404 = !DIGlobalVariable(name: "dx1", line: 32, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!405 = !DIGlobalVariable(name: "tz3", line: 31, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!406 = !DIGlobalVariable(name: "tz1", line: 31, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!407 = !DIGlobalVariable(name: "ty3", line: 31, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!408 = !DIGlobalVariable(name: "ty1", line: 31, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!409 = !DIGlobalVariable(name: "tx3", line: 31, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!410 = !DIGlobalVariable(name: "tx1", line: 31, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!411 = !DIGlobalVariable(name: "conz1", line: 45, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!412 = !DIGlobalVariable(name: "c1345", line: 44, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!413 = !DIGlobalVariable(name: "c3c4", line: 44, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!414 = !DIGlobalVariable(name: "c1c5", line: 44, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!415 = !DIGlobalVariable(name: "c1c2", line: 44, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!416 = !DIGlobalVariable(name: "c5", line: 45, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!417 = !DIGlobalVariable(name: "c4", line: 45, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!418 = !DIGlobalVariable(name: "c3", line: 45, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!419 = !DIGlobalVariable(name: "lhs", line: 69, isLocal: true, isDefinition: true, scope: null, file: !300, type: !420)
+!420 = !DICompositeType(tag: DW_TAG_array_type, size: 20787585600, align: 64, baseType: !20, elements: !421)
!421 = !{!308, !308, !308, !178, !93, !93}
-!422 = !{!"0x34\00q\00q\00\0073\001\001", null, !300, !423, null, null} ; [ DW_TAG_variable ] [q] [line 73] [local] [def]
-!423 = !{!"0x1\00\000\0010368\0064\000\000", null, null, !20, !424, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 10368, align 64, offset 0] [from double]
+!422 = !DIGlobalVariable(name: "q", line: 73, isLocal: true, isDefinition: true, scope: null, file: !300, type: !423)
+!423 = !DICompositeType(tag: DW_TAG_array_type, size: 10368, align: 64, baseType: !20, elements: !424)
!424 = !{!425}
-!425 = !{!"0x21\000\00162"} ; [ DW_TAG_subrange_type ] [0, 161]
-!426 = !{!"0x34\00cuf\00cuf\00\0072\001\001", null, !300, !423, null, null} ; [ DW_TAG_variable ] [cuf] [line 72] [local] [def]
-!427 = !{!"0x34\00buf\00buf\00\0075\001\001", null, !300, !428, null, null} ; [ DW_TAG_variable ] [buf] [line 75] [local] [def]
-!428 = !{!"0x1\00\000\0051840\0064\000\000", null, null, !20, !429, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 51840, align 64, offset 0] [from double]
+!425 = !DISubrange(count: 162)
+!426 = !DIGlobalVariable(name: "cuf", line: 72, isLocal: true, isDefinition: true, scope: null, file: !300, type: !423)
+!427 = !DIGlobalVariable(name: "buf", line: 75, isLocal: true, isDefinition: true, scope: null, file: !300, type: !428)
+!428 = !DICompositeType(tag: DW_TAG_array_type, size: 51840, align: 64, baseType: !20, elements: !429)
!429 = !{!425, !93}
-!430 = !{!"0x34\00ue\00ue\00\0074\001\001", null, !300, !428, null, null} ; [ DW_TAG_variable ] [ue] [line 74] [local] [def]
-!431 = !{!"0x34\00njac\00njac\00\0086\001\001", null, !300, !432, null, null} ; [ DW_TAG_variable ] [njac] [line 86] [local] [def]
-!432 = !{!"0x1\00\000\006886684800\0064\000\000", null, null, !20, !433, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 6886684800, align 64, offset 0] [from double]
+!430 = !DIGlobalVariable(name: "ue", line: 74, isLocal: true, isDefinition: true, scope: null, file: !300, type: !428)
+!431 = !DIGlobalVariable(name: "njac", line: 86, isLocal: true, isDefinition: true, scope: null, file: !300, type: !432)
+!432 = !DICompositeType(tag: DW_TAG_array_type, size: 6886684800, align: 64, baseType: !20, elements: !433)
!433 = !{!308, !308, !425, !93, !93}
-!434 = !{!"0x34\00fjac\00fjac\00\0084\001\001", null, !300, !432, null, null} ; [ DW_TAG_variable ] [fjac] [line 84] [local] [def]
-!435 = !{!"0x34\00tmp3\00tmp3\00\0088\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tmp3] [line 88] [local] [def]
-!436 = !{!"0x34\00tmp2\00tmp2\00\0088\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tmp2] [line 88] [local] [def]
-!437 = !{!"0x34\00tmp1\00tmp1\00\0088\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tmp1] [line 88] [local] [def]
+!434 = !DIGlobalVariable(name: "fjac", line: 84, isLocal: true, isDefinition: true, scope: null, file: !300, type: !432)
+!435 = !DIGlobalVariable(name: "tmp3", line: 88, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!436 = !DIGlobalVariable(name: "tmp2", line: 88, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
+!437 = !DIGlobalVariable(name: "tmp1", line: 88, isLocal: true, isDefinition: true, scope: null, file: !300, type: !20)
!438 = !{i32 2, !"Dwarf Version", i32 4}
-!439 = !MDLocation(line: 1898, scope: !440)
-!440 = !{!"0xb\001898\000\00107", !1, !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!441 = !MDLocation(line: 1913, scope: !442)
-!442 = !{!"0xb\001913\000\00115", !1, !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!443 = !MDLocation(line: 1923, scope: !114)
+!439 = !DILocation(line: 1898, scope: !440)
+!440 = distinct !DILexicalBlock(line: 1898, column: 0, file: !1, scope: !114)
+!441 = !DILocation(line: 1913, scope: !442)
+!442 = distinct !DILexicalBlock(line: 1913, column: 0, file: !1, scope: !114)
+!443 = !DILocation(line: 1923, scope: !114)
!444 = !{!"int", !445}
!445 = !{!"omnipotent char", !446}
!446 = !{!"Simple C/C++ TBAA"}
!447 = !{i32 1}
-!448 = !MDLocation(line: 1925, scope: !449)
-!449 = !{!"0xb\001925\000\00121", !1, !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!450 = !MDLocation(line: 1939, scope: !451)
-!451 = !{!"0xb\001939\000\00127", !1, !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!452 = !MDLocation(line: 1940, scope: !453)
-!453 = !{!"0xb\001940\000\00129", !1, !454} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!454 = !{!"0xb\001939\000\00128", !1, !451} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!455 = !MDLocation(line: 1941, scope: !456)
-!456 = !{!"0xb\001941\000\00131", !1, !457} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!457 = !{!"0xb\001940\000\00130", !1, !453} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!458 = !MDLocation(line: 2020, scope: !459)
-!459 = !{!"0xb\002020\000\00149", !1, !460} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!460 = !{!"0xb\002019\000\00148", !1, !461} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!461 = !{!"0xb\002019\000\00147", !1, !462} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!462 = !{!"0xb\002018\000\00146", !1, !463} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!463 = !{!"0xb\002018\000\00145", !1, !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!464 = !{i32 1, !"Debug Info Version", i32 2}
+!448 = !DILocation(line: 1925, scope: !449)
+!449 = distinct !DILexicalBlock(line: 1925, column: 0, file: !1, scope: !114)
+!450 = !DILocation(line: 1939, scope: !451)
+!451 = distinct !DILexicalBlock(line: 1939, column: 0, file: !1, scope: !114)
+!452 = !DILocation(line: 1940, scope: !453)
+!453 = distinct !DILexicalBlock(line: 1940, column: 0, file: !1, scope: !454)
+!454 = distinct !DILexicalBlock(line: 1939, column: 0, file: !1, scope: !451)
+!455 = !DILocation(line: 1941, scope: !456)
+!456 = distinct !DILexicalBlock(line: 1941, column: 0, file: !1, scope: !457)
+!457 = distinct !DILexicalBlock(line: 1940, column: 0, file: !1, scope: !453)
+!458 = !DILocation(line: 2020, scope: !459)
+!459 = distinct !DILexicalBlock(line: 2020, column: 0, file: !1, scope: !460)
+!460 = distinct !DILexicalBlock(line: 2019, column: 0, file: !1, scope: !461)
+!461 = distinct !DILexicalBlock(line: 2019, column: 0, file: !1, scope: !462)
+!462 = distinct !DILexicalBlock(line: 2018, column: 0, file: !1, scope: !463)
+!463 = distinct !DILexicalBlock(line: 2018, column: 0, file: !1, scope: !114)
+!464 = !{i32 1, !"Debug Info Version", i32 3}
diff --git a/test/CodeGen/PowerPC/pr17354.ll b/test/CodeGen/PowerPC/pr17354.ll
index dca81b1c2ca6..ed6fd3480dd4 100644
--- a/test/CodeGen/PowerPC/pr17354.ll
+++ b/test/CodeGen/PowerPC/pr17354.ll
@@ -14,7 +14,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define internal void @__cxx_global_var_init() section ".text.startup" {
entry:
- call void @_Z4funcv(%struct.CS* sret getelementptr inbounds ([1 x %struct.CS]* @_ZL3glb, i64 0, i64 0))
+ call void @_Z4funcv(%struct.CS* sret getelementptr inbounds ([1 x %struct.CS], [1 x %struct.CS]* @_ZL3glb, i64 0, i64 0))
ret void
}
@@ -25,7 +25,7 @@ entry:
; Function Attrs: nounwind
define void @_Z4funcv(%struct.CS* noalias sret %agg.result) #0 {
entry:
- %a_ = getelementptr inbounds %struct.CS* %agg.result, i32 0, i32 0
+ %a_ = getelementptr inbounds %struct.CS, %struct.CS* %agg.result, i32 0, i32 0
store i32 0, i32* %a_, align 4
ret void
}
diff --git a/test/CodeGen/PowerPC/pr18663.ll b/test/CodeGen/PowerPC/pr18663.ll
index 1b85223aa09a..04bc39276f90 100644
--- a/test/CodeGen/PowerPC/pr18663.ll
+++ b/test/CodeGen/PowerPC/pr18663.ll
@@ -61,21 +61,21 @@
define void @_ZNK18TriaObjectAccessorILi3ELi3EE10barycenterEv(%class.Point.1* noalias nocapture sret %agg.result, %class.TriaObjectAccessor.57* %this) #0 align 2 {
entry:
- %0 = load double* null, align 8
- %1 = load double* undef, align 8
+ %0 = load double, double* null, align 8
+ %1 = load double, double* undef, align 8
%call18 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 6)
- %2 = load double* undef, align 8
+ %2 = load double, double* undef, align 8
%call21 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 7)
- %3 = load double* undef, align 8
+ %3 = load double, double* undef, align 8
%call33 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 3)
- %4 = load double* null, align 8
- %5 = load double* undef, align 8
+ %4 = load double, double* null, align 8
+ %5 = load double, double* undef, align 8
%call45 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 7)
- %6 = load double* undef, align 8
+ %6 = load double, double* undef, align 8
%call48 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 0)
- %7 = load double* undef, align 8
+ %7 = load double, double* undef, align 8
%call66 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 6)
- %8 = load double* undef, align 8
+ %8 = load double, double* undef, align 8
%mul334 = fmul double undef, 2.000000e+00
%mul579 = fmul double %2, %5
%mul597 = fmul double undef, %mul579
diff --git a/test/CodeGen/PowerPC/pr20442.ll b/test/CodeGen/PowerPC/pr20442.ll
index ad43a04e70c4..555c3dae1f47 100644
--- a/test/CodeGen/PowerPC/pr20442.ll
+++ b/test/CodeGen/PowerPC/pr20442.ll
@@ -20,15 +20,15 @@ target triple = "powerpc-unknown-linux-gnu"
; Function Attrs: nounwind readonly uwtable
define i32 @fn1() #0 {
entry:
- %0 = load %struct.anon** @b, align 4
+ %0 = load %struct.anon*, %struct.anon** @b, align 4
%1 = ptrtoint %struct.anon* %0 to i32
%cmp = icmp sgt %struct.anon* %0, null
- %2 = load %struct.anon.0** @a, align 4
+ %2 = load %struct.anon.0*, %struct.anon.0** @a, align 4
br i1 %cmp, label %for.bodythread-pre-split, label %if.end8
for.bodythread-pre-split: ; preds = %entry
- %aclass = getelementptr inbounds %struct.anon.0* %2, i32 0, i32 0
- %.pr = load i32* %aclass, align 4
+ %aclass = getelementptr inbounds %struct.anon.0, %struct.anon.0* %2, i32 0, i32 0
+ %.pr = load i32, i32* %aclass, align 4
br label %for.body
for.body: ; preds = %for.bodythread-pre-split, %for.body
@@ -51,10 +51,10 @@ while.cond: ; preds = %while.body
while.body: ; preds = %while.body.lr.ph, %while.cond
%j.110 = phi i32 [ %j.1.ph13, %while.body.lr.ph ], [ %inc7, %while.cond ]
- %aclass_index = getelementptr inbounds %struct.anon* %0, i32 %j.110, i32 0
- %3 = load i32* %aclass_index, align 4
- %aclass5 = getelementptr inbounds %struct.anon.0* %2, i32 %3, i32 0
- %4 = load i32* %aclass5, align 4
+ %aclass_index = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 %j.110, i32 0
+ %3 = load i32, i32* %aclass_index, align 4
+ %aclass5 = getelementptr inbounds %struct.anon.0, %struct.anon.0* %2, i32 %3, i32 0
+ %4 = load i32, i32* %aclass5, align 4
%tobool = icmp eq i32 %4, 0
%inc7 = add nsw i32 %j.110, 1
br i1 %tobool, label %while.cond, label %if.then6
diff --git a/test/CodeGen/PowerPC/pr22711.ll b/test/CodeGen/PowerPC/pr22711.ll
new file mode 100644
index 000000000000..fb1e971d4416
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr22711.ll
@@ -0,0 +1,78 @@
+; Verify that the .toc section is aligned on an 8-byte boundary.
+
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -filetype=obj -o - | llvm-readobj --sections | FileCheck %s
+
+define void @test(i32* %a) {
+entry:
+ %a.addr = alloca i32*, align 8
+ store i32* %a, i32** %a.addr, align 8
+ %0 = load i32*, i32** %a.addr, align 8
+ %incdec.ptr = getelementptr inbounds i32, i32* %0, i32 1
+ store i32* %incdec.ptr, i32** %a.addr, align 8
+ %1 = load i32, i32* %0, align 4
+ switch i32 %1, label %sw.epilog [
+ i32 17, label %sw.bb
+ i32 13, label %sw.bb1
+ i32 11, label %sw.bb2
+ i32 7, label %sw.bb3
+ i32 5, label %sw.bb4
+ i32 3, label %sw.bb5
+ i32 2, label %sw.bb6
+ ]
+
+sw.bb: ; preds = %entry
+ %2 = load i32*, i32** %a.addr, align 8
+ store i32 2, i32* %2, align 4
+ br label %sw.epilog
+
+sw.bb1: ; preds = %entry
+ %3 = load i32*, i32** %a.addr, align 8
+ store i32 3, i32* %3, align 4
+ br label %sw.epilog
+
+sw.bb2: ; preds = %entry
+ %4 = load i32*, i32** %a.addr, align 8
+ store i32 5, i32* %4, align 4
+ br label %sw.epilog
+
+sw.bb3: ; preds = %entry
+ %5 = load i32*, i32** %a.addr, align 8
+ store i32 7, i32* %5, align 4
+ br label %sw.epilog
+
+sw.bb4: ; preds = %entry
+ %6 = load i32*, i32** %a.addr, align 8
+ store i32 11, i32* %6, align 4
+ br label %sw.epilog
+
+sw.bb5: ; preds = %entry
+ %7 = load i32*, i32** %a.addr, align 8
+ store i32 13, i32* %7, align 4
+ br label %sw.epilog
+
+sw.bb6: ; preds = %entry
+ %8 = load i32*, i32** %a.addr, align 8
+ store i32 17, i32* %8, align 4
+ br label %sw.epilog
+
+sw.epilog: ; preds = %entry, %sw.bb6, %sw.bb5, %sw.bb4, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb
+ ret void
+}
+
+; CHECK: Name: .toc
+; CHECK: AddressAlignment: 8
+; CHECK: Name: .rela.toc
+
+; This test was generated from the following C code from PR22711:
+
+;void test(int *a) {
+; switch (*a++) {
+; case 17: *a = 2; break;
+; case 13: *a = 3; break;
+; case 11: *a = 5; break;
+; case 7: *a = 7; break;
+; case 5: *a = 11; break;
+; case 3: *a = 13; break;
+; case 2: *a = 17; break;
+; }
+;}
diff --git a/test/CodeGen/PowerPC/preinc-ld-sel-crash.ll b/test/CodeGen/PowerPC/preinc-ld-sel-crash.ll
new file mode 100644
index 000000000000..cb2f035e5662
--- /dev/null
+++ b/test/CodeGen/PowerPC/preinc-ld-sel-crash.ll
@@ -0,0 +1,63 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+%t1 = type { %t2*, %t3* }
+%t2 = type <{ %t3*, i32, [4 x i8] }>
+%t3 = type { %t3* }
+
+@_ZN4Foam10SLListBase13endConstIter_E = external global %t1
+
+define void @_ZN4FoamrsIbEERNS_7IstreamES2_RNS_4ListIT_EE() #0 {
+entry:
+ switch i32 undef, label %if.else82 [
+ i32 9, label %if.then
+ i32 6, label %invoke.cont10
+ i32 1, label %invoke.cont61
+ ]
+
+if.then: ; preds = %entry
+ unreachable
+
+invoke.cont10: ; preds = %entry
+ unreachable
+
+invoke.cont61: ; preds = %entry
+ br i1 undef, label %if.end75, label %if.then64
+
+if.then64: ; preds = %invoke.cont61
+ unreachable
+
+if.end75: ; preds = %invoke.cont61
+ br i1 undef, label %if.then17.i, label %if.then.i181
+
+if.then.i181: ; preds = %if.end75
+ unreachable
+
+if.then17.i: ; preds = %if.end75
+ %tobool.i.i.i = icmp eq i32 undef, 0
+ %0 = load i64*, i64** undef, align 8
+ %agg.tmp.sroa.3.0.copyload33.in.i = select i1 %tobool.i.i.i, i64* bitcast (%t3** getelementptr inbounds (%t1, %t1* @_ZN4Foam10SLListBase13endConstIter_E, i64 0, i32 1) to i64*), i64* %0
+ %agg.tmp.sroa.3.0.copyload33.i = load i64, i64* %agg.tmp.sroa.3.0.copyload33.in.i, align 8
+ %1 = inttoptr i64 %agg.tmp.sroa.3.0.copyload33.i to %t3*
+ %2 = load %t3*, %t3** getelementptr inbounds (%t1, %t1* @_ZN4Foam10SLListBase13endConstIter_E, i64 0, i32 1), align 8
+ %cmp.i37.i = icmp eq %t3* %1, %2
+ br i1 %cmp.i37.i, label %invoke.cont79, label %for.body.lr.ph.i
+
+; CHECK-LABEL: @_ZN4FoamrsIbEERNS_7IstreamES2_RNS_4ListIT_EE
+
+for.body.lr.ph.i: ; preds = %if.then17.i
+ br label %for.body.i
+
+for.body.i: ; preds = %for.body.i, %for.body.lr.ph.i
+ br i1 undef, label %invoke.cont79, label %for.body.i
+
+invoke.cont79: ; preds = %for.body.i, %if.then17.i
+ unreachable
+
+if.else82: ; preds = %entry
+ ret void
+}
+
+attributes #0 = { "target-cpu"="a2q" }
+
diff --git a/test/CodeGen/PowerPC/preincprep-invoke.ll b/test/CodeGen/PowerPC/preincprep-invoke.ll
new file mode 100644
index 000000000000..0e09ff1b774a
--- /dev/null
+++ b/test/CodeGen/PowerPC/preincprep-invoke.ll
@@ -0,0 +1,50 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@.str1 = external unnamed_addr constant [1 x i8], align 1
+@.str2 = external unnamed_addr constant [39 x i8], align 1
+
+declare void @_ZN13CStdOutStreamlsEPKc()
+
+declare void @_ZN13CStdOutStream5FlushEv()
+
+declare i32 @__gxx_personality_v0(...)
+
+define void @_Z11GetPasswordP13CStdOutStreamb() {
+entry:
+ br label %for.cond.i.i
+
+for.cond.i.i: ; preds = %for.cond.i.i, %entry
+ br i1 undef, label %_ZN11CStringBaseIcEC2EPKc.exit.critedge, label %for.cond.i.i
+
+_ZN11CStringBaseIcEC2EPKc.exit.critedge: ; preds = %for.cond.i.i
+ invoke void @_ZN13CStdOutStreamlsEPKc()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont: ; preds = %_ZN11CStringBaseIcEC2EPKc.exit.critedge
+ invoke void @_ZN13CStdOutStream5FlushEv()
+ to label %invoke.cont4 unwind label %lpad
+
+invoke.cont4: ; preds = %invoke.cont
+ %call7 = invoke i8* @getpass()
+ to label %for.cond.i.i30 unwind label %lpad
+
+; CHECK-LABEL: @_Z11GetPasswordP13CStdOutStreamb
+; CHECK: addi {{[0-9]+}}, 3, -1
+
+for.cond.i.i30: ; preds = %for.cond.i.i30, %invoke.cont4
+ %indvars.iv.i.i26 = phi i64 [ %indvars.iv.next.i.i29, %for.cond.i.i30 ], [ 0, %invoke.cont4 ]
+ %arrayidx.i.i27 = getelementptr inbounds i8, i8* %call7, i64 %indvars.iv.i.i26
+ %0 = load i8, i8* %arrayidx.i.i27, align 1
+ %indvars.iv.next.i.i29 = add nuw nsw i64 %indvars.iv.i.i26, 1
+ br label %for.cond.i.i30
+
+lpad: ; preds = %invoke.cont4, %invoke.cont, %_ZN11CStringBaseIcEC2EPKc.exit.critedge
+ %1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ resume { i8*, i32 } undef
+}
+
+declare i8* @getpass()
+
diff --git a/test/CodeGen/PowerPC/private.ll b/test/CodeGen/PowerPC/private.ll
index 633fa651037f..4665fd246f33 100644
--- a/test/CodeGen/PowerPC/private.ll
+++ b/test/CodeGen/PowerPC/private.ll
@@ -19,7 +19,7 @@ define i32 @bar() nounwind {
; LINUX: lis{{.*}}.Lbaz
; OSX: lis{{.*}}l_baz
- %1 = load i32* @baz, align 4
+ %1 = load i32, i32* @baz, align 4
ret i32 %1
}
diff --git a/test/CodeGen/PowerPC/pwr7-gt-nop.ll b/test/CodeGen/PowerPC/pwr7-gt-nop.ll
index 8c8545d60df7..70f6dad362bf 100644
--- a/test/CodeGen/PowerPC/pwr7-gt-nop.ll
+++ b/test/CodeGen/PowerPC/pwr7-gt-nop.ll
@@ -8,11 +8,11 @@ define void @foo(float* nocapture %a, float* nocapture %b, float* nocapture read
; CHECK-LABEL: @foo
entry:
- %0 = load float* %b, align 4
+ %0 = load float, float* %b, align 4
store float %0, float* %a, align 4
- %1 = load float* %c, align 4
+ %1 = load float, float* %c, align 4
store float %1, float* %b, align 4
- %2 = load float* %a, align 4
+ %2 = load float, float* %a, align 4
store float %2, float* %d, align 4
ret void
diff --git a/test/CodeGen/PowerPC/qpx-bv-sint.ll b/test/CodeGen/PowerPC/qpx-bv-sint.ll
new file mode 100644
index 000000000000..0bc14ed4351a
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-bv-sint.ll
@@ -0,0 +1,33 @@
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-bgq-linux"
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+
+define void @s452() nounwind {
+entry:
+ br label %for.body4
+
+for.body4: ; preds = %for.body4, %entry
+ %conv.4 = sitofp i32 undef to double
+ %conv.5 = sitofp i32 undef to double
+ %mul.4.v.i0.1 = insertelement <2 x double> undef, double %conv.4, i32 0
+ %mul.4.v.i0.2 = insertelement <2 x double> %mul.4.v.i0.1, double %conv.5, i32 1
+ %mul.4 = fmul <2 x double> %mul.4.v.i0.2, undef
+ %add7.4 = fadd <2 x double> undef, %mul.4
+ store <2 x double> %add7.4, <2 x double>* undef, align 16
+ br i1 undef, label %for.end, label %for.body4
+
+for.end: ; preds = %for.body4
+ unreachable
+; CHECK-LABEL: @s452
+; CHECK: lfiwax [[REG1:[0-9]+]],
+; CHECK: fcfid [[REG2:[0-9]+]], [[REG1]]
+; FIXME: We could 'promote' this to a vector earlier and remove this splat.
+; CHECK: qvesplati {{[0-9]+}}, [[REG2]], 0
+; CHECK: qvfmul
+; CHECK: qvfadd
+; CHECK: qvesplati {{[0-9]+}},
+; FIXME: We can use qvstfcdx here instead of two stores.
+; CHECK: stfd
+; CHECK: stfd
+}
+
diff --git a/test/CodeGen/PowerPC/qpx-bv.ll b/test/CodeGen/PowerPC/qpx-bv.ll
new file mode 100644
index 000000000000..ae181de383b5
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-bv.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -mcpu=a2q | FileCheck %s
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+define <4 x double> @foo(double %f1, double %f2, double %f3, double %f4) {
+ %v1 = insertelement <4 x double> undef, double %f1, i32 0
+ %v2 = insertelement <4 x double> %v1, double %f2, i32 1
+ %v3 = insertelement <4 x double> %v2, double %f3, i32 2
+ %v4 = insertelement <4 x double> %v3, double %f4, i32 3
+ ret <4 x double> %v4
+
+; CHECK-LABEL: @foo
+; CHECK: qvgpci [[REG1:[0-9]+]], 275
+; CHECK-DAG: qvgpci [[REG2:[0-9]+]], 101
+; CHECK-DAG: qvfperm [[REG3:[0-9]+]], 3, 4, [[REG1]]
+; CHECK-DAG: qvfperm [[REG4:[0-9]+]], 1, 2, [[REG1]]
+; CHECK-DAG: qvfperm 1, [[REG4]], [[REG3]], [[REG2]]
+; CHECK: blr
+}
+
+define <4 x float> @goo(float %f1, float %f2, float %f3, float %f4) {
+ %v1 = insertelement <4 x float> undef, float %f1, i32 0
+ %v2 = insertelement <4 x float> %v1, float %f2, i32 1
+ %v3 = insertelement <4 x float> %v2, float %f3, i32 2
+ %v4 = insertelement <4 x float> %v3, float %f4, i32 3
+ ret <4 x float> %v4
+
+; CHECK-LABEL: @goo
+; CHECK: qvgpci [[REG1:[0-9]+]], 275
+; CHECK-DAG: qvgpci [[REG2:[0-9]+]], 101
+; CHECK-DAG: qvfperm [[REG3:[0-9]+]], 3, 4, [[REG1]]
+; CHECK-DAG: qvfperm [[REG4:[0-9]+]], 1, 2, [[REG1]]
+; CHECK-DAG: qvfperm 1, [[REG4]], [[REG3]], [[REG2]]
+; CHECK: blr
+}
+
diff --git a/test/CodeGen/PowerPC/qpx-func-clobber.ll b/test/CodeGen/PowerPC/qpx-func-clobber.ll
new file mode 100644
index 000000000000..511fa3827b0c
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-func-clobber.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+declare <4 x double> @foo(<4 x double> %p)
+
+define <4 x double> @bar(<4 x double> %p, <4 x double> %q) {
+entry:
+ %v = call <4 x double> @foo(<4 x double> %p)
+ %w = call <4 x double> @foo(<4 x double> %q)
+ %x = fadd <4 x double> %v, %w
+ ret <4 x double> %x
+
+; CHECK-LABEL: @bar
+; CHECK: qvstfdx 2,
+; CHECK: bl foo
+; CHECK: qvstfdx 1,
+; CHECK: qvlfdx 1,
+; CHECK: bl foo
+; CHECK: qvlfdx [[REG:[0-9]+]],
+; CHECK: qvfadd 1, [[REG]], 1
+}
+
diff --git a/test/CodeGen/PowerPC/qpx-load.ll b/test/CodeGen/PowerPC/qpx-load.ll
new file mode 100644
index 000000000000..7637c43850cc
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-load.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+define <4 x double> @foo(<4 x double>* %p) {
+entry:
+ %v = load <4 x double>, <4 x double>* %p, align 8
+ ret <4 x double> %v
+}
+
+; CHECK: @foo
+; CHECK-DAG: li [[REG1:[0-9]+]], 31
+; CHECK-DAG: qvlfdx [[REG4:[0-9]+]], 0, 3
+; CHECK-DAG: qvlfdx [[REG2:[0-9]+]], 3, [[REG1]]
+; CHECK-DAG: qvlpcldx [[REG3:[0-9]+]], 0, 3
+; CHECK-DAG: qvfperm 1, [[REG4]], [[REG2]], [[REG3]]
+; CHECK: blr
+
+define <4 x double> @bar(<4 x double>* %p) {
+entry:
+ %v = load <4 x double>, <4 x double>* %p, align 32
+ ret <4 x double> %v
+}
+
+; CHECK: @bar
+; CHECK: qvlfdx
+
diff --git a/test/CodeGen/PowerPC/qpx-recipest.ll b/test/CodeGen/PowerPC/qpx-recipest.ll
new file mode 100644
index 000000000000..0e01358e5791
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-recipest.ll
@@ -0,0 +1,194 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2q -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2q | FileCheck -check-prefix=CHECK-SAFE %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
+
+define <4 x double> @foo(<4 x double> %a, <4 x double> %b) nounwind {
+entry:
+ %x = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %b)
+ %r = fdiv <4 x double> %a, %x
+ ret <4 x double> %r
+
+; CHECK-LABEL: @foo
+; CHECK: qvfrsqrte
+; CHECK: qvfmul
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadd instead of a qvfnmsub
+; CHECK: qvfmadd
+; CHECK: qvfmadd
+; CHECK: qvfmul
+; CHECK: qvfmul
+; CHECK: qvfmadd
+; CHECK: qvfmul
+; CHECK: qvfmul
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @foo
+; CHECK-SAFE: fsqrt
+; CHECK-SAFE: fdiv
+; CHECK-SAFE: blr
+}
+
+define <4 x double> @foof(<4 x double> %a, <4 x float> %b) nounwind {
+entry:
+ %x = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %b)
+ %y = fpext <4 x float> %x to <4 x double>
+ %r = fdiv <4 x double> %a, %y
+ ret <4 x double> %r
+
+; CHECK-LABEL: @foof
+; CHECK: qvfrsqrtes
+; CHECK: qvfmuls
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadd instead of a qvfnmsubs
+; CHECK: qvfmadds
+; CHECK: qvfmadds
+; CHECK: qvfmuls
+; CHECK: qvfmul
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @foof
+; CHECK-SAFE: fsqrts
+; CHECK-SAFE: fdiv
+; CHECK-SAFE: blr
+}
+
+define <4 x float> @food(<4 x float> %a, <4 x double> %b) nounwind {
+entry:
+ %x = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %b)
+ %y = fptrunc <4 x double> %x to <4 x float>
+ %r = fdiv <4 x float> %a, %y
+ ret <4 x float> %r
+
+; CHECK-LABEL: @food
+; CHECK: qvfrsqrte
+; CHECK: qvfmul
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadd instead of a qvfnmsub
+; CHECK: qvfmadd
+; CHECK: qvfmadd
+; CHECK: qvfmul
+; CHECK: qvfmul
+; CHECK: qvfmadd
+; CHECK: qvfmul
+; CHECK: qvfrsp
+; CHECK: qvfmuls
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @food
+; CHECK-SAFE: fsqrt
+; CHECK-SAFE: fdivs
+; CHECK-SAFE: blr
+}
+
+define <4 x float> @goo(<4 x float> %a, <4 x float> %b) nounwind {
+entry:
+ %x = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %b)
+ %r = fdiv <4 x float> %a, %x
+ ret <4 x float> %r
+
+; CHECK-LABEL: @goo
+; CHECK: qvfrsqrtes
+; CHECK: qvfmuls
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadd instead of a qvfnmsubs
+; CHECK: qvfmadds
+; CHECK: qvfmadds
+; CHECK: qvfmuls
+; CHECK: qvfmuls
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @goo
+; CHECK-SAFE: fsqrts
+; CHECK-SAFE: fdivs
+; CHECK-SAFE: blr
+}
+
+define <4 x double> @foo2(<4 x double> %a, <4 x double> %b) nounwind {
+entry:
+ %r = fdiv <4 x double> %a, %b
+ ret <4 x double> %r
+
+; CHECK-LABEL: @foo2
+; CHECK: qvfre
+; CHECK: qvfnmsub
+; CHECK: qvfmadd
+; CHECK: qvfnmsub
+; CHECK: qvfmadd
+; CHECK: qvfmul
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @foo2
+; CHECK-SAFE: fdiv
+; CHECK-SAFE: blr
+}
+
+define <4 x float> @goo2(<4 x float> %a, <4 x float> %b) nounwind {
+entry:
+ %r = fdiv <4 x float> %a, %b
+ ret <4 x float> %r
+
+; CHECK-LABEL: @goo2
+; CHECK: qvfres
+; CHECK: qvfnmsubs
+; CHECK: qvfmadds
+; CHECK: qvfmuls
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @goo2
+; CHECK-SAFE: fdivs
+; CHECK-SAFE: blr
+}
+
+define <4 x double> @foo3(<4 x double> %a) nounwind {
+entry:
+ %r = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %a)
+ ret <4 x double> %r
+
+; CHECK-LABEL: @foo3
+; CHECK: qvfrsqrte
+; CHECK: qvfmul
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadd instead of a qvfnmsub
+; CHECK-DAG: qvfmadd
+; CHECK-DAG: qvfcmpeq
+; CHECK-DAG: qvfmadd
+; CHECK-DAG: qvfmul
+; CHECK-DAG: qvfmul
+; CHECK-DAG: qvfmadd
+; CHECK-DAG: qvfmul
+; CHECK-DAG: qvfmul
+; CHECK: qvfsel
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @foo3
+; CHECK-SAFE: fsqrt
+; CHECK-SAFE: blr
+}
+
+define <4 x float> @goo3(<4 x float> %a) nounwind {
+entry:
+ %r = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
+ ret <4 x float> %r
+
+; CHECK-LABEL: @goo3
+; CHECK: qvfrsqrtes
+; CHECK: qvfmuls
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadds instead of a qvfnmsubs
+; CHECK-DAG: qvfmadds
+; CHECK-DAG: qvfcmpeq
+; CHECK-DAG: qvfmadds
+; CHECK-DAG: qvfmuls
+; CHECK-DAG: qvfmuls
+; CHECK: qvfsel
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @goo3
+; CHECK-SAFE: fsqrts
+; CHECK-SAFE: blr
+}
+
diff --git a/test/CodeGen/PowerPC/qpx-rounding-ops.ll b/test/CodeGen/PowerPC/qpx-rounding-ops.ll
new file mode 100644
index 000000000000..6fdd8e6a7147
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-rounding-ops.ll
@@ -0,0 +1,109 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2q | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2q -enable-unsafe-fp-math | FileCheck -check-prefix=CHECK-FM %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define <4 x float> @test1(<4 x float> %x) nounwind {
+ %call = tail call <4 x float> @llvm.floor.v4f32(<4 x float> %x) nounwind readnone
+ ret <4 x float> %call
+
+; CHECK: test1:
+; CHECK: qvfrim 1, 1
+
+; CHECK-FM: test1:
+; CHECK-FM: qvfrim 1, 1
+}
+
+declare <4 x float> @llvm.floor.v4f32(<4 x float>) nounwind readnone
+
+define <4 x double> @test2(<4 x double> %x) nounwind {
+ %call = tail call <4 x double> @llvm.floor.v4f64(<4 x double> %x) nounwind readnone
+ ret <4 x double> %call
+
+; CHECK: test2:
+; CHECK: qvfrim 1, 1
+
+; CHECK-FM: test2:
+; CHECK-FM: qvfrim 1, 1
+}
+
+declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
+
+define <4 x float> @test3(<4 x float> %x) nounwind {
+ %call = tail call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %x) nounwind readnone
+ ret <4 x float> %call
+
+; CHECK: test3:
+; CHECK-NOT: qvfrin
+
+; CHECK-FM: test3:
+; CHECK-FM-NOT: qvfrin
+}
+
+declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) nounwind readnone
+
+define <4 x double> @test4(<4 x double> %x) nounwind {
+ %call = tail call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %x) nounwind readnone
+ ret <4 x double> %call
+
+; CHECK: test4:
+; CHECK-NOT: qvfrin
+
+; CHECK-FM: test4:
+; CHECK-FM-NOT: qvfrin
+}
+
+declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>) nounwind readnone
+
+define <4 x float> @test5(<4 x float> %x) nounwind {
+ %call = tail call <4 x float> @llvm.ceil.v4f32(<4 x float> %x) nounwind readnone
+ ret <4 x float> %call
+
+; CHECK: test5:
+; CHECK: qvfrip 1, 1
+
+; CHECK-FM: test5:
+; CHECK-FM: qvfrip 1, 1
+}
+
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
+
+define <4 x double> @test6(<4 x double> %x) nounwind {
+ %call = tail call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
+ ret <4 x double> %call
+
+; CHECK: test6:
+; CHECK: qvfrip 1, 1
+
+; CHECK-FM: test6:
+; CHECK-FM: qvfrip 1, 1
+}
+
+declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
+
+define <4 x float> @test9(<4 x float> %x) nounwind {
+ %call = tail call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) nounwind readnone
+ ret <4 x float> %call
+
+; CHECK: test9:
+; CHECK: qvfriz 1, 1
+
+; CHECK-FM: test9:
+; CHECK-FM: qvfriz 1, 1
+}
+
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>) nounwind readnone
+
+define <4 x double> @test10(<4 x double> %x) nounwind {
+ %call = tail call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) nounwind readnone
+ ret <4 x double> %call
+
+; CHECK: test10:
+; CHECK: qvfriz 1, 1
+
+; CHECK-FM: test10:
+; CHECK-FM: qvfriz 1, 1
+}
+
+declare <4 x double> @llvm.trunc.v4f64(<4 x double>) nounwind readnone
+
diff --git a/test/CodeGen/PowerPC/qpx-s-load.ll b/test/CodeGen/PowerPC/qpx-s-load.ll
new file mode 100644
index 000000000000..db147126c1ec
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-s-load.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+define <4 x float> @foo(<4 x float>* %p) {
+entry:
+ %v = load <4 x float>, <4 x float>* %p, align 4
+ ret <4 x float> %v
+}
+
+; CHECK: @foo
+; CHECK-DAG: li [[REG1:[0-9]+]], 15
+; CHECK-DAG: qvlfsx [[REG4:[0-9]+]], 0, 3
+; CHECK-DAG: qvlfsx [[REG2:[0-9]+]], 3, [[REG1]]
+; CHECK-DAG: qvlpclsx [[REG3:[0-9]+]], 0, 3
+; CHECK-DAG: qvfperm 1, [[REG4]], [[REG2]], [[REG3]]
+; CHECK: blr
+
+define <4 x float> @bar(<4 x float>* %p) {
+entry:
+ %v = load <4 x float>, <4 x float>* %p, align 16
+ ret <4 x float> %v
+}
+
+; CHECK: @bar
+; CHECK: qvlfsx
+
diff --git a/test/CodeGen/PowerPC/qpx-s-sel.ll b/test/CodeGen/PowerPC/qpx-s-sel.ll
new file mode 100644
index 000000000000..09a615c4597d
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-s-sel.ll
@@ -0,0 +1,144 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+@Q = constant <4 x i1> <i1 0, i1 undef, i1 1, i1 1>, align 16
+@R = global <4 x i1> <i1 0, i1 0, i1 0, i1 0>, align 16
+
+define <4 x float> @test1(<4 x float> %a, <4 x float> %b, <4 x i1> %c) nounwind readnone {
+entry:
+ %r = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %r
+
+; CHECK-LABEL: @test1
+; CHECK: qvfsel 1, 3, 1, 2
+; CHECK: blr
+}
+
+define <4 x float> @test2(<4 x float> %a, <4 x float> %b, i1 %c1, i1 %c2, i1 %c3, i1 %c4) nounwind readnone {
+entry:
+ %v = insertelement <4 x i1> undef, i1 %c1, i32 0
+ %v2 = insertelement <4 x i1> %v, i1 %c2, i32 1
+ %v3 = insertelement <4 x i1> %v2, i1 %c3, i32 2
+ %v4 = insertelement <4 x i1> %v3, i1 %c4, i32 3
+ %r = select <4 x i1> %v4, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %r
+
+; CHECK-LABEL: @test2
+; CHECK: stw
+; CHECK-DAG: qvlfiwzx [[REG1:[0-9]+]],
+; CHECK-DAG: qvlfdx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG1]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG2]]
+; CHECK: qvfsel 1, [[REG4]], 1, 2
+; CHECK: blr
+}
+
+define <4 x i1> @test3(<4 x i1> %a) nounwind readnone {
+entry:
+ %v = and <4 x i1> %a, <i1 0, i1 undef, i1 1, i1 1>
+ ret <4 x i1> %v
+
+; CHECK-LABEL: @test3
+; CHECK: qvlfsx [[REG:[0-9]+]],
+; CHECK: qvflogical 1, 1, [[REG]], 1
+; CHECK: blr
+}
+
+define <4 x i1> @test4(<4 x i1> %a) nounwind {
+entry:
+ %q = load <4 x i1>, <4 x i1>* @Q, align 16
+ %v = and <4 x i1> %a, %q
+ ret <4 x i1> %v
+
+; CHECK-LABEL: @test4
+; CHECK-DAG: lbz
+; CHECK-DAG: qvlfdx [[REG1:[0-9]+]],
+; CHECK-DAG: stw
+; CHECK-DAG: qvlfiwzx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG1]]
+; CHECK: qvflogical 1, 1, [[REG4]], 1
+; CHECK: blr
+}
+
+define void @test5(<4 x i1> %a) nounwind {
+entry:
+ store <4 x i1> %a, <4 x i1>* @R
+ ret void
+
+; CHECK-LABEL: @test5
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: stb
+; CHECK: blr
+}
+
+define i1 @test6(<4 x i1> %a) nounwind {
+entry:
+ %r = extractelement <4 x i1> %a, i32 2
+ ret i1 %r
+
+; CHECK-LABEL: @test6
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: blr
+}
+
+define i1 @test7(<4 x i1> %a) nounwind {
+entry:
+ %r = extractelement <4 x i1> %a, i32 2
+ %s = extractelement <4 x i1> %a, i32 3
+ %q = and i1 %r, %s
+ ret i1 %q
+
+; CHECK-LABEL: @test7
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK-DAG: lwz [[REG4:[0-9]+]],
+; FIXME: We're storing the vector twice, and that's silly.
+; CHECK-DAG: qvstfiwx [[REG3]],
+; CHECK: lwz [[REG5:[0-9]+]],
+; CHECK: and 3,
+; CHECK: blr
+}
+
+define i1 @test8(<3 x i1> %a) nounwind {
+entry:
+ %r = extractelement <3 x i1> %a, i32 2
+ ret i1 %r
+
+; CHECK-LABEL: @test8
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: blr
+}
+
+define <3 x float> @test9(<3 x float> %a, <3 x float> %b, i1 %c1, i1 %c2, i1 %c3) nounwind readnone {
+entry:
+ %v = insertelement <3 x i1> undef, i1 %c1, i32 0
+ %v2 = insertelement <3 x i1> %v, i1 %c2, i32 1
+ %v3 = insertelement <3 x i1> %v2, i1 %c3, i32 2
+ %r = select <3 x i1> %v3, <3 x float> %a, <3 x float> %b
+ ret <3 x float> %r
+
+; CHECK-LABEL: @test9
+; CHECK: stw
+; CHECK-DAG: qvlfiwzx [[REG1:[0-9]+]],
+; CHECK-DAG: qvlfdx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG1]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG2]]
+; CHECK: qvfsel 1, [[REG4]], 1, 2
+; CHECK: blr
+}
+
diff --git a/test/CodeGen/PowerPC/qpx-s-store.ll b/test/CodeGen/PowerPC/qpx-s-store.ll
new file mode 100644
index 000000000000..0bd6201f767c
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-s-store.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+define void @foo(<4 x float> %v, <4 x float>* %p) {
+entry:
+ store <4 x float> %v, <4 x float>* %p, align 4
+ ret void
+}
+
+; CHECK: @foo
+; CHECK: stfs
+; CHECK: stfs
+; CHECK: stfs
+; CHECK: stfs
+; CHECK: blr
+
+define void @bar(<4 x float> %v, <4 x float>* %p) {
+entry:
+ store <4 x float> %v, <4 x float>* %p, align 16
+ ret void
+}
+
+; CHECK: @bar
+; CHECK: qvstfsx
+
diff --git a/test/CodeGen/PowerPC/qpx-sel.ll b/test/CodeGen/PowerPC/qpx-sel.ll
new file mode 100644
index 000000000000..a375e6effbae
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-sel.ll
@@ -0,0 +1,152 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+@Q = constant <4 x i1> <i1 0, i1 undef, i1 1, i1 1>, align 16
+@R = global <4 x i1> <i1 0, i1 0, i1 0, i1 0>, align 16
+
+define <4 x double> @test1(<4 x double> %a, <4 x double> %b, <4 x i1> %c) nounwind readnone {
+entry:
+ %r = select <4 x i1> %c, <4 x double> %a, <4 x double> %b
+ ret <4 x double> %r
+
+; CHECK-LABEL: @test1
+; CHECK: qvfsel 1, 3, 1, 2
+; CHECK: blr
+}
+
+define <4 x double> @test2(<4 x double> %a, <4 x double> %b, i1 %c1, i1 %c2, i1 %c3, i1 %c4) nounwind readnone {
+entry:
+ %v = insertelement <4 x i1> undef, i1 %c1, i32 0
+ %v2 = insertelement <4 x i1> %v, i1 %c2, i32 1
+ %v3 = insertelement <4 x i1> %v2, i1 %c3, i32 2
+ %v4 = insertelement <4 x i1> %v3, i1 %c4, i32 3
+ %r = select <4 x i1> %v4, <4 x double> %a, <4 x double> %b
+ ret <4 x double> %r
+
+; CHECK-LABEL: @test2
+
+; FIXME: This load/store sequence is unnecessary.
+; CHECK-DAG: lbz
+; CHECK-DAG: stw
+
+; CHECK-DAG: qvlfiwzx [[REG1:[0-9]+]],
+; CHECK-DAG: qvlfdx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG1]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG2]]
+; CHECK: qvfsel 1, [[REG4]], 1, 2
+; CHECK: blr
+}
+
+define <4 x i1> @test3(<4 x i1> %a) nounwind readnone {
+entry:
+ %v = and <4 x i1> %a, <i1 0, i1 undef, i1 1, i1 1>
+ ret <4 x i1> %v
+
+; CHECK-LABEL: @test3
+; CHECK: qvlfsx [[REG:[0-9]+]],
+; CHECK: qvflogical 1, 1, [[REG]], 1
+; CHECK: blr
+}
+
+define <4 x i1> @test4(<4 x i1> %a) nounwind {
+entry:
+ %q = load <4 x i1>, <4 x i1>* @Q, align 16
+ %v = and <4 x i1> %a, %q
+ ret <4 x i1> %v
+
+; CHECK-LABEL: @test4
+; CHECK-DAG: lbz
+; CHECK-DAG: qvlfdx [[REG1:[0-9]+]],
+; CHECK-DAG: stw
+; CHECK-DAG: qvlfiwzx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG1]]
+; CHECK: qvflogical 1, 1, [[REG4]], 1
+; CHECK: blr
+}
+
+define void @test5(<4 x i1> %a) nounwind {
+entry:
+ store <4 x i1> %a, <4 x i1>* @R
+ ret void
+
+; CHECK-LABEL: @test5
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: stb
+; CHECK: blr
+}
+
+define i1 @test6(<4 x i1> %a) nounwind {
+entry:
+ %r = extractelement <4 x i1> %a, i32 2
+ ret i1 %r
+
+; CHECK-LABEL: @test6
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: blr
+}
+
+define i1 @test7(<4 x i1> %a) nounwind {
+entry:
+ %r = extractelement <4 x i1> %a, i32 2
+ %s = extractelement <4 x i1> %a, i32 3
+ %q = and i1 %r, %s
+ ret i1 %q
+
+; CHECK-LABEL: @test7
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK-DAG: lwz [[REG4:[0-9]+]],
+; FIXME: We're storing the vector twice, and that's silly.
+; CHECK-DAG: qvstfiwx [[REG3]],
+; CHECK-DAG: lwz [[REG5:[0-9]+]],
+; CHECK: and 3,
+; CHECK: blr
+}
+
+define i1 @test8(<3 x i1> %a) nounwind {
+entry:
+ %r = extractelement <3 x i1> %a, i32 2
+ ret i1 %r
+
+; CHECK-LABEL: @test8
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: blr
+}
+
+define <3 x double> @test9(<3 x double> %a, <3 x double> %b, i1 %c1, i1 %c2, i1 %c3) nounwind readnone {
+entry:
+ %v = insertelement <3 x i1> undef, i1 %c1, i32 0
+ %v2 = insertelement <3 x i1> %v, i1 %c2, i32 1
+ %v3 = insertelement <3 x i1> %v2, i1 %c3, i32 2
+ %r = select <3 x i1> %v3, <3 x double> %a, <3 x double> %b
+ ret <3 x double> %r
+
+; CHECK-LABEL: @test9
+
+; FIXME: This load/store sequence is unnecessary.
+; CHECK-DAG: lbz
+; CHECK-DAG: stw
+
+; CHECK-DAG: qvlfiwzx [[REG1:[0-9]+]],
+; CHECK-DAG: qvlfdx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG1]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG2]]
+; CHECK: qvfsel 1, [[REG4]], 1, 2
+; CHECK: blr
+}
+
diff --git a/test/CodeGen/PowerPC/qpx-split-vsetcc.ll b/test/CodeGen/PowerPC/qpx-split-vsetcc.ll
new file mode 100644
index 000000000000..c8cef0faeaa4
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-split-vsetcc.ll
@@ -0,0 +1,40 @@
+; RUN: llc -mcpu=a2q < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+; Function Attrs: nounwind
+define void @gsl_sf_legendre_Pl_deriv_array() #0 {
+entry:
+ br i1 undef, label %do.body.i, label %if.else.i
+
+do.body.i: ; preds = %entry
+ unreachable
+
+if.else.i: ; preds = %entry
+ br i1 undef, label %return, label %for.body46.lr.ph
+
+for.body46.lr.ph: ; preds = %if.else.i
+ br label %vector.body198
+
+vector.body198: ; preds = %vector.body198, %for.body46.lr.ph
+ %0 = icmp ne <4 x i32> undef, zeroinitializer
+ %1 = select <4 x i1> %0, <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>, <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>
+ %2 = fmul <4 x double> undef, %1
+ %3 = fmul <4 x double> undef, %2
+ %4 = fmul <4 x double> %3, undef
+ store <4 x double> %4, <4 x double>* undef, align 8
+ br label %vector.body198
+
+; CHECK-LABEL: @gsl_sf_legendre_Pl_deriv_array
+; CHECK: qvlfiwzx
+; CHECK: qvfcfidu
+; CHECK: qvfcmpeq
+; CHECK: qvfsel
+; CHECK: qvfmul
+
+return: ; preds = %if.else.i
+ ret void
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/qpx-store.ll b/test/CodeGen/PowerPC/qpx-store.ll
new file mode 100644
index 000000000000..2579d2c681c9
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-store.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+define void @foo(<4 x double> %v, <4 x double>* %p) {
+entry:
+ store <4 x double> %v, <4 x double>* %p, align 8
+ ret void
+}
+
+; CHECK: @foo
+; CHECK: stfd
+; CHECK: stfd
+; CHECK: stfd
+; CHECK: stfd
+; CHECK: blr
+
+define void @bar(<4 x double> %v, <4 x double>* %p) {
+entry:
+ store <4 x double> %v, <4 x double>* %p, align 32
+ ret void
+}
+
+; CHECK: @bar
+; CHECK: qvstfdx
+
diff --git a/test/CodeGen/PowerPC/qpx-unalperm.ll b/test/CodeGen/PowerPC/qpx-unalperm.ll
new file mode 100644
index 000000000000..51b340c5835c
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-unalperm.ll
@@ -0,0 +1,64 @@
+; RUN: llc < %s -mcpu=a2q | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+define <4 x double> @foo(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>, <4 x double>* %a, align 32
+ ret <4 x double> %r
+; CHECK: qvlfdx
+; CHECK: blr
+}
+
+define <4 x double> @bar(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>, <4 x double>* %a, align 8
+ %b = getelementptr <4 x double>, <4 x double>* %a, i32 16
+ %s = load <4 x double>, <4 x double>* %b, align 32
+ %t = fadd <4 x double> %r, %s
+ ret <4 x double> %t
+; CHECK: qvlpcldx
+; CHECK: qvlfdx
+; CHECK: qvfperm
+; CHECK: blr
+}
+
+define <4 x double> @bar1(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>, <4 x double>* %a, align 8
+ %b = getelementptr <4 x double>, <4 x double>* %a, i32 16
+ %s = load <4 x double>, <4 x double>* %b, align 8
+ %t = fadd <4 x double> %r, %s
+ ret <4 x double> %t
+}
+
+define <4 x double> @bar2(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>, <4 x double>* %a, align 8
+ %b = getelementptr <4 x double>, <4 x double>* %a, i32 1
+ %s = load <4 x double>, <4 x double>* %b, align 32
+ %t = fadd <4 x double> %r, %s
+ ret <4 x double> %t
+}
+
+define <4 x double> @bar3(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>, <4 x double>* %a, align 8
+ %b = getelementptr <4 x double>, <4 x double>* %a, i32 1
+ %s = load <4 x double>, <4 x double>* %b, align 8
+ %t = fadd <4 x double> %r, %s
+ ret <4 x double> %t
+}
+
+define <4 x double> @bar4(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>, <4 x double>* %a, align 8
+ %b = getelementptr <4 x double>, <4 x double>* %a, i32 1
+ %s = load <4 x double>, <4 x double>* %b, align 8
+ %c = getelementptr <4 x double>, <4 x double>* %b, i32 1
+ %t = load <4 x double>, <4 x double>* %c, align 8
+ %u = fadd <4 x double> %r, %s
+ %v = fadd <4 x double> %u, %t
+ ret <4 x double> %v
+}
+
diff --git a/test/CodeGen/PowerPC/quadint-return.ll b/test/CodeGen/PowerPC/quadint-return.ll
index 03499915e78e..0743ce4a95c1 100644
--- a/test/CodeGen/PowerPC/quadint-return.ll
+++ b/test/CodeGen/PowerPC/quadint-return.ll
@@ -8,7 +8,7 @@ define i128 @foo() nounwind {
entry:
%x = alloca i128, align 16
store i128 27, i128* %x, align 16
- %0 = load i128* %x, align 16
+ %0 = load i128, i128* %x, align 16
ret i128 %0
}
diff --git a/test/CodeGen/PowerPC/reg-coalesce-simple.ll b/test/CodeGen/PowerPC/reg-coalesce-simple.ll
index e0ddb4250fd2..3f9cb8a74270 100644
--- a/test/CodeGen/PowerPC/reg-coalesce-simple.ll
+++ b/test/CodeGen/PowerPC/reg-coalesce-simple.ll
@@ -3,8 +3,8 @@
%struct.foo = type { i32, i32, [0 x i8] }
define i32 @test(%struct.foo* %X) nounwind {
- %tmp1 = getelementptr %struct.foo* %X, i32 0, i32 2, i32 100 ; <i8*> [#uses=1]
- %tmp = load i8* %tmp1 ; <i8> [#uses=1]
+ %tmp1 = getelementptr %struct.foo, %struct.foo* %X, i32 0, i32 2, i32 100 ; <i8*> [#uses=1]
+ %tmp = load i8, i8* %tmp1 ; <i8> [#uses=1]
%tmp2 = zext i8 %tmp to i32 ; <i32> [#uses=1]
ret i32 %tmp2
}
diff --git a/test/CodeGen/PowerPC/reloc-align.ll b/test/CodeGen/PowerPC/reloc-align.ll
index 13d6adadfcae..754997bccbd6 100644
--- a/test/CodeGen/PowerPC/reloc-align.ll
+++ b/test/CodeGen/PowerPC/reloc-align.ll
@@ -24,7 +24,7 @@ entry:
define internal fastcc signext i32 @func_90(%struct.S1* byval nocapture %p_91) #0 {
entry:
%0 = bitcast %struct.S1* %p_91 to i64*
- %bf.load = load i64* %0, align 1
+ %bf.load = load i64, i64* %0, align 1
%bf.shl = shl i64 %bf.load, 26
%bf.ashr = ashr i64 %bf.shl, 54
%bf.cast = trunc i64 %bf.ashr to i32
diff --git a/test/CodeGen/PowerPC/remat-imm.ll b/test/CodeGen/PowerPC/remat-imm.ll
index 520921f57a93..ffae8a97cc83 100644
--- a/test/CodeGen/PowerPC/remat-imm.ll
+++ b/test/CodeGen/PowerPC/remat-imm.ll
@@ -9,7 +9,7 @@ define i32 @main() nounwind {
entry:
; CHECK: li 4, 128
; CHECK-NOT: mr 4, {{.*}}
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8]* @.str, i32 0, i32 0), i32 128, i32 128) nounwind
+ %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), i32 128, i32 128) nounwind
ret i32 0
}
diff --git a/test/CodeGen/PowerPC/resolvefi-basereg.ll b/test/CodeGen/PowerPC/resolvefi-basereg.ll
index 62c2d139920a..a613c3310a55 100644
--- a/test/CodeGen/PowerPC/resolvefi-basereg.ll
+++ b/test/CodeGen/PowerPC/resolvefi-basereg.ll
@@ -35,21 +35,21 @@ entry:
call void @llvm.memset.p0i8.i64(i8* bitcast (%struct.S1998* @s1998 to i8*), i8 0, i64 5168, i32 16, i1 false)
call void @llvm.memset.p0i8.i64(i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8 0, i64 25840, i32 16, i1 false)
call void @llvm.memset.p0i8.i64(i8* bitcast (%struct.Info* @info to i8*), i8 0, i64 832, i32 8, i1 false)
- store i8* bitcast (%struct.S1998* @s1998 to i8*), i8** getelementptr inbounds (%struct.Info* @info, i32 0, i32 2), align 8
- store i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8** getelementptr inbounds (%struct.Info* @info, i32 0, i32 3), align 8
- store i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 3) to i8*), i8** getelementptr inbounds (%struct.Info* @info, i32 0, i32 4), align 8
- store i64 5168, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 6), align 8
- store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
- store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 9), align 8
- store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 10), align 8
- %0 = load i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
+ store i8* bitcast (%struct.S1998* @s1998 to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 2), align 8
+ store i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 3), align 8
+ store i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 3) to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 4), align 8
+ store i64 5168, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 6), align 8
+ store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 8), align 8
+ store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 9), align 8
+ store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 10), align 8
+ %0 = load i64, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 8), align 8
%sub = sub i64 %0, 1
- %and = and i64 ptrtoint (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 3) to i64), %sub
+ %and = and i64 ptrtoint (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 3) to i64), %sub
%tobool = icmp ne i64 %and, 0
br i1 %tobool, label %if.then, label %if.end
if.then: ; preds = %entry
- %1 = load i32* @fails, align 4
+ %1 = load i32, i32* @fails, align 4
%inc = add nsw i32 %1, 1
store i32 %inc, i32* @fails, align 4
br label %if.end
@@ -57,299 +57,299 @@ if.then: ; preds = %entry
if.end: ; preds = %if.then, %entry
store i32 0, i32* %i, align 4
store i32 0, i32* %j, align 4
- %2 = load i32* %i, align 4
+ %2 = load i32, i32* %i, align 4
%idxprom = sext i32 %2 to i64
- %arrayidx = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom
- store i8* bitcast (i32** getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 0, i64 1) to i8*), i8** %arrayidx, align 8
- %3 = load i32* %i, align 4
+ %arrayidx = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom
+ store i8* bitcast (i32** getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 0, i64 1) to i8*), i8** %arrayidx, align 8
+ %3 = load i32, i32* %i, align 4
%idxprom1 = sext i32 %3 to i64
- %arrayidx2 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom1
store i64 8, i64* %arrayidx2, align 8
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%idxprom3 = sext i32 %4 to i64
- %arrayidx4 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom3
+ %arrayidx4 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom3
store i64 8, i64* %arrayidx4, align 8
- store i32* getelementptr inbounds ([256 x i32]* @intarray, i32 0, i64 190), i32** getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 0, i64 1), align 8
- store i32* getelementptr inbounds ([256 x i32]* @intarray, i32 0, i64 241), i32** getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 0, i64 1), align 8
- %5 = load i32* %i, align 4
+ store i32* getelementptr inbounds ([256 x i32], [256 x i32]* @intarray, i32 0, i64 190), i32** getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 0, i64 1), align 8
+ store i32* getelementptr inbounds ([256 x i32], [256 x i32]* @intarray, i32 0, i64 241), i32** getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 0, i64 1), align 8
+ %5 = load i32, i32* %i, align 4
%inc5 = add nsw i32 %5, 1
store i32 %inc5, i32* %i, align 4
- %6 = load i32* %i, align 4
+ %6 = load i32, i32* %i, align 4
%idxprom6 = sext i32 %6 to i64
- %arrayidx7 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom6
- store i8* bitcast (i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 1) to i8*), i8** %arrayidx7, align 8
- %7 = load i32* %i, align 4
+ %arrayidx7 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom6
+ store i8* bitcast (i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 1) to i8*), i8** %arrayidx7, align 8
+ %7 = load i32, i32* %i, align 4
%idxprom8 = sext i32 %7 to i64
- %arrayidx9 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom8
+ %arrayidx9 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom8
store i64 8, i64* %arrayidx9, align 8
- %8 = load i32* %i, align 4
+ %8 = load i32, i32* %i, align 4
%idxprom10 = sext i32 %8 to i64
- %arrayidx11 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom10
store i64 8, i64* %arrayidx11, align 8
- store i64 -3866974208859106459, i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 1), align 8
- store i64 -185376695371304091, i64* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 1), align 8
- %9 = load i32* %i, align 4
+ store i64 -3866974208859106459, i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 1), align 8
+ store i64 -185376695371304091, i64* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 1), align 8
+ %9 = load i32, i32* %i, align 4
%inc12 = add nsw i32 %9, 1
store i32 %inc12, i32* %i, align 4
- %10 = load i32* %i, align 4
+ %10 = load i32, i32* %i, align 4
%idxprom13 = sext i32 %10 to i64
- %arrayidx14 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom13
- store i8* bitcast (i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 2) to i8*), i8** %arrayidx14, align 8
- %11 = load i32* %i, align 4
+ %arrayidx14 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom13
+ store i8* bitcast (i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 2) to i8*), i8** %arrayidx14, align 8
+ %11 = load i32, i32* %i, align 4
%idxprom15 = sext i32 %11 to i64
- %arrayidx16 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom15
+ %arrayidx16 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom15
store i64 8, i64* %arrayidx16, align 8
- %12 = load i32* %i, align 4
+ %12 = load i32, i32* %i, align 4
%idxprom17 = sext i32 %12 to i64
- %arrayidx18 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom17
+ %arrayidx18 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom17
store i64 8, i64* %arrayidx18, align 8
- store i64 -963638028680427187, i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 2), align 8
- store i64 7510542175772455554, i64* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 2), align 8
- %13 = load i32* %i, align 4
+ store i64 -963638028680427187, i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 2), align 8
+ store i64 7510542175772455554, i64* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 2), align 8
+ %13 = load i32, i32* %i, align 4
%inc19 = add nsw i32 %13, 1
store i32 %inc19, i32* %i, align 4
- %14 = load i32* %i, align 4
+ %14 = load i32, i32* %i, align 4
%idxprom20 = sext i32 %14 to i64
- %arrayidx21 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom20
- store i8* bitcast (double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 3) to i8*), i8** %arrayidx21, align 8
- %15 = load i32* %i, align 4
+ %arrayidx21 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom20
+ store i8* bitcast (double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 3) to i8*), i8** %arrayidx21, align 8
+ %15 = load i32, i32* %i, align 4
%idxprom22 = sext i32 %15 to i64
- %arrayidx23 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom22
+ %arrayidx23 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom22
store i64 8, i64* %arrayidx23, align 8
- %16 = load i32* %i, align 4
+ %16 = load i32, i32* %i, align 4
%idxprom24 = sext i32 %16 to i64
- %arrayidx25 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom24
+ %arrayidx25 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom24
store i64 16, i64* %arrayidx25, align 8
- store double 0xC0F8783300000000, double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 3), align 16
- store double 0xC10DF3CCC0000000, double* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 3), align 16
- %17 = load i32* %i, align 4
+ store double 0xC0F8783300000000, double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 3), align 16
+ store double 0xC10DF3CCC0000000, double* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 3), align 16
+ %17 = load i32, i32* %i, align 4
%inc26 = add nsw i32 %17, 1
store i32 %inc26, i32* %i, align 4
- %18 = load i32* %i, align 4
+ %18 = load i32, i32* %i, align 4
%idxprom27 = sext i32 %18 to i64
- %arrayidx28 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom27
- store i8* bitcast (i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 4) to i8*), i8** %arrayidx28, align 8
- %19 = load i32* %i, align 4
+ %arrayidx28 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom27
+ store i8* bitcast (i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 4) to i8*), i8** %arrayidx28, align 8
+ %19 = load i32, i32* %i, align 4
%idxprom29 = sext i32 %19 to i64
- %arrayidx30 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom29
+ %arrayidx30 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom29
store i64 2, i64* %arrayidx30, align 8
- %20 = load i32* %i, align 4
+ %20 = load i32, i32* %i, align 4
%idxprom31 = sext i32 %20 to i64
- %arrayidx32 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom31
+ %arrayidx32 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom31
store i64 2, i64* %arrayidx32, align 8
- store i16 -15897, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 4), align 2
- store i16 30935, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 4), align 2
- %21 = load i32* %i, align 4
+ store i16 -15897, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 4), align 2
+ store i16 30935, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 4), align 2
+ %21 = load i32, i32* %i, align 4
%inc33 = add nsw i32 %21, 1
store i32 %inc33, i32* %i, align 4
- store i32 -419541644, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 5), align 4
- store i32 2125926812, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 5), align 4
- %22 = load i32* %j, align 4
+ store i32 -419541644, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 5), align 4
+ store i32 2125926812, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 5), align 4
+ %22 = load i32, i32* %j, align 4
%inc34 = add nsw i32 %22, 1
store i32 %inc34, i32* %j, align 4
- %23 = load i32* %i, align 4
+ %23 = load i32, i32* %i, align 4
%idxprom35 = sext i32 %23 to i64
- %arrayidx36 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom35
- store i8* bitcast (double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0) to i8*), i8** %arrayidx36, align 8
- %24 = load i32* %i, align 4
+ %arrayidx36 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom35
+ store i8* bitcast (double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0) to i8*), i8** %arrayidx36, align 8
+ %24 = load i32, i32* %i, align 4
%idxprom37 = sext i32 %24 to i64
- %arrayidx38 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom37
+ %arrayidx38 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom37
store i64 8, i64* %arrayidx38, align 8
- %25 = load i32* %i, align 4
+ %25 = load i32, i32* %i, align 4
%idxprom39 = sext i32 %25 to i64
- %arrayidx40 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom39
+ %arrayidx40 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom39
store i64 8, i64* %arrayidx40, align 8
- store double 0xC0FC765780000000, double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
- store double 0xC1025CD7A0000000, double* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
- %26 = load i32* %i, align 4
+ store double 0xC0FC765780000000, double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
+ store double 0xC1025CD7A0000000, double* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
+ %26 = load i32, i32* %i, align 4
%inc41 = add nsw i32 %26, 1
store i32 %inc41, i32* %i, align 4
- %bf.load = load i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+ %bf.load = load i32, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
%bf.clear = and i32 %bf.load, 7
%bf.set = or i32 %bf.clear, 16
- store i32 %bf.set, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
- %bf.load42 = load i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+ store i32 %bf.set, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+ %bf.load42 = load i32, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
%bf.clear43 = and i32 %bf.load42, 7
%bf.set44 = or i32 %bf.clear43, 24
- store i32 %bf.set44, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
- %27 = load i32* %j, align 4
+ store i32 %bf.set44, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+ %27 = load i32, i32* %j, align 4
%inc45 = add nsw i32 %27, 1
store i32 %inc45, i32* %j, align 4
- %bf.load46 = load i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+ %bf.load46 = load i16, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
%bf.clear47 = and i16 %bf.load46, 127
- store i16 %bf.clear47, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
- %bf.load48 = load i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+ store i16 %bf.clear47, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+ %bf.load48 = load i16, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
%bf.clear49 = and i16 %bf.load48, 127
- store i16 %bf.clear49, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
- %28 = load i32* %j, align 4
+ store i16 %bf.clear49, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+ %28 = load i32, i32* %j, align 4
%inc50 = add nsw i32 %28, 1
store i32 %inc50, i32* %j, align 4
- %bf.load51 = load i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+ %bf.load51 = load i32, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
%bf.clear52 = and i32 %bf.load51, 63
- store i32 %bf.clear52, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
- %bf.load53 = load i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+ store i32 %bf.clear52, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+ %bf.load53 = load i32, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
%bf.clear54 = and i32 %bf.load53, 63
%bf.set55 = or i32 %bf.clear54, 64
- store i32 %bf.set55, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
- %29 = load i32* %j, align 4
+ store i32 %bf.set55, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+ %29 = load i32, i32* %j, align 4
%inc56 = add nsw i32 %29, 1
store i32 %inc56, i32* %j, align 4
- %bf.load57 = load i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
+ %bf.load57 = load i24, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
%bf.clear58 = and i24 %bf.load57, 63
- store i24 %bf.clear58, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
- %bf.load59 = load i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
+ store i24 %bf.clear58, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
+ %bf.load59 = load i24, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
%bf.clear60 = and i24 %bf.load59, 63
- store i24 %bf.clear60, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
- %30 = load i32* %j, align 4
+ store i24 %bf.clear60, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
+ %30 = load i32, i32* %j, align 4
%inc61 = add nsw i32 %30, 1
store i32 %inc61, i32* %j, align 4
- %31 = load i32* %i, align 4
+ %31 = load i32, i32* %i, align 4
%idxprom62 = sext i32 %31 to i64
- %arrayidx63 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom62
- store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), i8** %arrayidx63, align 8
- %32 = load i32* %i, align 4
+ %arrayidx63 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom62
+ store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), i8** %arrayidx63, align 8
+ %32 = load i32, i32* %i, align 4
%idxprom64 = sext i32 %32 to i64
- %arrayidx65 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom64
+ %arrayidx65 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom64
store i64 1, i64* %arrayidx65, align 8
- %33 = load i32* %i, align 4
+ %33 = load i32, i32* %i, align 4
%idxprom66 = sext i32 %33 to i64
- %arrayidx67 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom66
+ %arrayidx67 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom66
store i64 1, i64* %arrayidx67, align 8
- store i8 -83, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
- store i8 -67, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
- %34 = load i32* %i, align 4
+ store i8 -83, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
+ store i8 -67, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
+ %34 = load i32, i32* %i, align 4
%inc68 = add nsw i32 %34, 1
store i32 %inc68, i32* %i, align 4
- %35 = load i32* %i, align 4
+ %35 = load i32, i32* %i, align 4
%idxprom69 = sext i32 %35 to i64
- %arrayidx70 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom69
- store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), i8** %arrayidx70, align 8
- %36 = load i32* %i, align 4
+ %arrayidx70 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom69
+ store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), i8** %arrayidx70, align 8
+ %36 = load i32, i32* %i, align 4
%idxprom71 = sext i32 %36 to i64
- %arrayidx72 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom71
+ %arrayidx72 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom71
store i64 1, i64* %arrayidx72, align 8
- %37 = load i32* %i, align 4
+ %37 = load i32, i32* %i, align 4
%idxprom73 = sext i32 %37 to i64
- %arrayidx74 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom73
+ %arrayidx74 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom73
store i64 1, i64* %arrayidx74, align 8
- store i8 34, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
- store i8 64, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
- %38 = load i32* %i, align 4
+ store i8 34, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
+ store i8 64, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
+ %38 = load i32, i32* %i, align 4
%inc75 = add nsw i32 %38, 1
store i32 %inc75, i32* %i, align 4
- %39 = load i32* %i, align 4
+ %39 = load i32, i32* %i, align 4
%idxprom76 = sext i32 %39 to i64
- %arrayidx77 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom76
- store i8* bitcast (i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3) to i8*), i8** %arrayidx77, align 8
- %40 = load i32* %i, align 4
+ %arrayidx77 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom76
+ store i8* bitcast (i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3) to i8*), i8** %arrayidx77, align 8
+ %40 = load i32, i32* %i, align 4
%idxprom78 = sext i32 %40 to i64
- %arrayidx79 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom78
+ %arrayidx79 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom78
store i64 4, i64* %arrayidx79, align 8
- %41 = load i32* %i, align 4
+ %41 = load i32, i32* %i, align 4
%idxprom80 = sext i32 %41 to i64
- %arrayidx81 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom80
+ %arrayidx81 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom80
store i64 4, i64* %arrayidx81, align 8
- store i32 -3, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
- store i32 -3, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
- %42 = load i32* %i, align 4
+ store i32 -3, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
+ store i32 -3, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
+ %42 = load i32, i32* %i, align 4
%inc82 = add nsw i32 %42, 1
store i32 %inc82, i32* %i, align 4
- %43 = load i32* %i, align 4
+ %43 = load i32, i32* %i, align 4
%idxprom83 = sext i32 %43 to i64
- %arrayidx84 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom83
- store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), i8** %arrayidx84, align 8
- %44 = load i32* %i, align 4
+ %arrayidx84 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom83
+ store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), i8** %arrayidx84, align 8
+ %44 = load i32, i32* %i, align 4
%idxprom85 = sext i32 %44 to i64
- %arrayidx86 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom85
+ %arrayidx86 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom85
store i64 1, i64* %arrayidx86, align 8
- %45 = load i32* %i, align 4
+ %45 = load i32, i32* %i, align 4
%idxprom87 = sext i32 %45 to i64
- %arrayidx88 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom87
+ %arrayidx88 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom87
store i64 1, i64* %arrayidx88, align 8
- store i8 106, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), align 1
- store i8 -102, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
- %46 = load i32* %i, align 4
+ store i8 106, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), align 1
+ store i8 -102, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
+ %46 = load i32, i32* %i, align 4
%inc89 = add nsw i32 %46, 1
store i32 %inc89, i32* %i, align 4
- %47 = load i32* %i, align 4
+ %47 = load i32, i32* %i, align 4
%idxprom90 = sext i32 %47 to i64
- %arrayidx91 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom90
- store i8* bitcast (i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 7) to i8*), i8** %arrayidx91, align 8
- %48 = load i32* %i, align 4
+ %arrayidx91 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom90
+ store i8* bitcast (i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 7) to i8*), i8** %arrayidx91, align 8
+ %48 = load i32, i32* %i, align 4
%idxprom92 = sext i32 %48 to i64
- %arrayidx93 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom92
+ %arrayidx93 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom92
store i64 2, i64* %arrayidx93, align 8
- %49 = load i32* %i, align 4
+ %49 = load i32, i32* %i, align 4
%idxprom94 = sext i32 %49 to i64
- %arrayidx95 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom94
+ %arrayidx95 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom94
store i64 2, i64* %arrayidx95, align 8
- store i16 29665, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 7), align 2
- store i16 7107, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 7), align 2
- %50 = load i32* %i, align 4
+ store i16 29665, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 7), align 2
+ store i16 7107, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 7), align 2
+ %50 = load i32, i32* %i, align 4
%inc96 = add nsw i32 %50, 1
store i32 %inc96, i32* %i, align 4
- %51 = load i32* %i, align 4
+ %51 = load i32, i32* %i, align 4
%idxprom97 = sext i32 %51 to i64
- %arrayidx98 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom97
- store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 8), i8** %arrayidx98, align 8
- %52 = load i32* %i, align 4
+ %arrayidx98 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom97
+ store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 8), i8** %arrayidx98, align 8
+ %52 = load i32, i32* %i, align 4
%idxprom99 = sext i32 %52 to i64
- %arrayidx100 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom99
+ %arrayidx100 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom99
store i64 1, i64* %arrayidx100, align 8
- %53 = load i32* %i, align 4
+ %53 = load i32, i32* %i, align 4
%idxprom101 = sext i32 %53 to i64
- %arrayidx102 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom101
+ %arrayidx102 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom101
store i64 1, i64* %arrayidx102, align 8
- store i8 52, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 8), align 1
- store i8 -86, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 8), align 1
- %54 = load i32* %i, align 4
+ store i8 52, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 8), align 1
+ store i8 -86, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 8), align 1
+ %54 = load i32, i32* %i, align 4
%inc103 = add nsw i32 %54, 1
store i32 %inc103, i32* %i, align 4
- %55 = load i32* %i, align 4
+ %55 = load i32, i32* %i, align 4
%idxprom104 = sext i32 %55 to i64
- %arrayidx105 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom104
- store i8* bitcast (i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 9) to i8*), i8** %arrayidx105, align 8
- %56 = load i32* %i, align 4
+ %arrayidx105 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom104
+ store i8* bitcast (i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 9) to i8*), i8** %arrayidx105, align 8
+ %56 = load i32, i32* %i, align 4
%idxprom106 = sext i32 %56 to i64
- %arrayidx107 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom106
+ %arrayidx107 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom106
store i64 4, i64* %arrayidx107, align 8
- %57 = load i32* %i, align 4
+ %57 = load i32, i32* %i, align 4
%idxprom108 = sext i32 %57 to i64
- %arrayidx109 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom108
+ %arrayidx109 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom108
store i64 4, i64* %arrayidx109, align 8
- store i32 -54118453, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 9), align 4
- store i32 1668755823, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 9), align 4
- %58 = load i32* %i, align 4
+ store i32 -54118453, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 9), align 4
+ store i32 1668755823, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 9), align 4
+ %58 = load i32, i32* %i, align 4
%inc110 = add nsw i32 %58, 1
store i32 %inc110, i32* %i, align 4
store i32 %inc110, i32* %tmp
- %59 = load i32* %tmp
- %60 = load i32* %i, align 4
- store i32 %60, i32* getelementptr inbounds (%struct.Info* @info, i32 0, i32 0), align 4
- %61 = load i32* %j, align 4
- store i32 %61, i32* getelementptr inbounds (%struct.Info* @info, i32 0, i32 1), align 4
+ %59 = load i32, i32* %tmp
+ %60 = load i32, i32* %i, align 4
+ store i32 %60, i32* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 0), align 4
+ %61 = load i32, i32* %j, align 4
+ store i32 %61, i32* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 1), align 4
%62 = bitcast %struct.S1998* %agg.tmp111 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %62, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
%63 = bitcast %struct.S1998* %agg.tmp112 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %63, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
- call void @check1998(%struct.S1998* sret %agg.tmp, %struct.S1998* byval align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval align 16 %agg.tmp112)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %63, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ call void @check1998(%struct.S1998* sret %agg.tmp, %struct.S1998* byval align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval align 16 %agg.tmp112)
call void @checkx1998(%struct.S1998* byval align 16 %agg.tmp)
%64 = bitcast %struct.S1998* %agg.tmp113 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %64, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
%65 = bitcast %struct.S1998* %agg.tmp114 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %65, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %65, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
%66 = bitcast %struct.S1998* %agg.tmp115 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %66, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
- call void (i32, ...)* @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval align 16 %agg.tmp113, i64 2, %struct.S1998* byval align 16 %agg.tmp114, %struct.S1998* byval align 16 %agg.tmp115)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %66, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ call void (i32, ...) @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval align 16 %agg.tmp113, i64 2, %struct.S1998* byval align 16 %agg.tmp114, %struct.S1998* byval align 16 %agg.tmp115)
%67 = bitcast %struct.S1998* %agg.tmp116 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %67, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
%68 = bitcast %struct.S1998* %agg.tmp117 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %68, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
%69 = bitcast %struct.S1998* %agg.tmp118 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %69, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %69, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
%70 = bitcast %struct.S1998* %agg.tmp119 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %70, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
- call void (i32, ...)* @check1998va(i32 signext 2, %struct.S1998* byval align 16 %agg.tmp116, %struct.S1998* byval align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval align 16 %agg.tmp118, %struct.S1998* byval align 16 %agg.tmp119)
+ call void (i32, ...) @check1998va(i32 signext 2, %struct.S1998* byval align 16 %agg.tmp116, %struct.S1998* byval align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval align 16 %agg.tmp118, %struct.S1998* byval align 16 %agg.tmp119)
ret void
}
diff --git a/test/CodeGen/PowerPC/resolvefi-disp.ll b/test/CodeGen/PowerPC/resolvefi-disp.ll
index ca42bcd767a0..a1c2070a6f44 100644
--- a/test/CodeGen/PowerPC/resolvefi-disp.ll
+++ b/test/CodeGen/PowerPC/resolvefi-disp.ll
@@ -41,23 +41,23 @@ entry:
call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 11104, i32 32, i1 false)
%8 = bitcast %struct.S2760* %b2 to i8*
call void @llvm.memset.p0i8.i64(i8* %8, i8 0, i64 11104, i32 32, i1 false)
- %b = getelementptr inbounds %struct.S2760* %arg0, i32 0, i32 1
- %g = getelementptr inbounds %struct.anon* %b, i32 0, i32 1
- %9 = load i64* %g, align 8
- %10 = load i64* getelementptr inbounds (%struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
+ %b = getelementptr inbounds %struct.S2760, %struct.S2760* %arg0, i32 0, i32 1
+ %g = getelementptr inbounds %struct.anon, %struct.anon* %b, i32 0, i32 1
+ %9 = load i64, i64* %g, align 8
+ %10 = load i64, i64* getelementptr inbounds (%struct.S2760, %struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
%cmp = icmp ne i64 %9, %10
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- %11 = load i32* @fails, align 4
+ %11 = load i32, i32* @fails, align 4
%inc = add nsw i32 %11, 1
store i32 %inc, i32* @fails, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
- %12 = load i64* getelementptr inbounds (%struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
- %b3 = getelementptr inbounds %struct.S2760* %ret, i32 0, i32 1
- %g4 = getelementptr inbounds %struct.anon* %b3, i32 0, i32 1
+ %12 = load i64, i64* getelementptr inbounds (%struct.S2760, %struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
+ %b3 = getelementptr inbounds %struct.S2760, %struct.S2760* %ret, i32 0, i32 1
+ %g4 = getelementptr inbounds %struct.anon, %struct.anon* %b3, i32 0, i32 1
store i64 %12, i64* %g4, align 8
%13 = bitcast %struct.S2760* %agg.result to i8*
%14 = bitcast %struct.S2760* %ret to i8*
diff --git a/test/CodeGen/PowerPC/retaddr2.ll b/test/CodeGen/PowerPC/retaddr2.ll
index 8fa3b4d13b7e..8581f6cb9a38 100644
--- a/test/CodeGen/PowerPC/retaddr2.ll
+++ b/test/CodeGen/PowerPC/retaddr2.ll
@@ -12,8 +12,7 @@ entry:
; CHECK-LABEL: @test1
; CHECK: mflr 0
; CHECK: std 0, 16(1)
-; FIXME: These next two lines don't both need to load the same value.
-; CHECK-DAG: ld 3, 16(1)
+; CHECK-DAG: ld 3, 64(1)
; CHECK-DAG: ld 0, 16(1)
; CHECK: mtlr 0
; CHECK: blr
diff --git a/test/CodeGen/PowerPC/return-val-i128.ll b/test/CodeGen/PowerPC/return-val-i128.ll
index e14a43809a7b..2f924096661a 100644
--- a/test/CodeGen/PowerPC/return-val-i128.ll
+++ b/test/CodeGen/PowerPC/return-val-i128.ll
@@ -7,29 +7,29 @@ entry:
%tmp = alloca i128, align 16 ; <i128*> [#uses=3]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float %a, float* %a_addr
- %tmp1 = load float* %a_addr, align 4 ; <float> [#uses=1]
+ %tmp1 = load float, float* %a_addr, align 4 ; <float> [#uses=1]
%tmp2 = fcmp olt float %tmp1, 0.000000e+00 ; <i1> [#uses=1]
%tmp23 = zext i1 %tmp2 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp23, 0 ; <i1> [#uses=1]
br i1 %toBool, label %bb, label %bb8
bb: ; preds = %entry
- %tmp4 = load float* %a_addr, align 4 ; <float> [#uses=1]
+ %tmp4 = load float, float* %a_addr, align 4 ; <float> [#uses=1]
%tmp5 = fsub float -0.000000e+00, %tmp4 ; <float> [#uses=1]
%tmp6 = call i128 @__fixunssfDI( float %tmp5 ) nounwind ; <i128> [#uses=1]
%tmp7 = sub i128 0, %tmp6 ; <i128> [#uses=1]
store i128 %tmp7, i128* %tmp, align 16
br label %bb11
bb8: ; preds = %entry
- %tmp9 = load float* %a_addr, align 4 ; <float> [#uses=1]
+ %tmp9 = load float, float* %a_addr, align 4 ; <float> [#uses=1]
%tmp10 = call i128 @__fixunssfDI( float %tmp9 ) nounwind ; <i128> [#uses=1]
store i128 %tmp10, i128* %tmp, align 16
br label %bb11
bb11: ; preds = %bb8, %bb
- %tmp12 = load i128* %tmp, align 16 ; <i128> [#uses=1]
+ %tmp12 = load i128, i128* %tmp, align 16 ; <i128> [#uses=1]
store i128 %tmp12, i128* %retval, align 16
br label %return
return: ; preds = %bb11
- %retval13 = load i128* %retval ; <i128> [#uses=1]
+ %retval13 = load i128, i128* %retval ; <i128> [#uses=1]
ret i128 %retval13
}
diff --git a/test/CodeGen/PowerPC/rlwimi-and.ll b/test/CodeGen/PowerPC/rlwimi-and.ll
index 9433f8e3dee2..59f704ee16bc 100644
--- a/test/CodeGen/PowerPC/rlwimi-and.ll
+++ b/test/CodeGen/PowerPC/rlwimi-and.ll
@@ -16,11 +16,11 @@ codeRepl12: ; preds = %codeRepl4
unreachable
codeRepl17: ; preds = %codeRepl4
- %0 = load i8* undef, align 2
+ %0 = load i8, i8* undef, align 2
%1 = and i8 %0, 1
%not.tobool.i.i.i = icmp eq i8 %1, 0
%2 = select i1 %not.tobool.i.i.i, i16 0, i16 256
- %3 = load i8* undef, align 1
+ %3 = load i8, i8* undef, align 1
%4 = and i8 %3, 1
%not.tobool.i.1.i.i = icmp eq i8 %4, 0
%rvml38.sroa.1.1.insert.ext = select i1 %not.tobool.i.1.i.i, i16 0, i16 1
@@ -29,7 +29,7 @@ codeRepl17: ; preds = %codeRepl4
unreachable
; CHECK: @test
-; CHECK: rlwinm [[R1:[0-9]+]], {{[0-9]+}}, 0, 31, 31
+; CHECK: clrlwi [[R1:[0-9]+]], {{[0-9]+}}, 31
; CHECK: rlwimi [[R1]], {{[0-9]+}}, 8, 23, 23
codeRepl29: ; preds = %codeRepl1
diff --git a/test/CodeGen/PowerPC/rlwimi-commute.ll b/test/CodeGen/PowerPC/rlwimi-commute.ll
index 3f90008c006b..cd0f49ed7807 100644
--- a/test/CodeGen/PowerPC/rlwimi-commute.ll
+++ b/test/CodeGen/PowerPC/rlwimi-commute.ll
@@ -4,8 +4,8 @@
; Make sure there is no register-register copies here.
define void @test1(i32* %A, i32* %B, i32* %D, i32* %E) {
- %A.upgrd.1 = load i32* %A ; <i32> [#uses=2]
- %B.upgrd.2 = load i32* %B ; <i32> [#uses=1]
+ %A.upgrd.1 = load i32, i32* %A ; <i32> [#uses=2]
+ %B.upgrd.2 = load i32, i32* %B ; <i32> [#uses=1]
%X = and i32 %A.upgrd.1, 15 ; <i32> [#uses=1]
%Y = and i32 %B.upgrd.2, -16 ; <i32> [#uses=1]
%Z = or i32 %X, %Y ; <i32> [#uses=1]
@@ -15,8 +15,8 @@ define void @test1(i32* %A, i32* %B, i32* %D, i32* %E) {
}
define void @test2(i32* %A, i32* %B, i32* %D, i32* %E) {
- %A.upgrd.3 = load i32* %A ; <i32> [#uses=1]
- %B.upgrd.4 = load i32* %B ; <i32> [#uses=2]
+ %A.upgrd.3 = load i32, i32* %A ; <i32> [#uses=1]
+ %B.upgrd.4 = load i32, i32* %B ; <i32> [#uses=2]
%X = and i32 %A.upgrd.3, 15 ; <i32> [#uses=1]
%Y = and i32 %B.upgrd.4, -16 ; <i32> [#uses=1]
%Z = or i32 %X, %Y ; <i32> [#uses=1]
diff --git a/test/CodeGen/PowerPC/rlwimi-dyn-and.ll b/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
index e02801fafbf5..76f3da66dd04 100644
--- a/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
+++ b/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
@@ -4,13 +4,13 @@ target triple = "powerpc64-unknown-linux-gnu"
define i32 @test1() #0 {
entry:
- %conv67.reload = load i32* undef
+ %conv67.reload = load i32, i32* undef
%const = bitcast i32 65535 to i32
br label %next
next:
%shl161 = shl nuw nsw i32 %conv67.reload, 15
- %0 = load i8* undef, align 1
+ %0 = load i8, i8* undef, align 1
%conv169 = zext i8 %0 to i32
%shl170 = shl nuw nsw i32 %conv169, 7
%const_mat = add i32 %const, -32767
@@ -25,13 +25,13 @@ next:
define i32 @test2() #0 {
entry:
- %conv67.reload = load i32* undef
+ %conv67.reload = load i32, i32* undef
%const = bitcast i32 65535 to i32
br label %next
next:
%shl161 = shl nuw nsw i32 %conv67.reload, 15
- %0 = load i8* undef, align 1
+ %0 = load i8, i8* undef, align 1
%conv169 = zext i8 %0 to i32
%shl170 = shl nuw nsw i32 %conv169, 7
%shl161.masked = and i32 %shl161, 32768
diff --git a/test/CodeGen/PowerPC/rm-zext.ll b/test/CodeGen/PowerPC/rm-zext.ll
index 33995e114d27..97c546c0145f 100644
--- a/test/CodeGen/PowerPC/rm-zext.ll
+++ b/test/CodeGen/PowerPC/rm-zext.ll
@@ -45,7 +45,7 @@ declare i32 @llvm.bswap.i32(i32) #0
; Function Attrs: nounwind readonly
define zeroext i32 @bs32(i32* nocapture readonly %x) #1 {
entry:
- %0 = load i32* %x, align 4
+ %0 = load i32, i32* %x, align 4
%1 = tail call i32 @llvm.bswap.i32(i32 %0)
ret i32 %1
@@ -57,7 +57,7 @@ entry:
; Function Attrs: nounwind readonly
define zeroext i16 @bs16(i16* nocapture readonly %x) #1 {
entry:
- %0 = load i16* %x, align 2
+ %0 = load i16, i16* %x, align 2
%1 = tail call i16 @llvm.bswap.i16(i16 %0)
ret i16 %1
diff --git a/test/CodeGen/PowerPC/rotl-2.ll b/test/CodeGen/PowerPC/rotl-2.ll
index d32ef59be6c4..86539b6c119c 100644
--- a/test/CodeGen/PowerPC/rotl-2.ll
+++ b/test/CodeGen/PowerPC/rotl-2.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=ppc32 | grep rlwinm | count 4
-; RUN: llc < %s -march=ppc32 | grep rlwnm | count 2
+; RUN: llc < %s -march=ppc32 | grep rotlwi | count 2
+; RUN: llc < %s -march=ppc32 | grep clrlwi | count 2
+; RUN: llc < %s -march=ppc32 | grep rotlw | count 4
; RUN: llc < %s -march=ppc32 | not grep or
define i32 @rotl32(i32 %A, i8 %Amt) nounwind {
diff --git a/test/CodeGen/PowerPC/rotl-64.ll b/test/CodeGen/PowerPC/rotl-64.ll
index 674c9e4cc951..2ccdc29f2cd2 100644
--- a/test/CodeGen/PowerPC/rotl-64.ll
+++ b/test/CodeGen/PowerPC/rotl-64.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=ppc64 | grep rldicl
-; RUN: llc < %s -march=ppc64 | grep rldcl
+; RUN: llc < %s -march=ppc64 | grep rotld
+; RUN: llc < %s -march=ppc64 | grep rotldi
; PR1613
define i64 @t1(i64 %A) {
diff --git a/test/CodeGen/PowerPC/rotl.ll b/test/CodeGen/PowerPC/rotl.ll
index 56fc4a8c911f..671f524645a7 100644
--- a/test/CodeGen/PowerPC/rotl.ll
+++ b/test/CodeGen/PowerPC/rotl.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -march=ppc32 | grep rlwnm | count 2
-; RUN: llc < %s -march=ppc32 | grep rlwinm | count 2
+; RUN: llc < %s -march=ppc32 | grep rotrw: | count 1
+; RUN: llc < %s -march=ppc32 | grep rotlw: | count 1
+; RUN: llc < %s -march=ppc32 | grep rotlwi: | count 1
+; RUN: llc < %s -march=ppc32 | grep rotrwi: | count 1
define i32 @rotlw(i32 %x, i32 %sh) {
entry:
diff --git a/test/CodeGen/PowerPC/rs-undef-use.ll b/test/CodeGen/PowerPC/rs-undef-use.ll
index 24dd5fd9da99..007931e7407f 100644
--- a/test/CodeGen/PowerPC/rs-undef-use.ll
+++ b/test/CodeGen/PowerPC/rs-undef-use.ll
@@ -15,7 +15,7 @@ CF82.critedge: ; preds = %CF
br label %CF82
CF82: ; preds = %CF82, %CF82.critedge
- %L17 = load i8* %0
+ %L17 = load i8, i8* %0
%E18 = extractelement <2 x i64> undef, i32 0
%PC = bitcast <2 x i1>* %A3 to i64*
br i1 undef, label %CF82, label %CF84.critedge
@@ -25,13 +25,13 @@ CF84.critedge: ; preds = %CF82
br label %CF84
CF84: ; preds = %CF84, %CF84.critedge
- %L40 = load i64* %PC
+ %L40 = load i64, i64* %PC
store i64 -1, i64* %PC
%Sl46 = select i1 undef, i1 undef, i1 false
br i1 %Sl46, label %CF84, label %CF85
CF85: ; preds = %CF84
- %L47 = load i64* %PC
+ %L47 = load i64, i64* %PC
store i64 %E18, i64* %PC
%PC52 = bitcast <8 x i32>* %A2 to ppc_fp128*
store ppc_fp128 0xM4D436562A0416DE00000000000000000, ppc_fp128* %PC52
diff --git a/test/CodeGen/PowerPC/s000-alias-misched.ll b/test/CodeGen/PowerPC/s000-alias-misched.ll
index 3570a11b6271..2e34c65a0a38 100644
--- a/test/CodeGen/PowerPC/s000-alias-misched.ll
+++ b/test/CodeGen/PowerPC/s000-alias-misched.ll
@@ -22,7 +22,7 @@ declare signext i32 @init(i8*) nounwind
define signext i32 @s000() nounwind {
entry:
- %call = tail call signext i32 @init(i8* getelementptr inbounds ([6 x i8]* @.str1, i64 0, i64 0))
+ %call = tail call signext i32 @init(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str1, i64 0, i64 0))
%call1 = tail call i64 @clock() nounwind
br label %for.cond2.preheader
@@ -34,34 +34,34 @@ for.cond2.preheader: ; preds = %for.end, %entry
for.body4: ; preds = %for.body4, %for.cond2.preheader
%indvars.iv = phi i64 [ 0, %for.cond2.preheader ], [ %indvars.iv.next.15, %for.body4 ]
- %arrayidx = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv
- %arrayidx6 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv
+ %arrayidx6 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv
%0 = bitcast double* %arrayidx to <1 x double>*
- %1 = load <1 x double>* %0, align 32
+ %1 = load <1 x double>, <1 x double>* %0, align 32
%add = fadd <1 x double> %1, <double 1.000000e+00>
%2 = bitcast double* %arrayidx6 to <1 x double>*
store <1 x double> %add, <1 x double>* %2, align 32
%indvars.iv.next.322 = or i64 %indvars.iv, 4
- %arrayidx.4 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.322
- %arrayidx6.4 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.322
+ %arrayidx.4 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.322
+ %arrayidx6.4 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.322
%3 = bitcast double* %arrayidx.4 to <1 x double>*
- %4 = load <1 x double>* %3, align 32
+ %4 = load <1 x double>, <1 x double>* %3, align 32
%add.4 = fadd <1 x double> %4, <double 1.000000e+00>
%5 = bitcast double* %arrayidx6.4 to <1 x double>*
store <1 x double> %add.4, <1 x double>* %5, align 32
%indvars.iv.next.726 = or i64 %indvars.iv, 8
- %arrayidx.8 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.726
- %arrayidx6.8 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.726
+ %arrayidx.8 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.726
+ %arrayidx6.8 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.726
%6 = bitcast double* %arrayidx.8 to <1 x double>*
- %7 = load <1 x double>* %6, align 32
+ %7 = load <1 x double>, <1 x double>* %6, align 32
%add.8 = fadd <1 x double> %7, <double 1.000000e+00>
%8 = bitcast double* %arrayidx6.8 to <1 x double>*
store <1 x double> %add.8, <1 x double>* %8, align 32
%indvars.iv.next.1130 = or i64 %indvars.iv, 12
- %arrayidx.12 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1130
- %arrayidx6.12 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1130
+ %arrayidx.12 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1130
+ %arrayidx6.12 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1130
%9 = bitcast double* %arrayidx.12 to <1 x double>*
- %10 = load <1 x double>* %9, align 32
+ %10 = load <1 x double>, <1 x double>* %9, align 32
%add.12 = fadd <1 x double> %10, <double 1.000000e+00>
%11 = bitcast double* %arrayidx6.12 to <1 x double>*
store <1 x double> %add.12, <1 x double>* %11, align 32
@@ -77,7 +77,7 @@ for.body4: ; preds = %for.body4, %for.con
; CHECK: bdnz
for.end: ; preds = %for.body4
- %call7 = tail call signext i32 @dummy(double* getelementptr inbounds ([16000 x double]* @X, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @Y, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @Z, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @U, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @V, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]]* @aa, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]]* @bb, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]]* @cc, i64 0, i64 0), double 0.000000e+00) nounwind
+ %call7 = tail call signext i32 @dummy(double* getelementptr inbounds ([16000 x double], [16000 x double]* @X, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @Y, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @Z, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @U, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @V, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @aa, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @bb, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @cc, i64 0, i64 0), double 0.000000e+00) nounwind
%inc9 = add nsw i32 %nl.018, 1
%exitcond = icmp eq i32 %inc9, 400000
br i1 %exitcond, label %for.end10, label %for.cond2.preheader
@@ -87,7 +87,7 @@ for.end10: ; preds = %for.end
%sub = sub nsw i64 %call11, %call1
%conv = sitofp i64 %sub to double
%div = fdiv double %conv, 1.000000e+06
- %call12 = tail call signext i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([14 x i8]* @.str137, i64 0, i64 0), double %div) nounwind
+ %call12 = tail call signext i32 (i8*, ...) @printf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str137, i64 0, i64 0), double %div) nounwind
tail call void @check(i32 signext 1)
ret i32 0
}
diff --git a/test/CodeGen/PowerPC/sdag-ppcf128.ll b/test/CodeGen/PowerPC/sdag-ppcf128.ll
index c46bc6b22dde..6d2a04c72936 100644
--- a/test/CodeGen/PowerPC/sdag-ppcf128.ll
+++ b/test/CodeGen/PowerPC/sdag-ppcf128.ll
@@ -5,7 +5,7 @@
define fastcc void @_D3std4math4sqrtFNaNbNfcZc() {
entry:
br i1 undef, label %if, label %else
-; CHECK: cmplwi 0, 3, 0
+; CHECK: cmplwi 3, 0
if: ; preds = %entry
store { ppc_fp128, ppc_fp128 } zeroinitializer, { ppc_fp128, ppc_fp128 }* undef
ret void
diff --git a/test/CodeGen/PowerPC/seteq-0.ll b/test/CodeGen/PowerPC/seteq-0.ll
index b7dd78085eb1..4afb8fee1776 100644
--- a/test/CodeGen/PowerPC/seteq-0.ll
+++ b/test/CodeGen/PowerPC/seteq-0.ll
@@ -5,7 +5,7 @@ define i32 @eq0(i32 %a) {
%tmp.2 = zext i1 %tmp.1 to i32 ; <i32> [#uses=1]
ret i32 %tmp.2
-; CHECK: cntlzw [[REG:r[0-9]+]], r3
+; CHECK: cntlz [[REG:r[0-9]+]], r3
; CHECK: rlwinm r3, [[REG]], 27, 31, 31
; CHECK: blr
}
diff --git a/test/CodeGen/PowerPC/sjlj.ll b/test/CodeGen/PowerPC/sjlj.ll
index f9f887af31f3..62403e711968 100644
--- a/test/CodeGen/PowerPC/sjlj.ll
+++ b/test/CodeGen/PowerPC/sjlj.ll
@@ -37,7 +37,7 @@ entry:
%0 = call i8* @llvm.frameaddress(i32 0)
store i8* %0, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**)
%1 = call i8* @llvm.stacksave()
- store i8* %1, i8** getelementptr (i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**), i32 2)
+ store i8* %1, i8** getelementptr (i8*, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**), i32 2)
%2 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
%tobool = icmp ne i32 %2, 0
br i1 %tobool, label %if.then, label %if.else
@@ -55,7 +55,7 @@ if.end: ; preds = %if.else
br label %return
return: ; preds = %if.end, %if.then
- %3 = load i32* %retval
+ %3 = load i32, i32* %retval
ret i32 %3
; FIXME: We should be saving VRSAVE on Darwin, but we're not!
@@ -110,7 +110,7 @@ entry:
%0 = call i8* @llvm.frameaddress(i32 0)
store i8* %0, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**)
%1 = call i8* @llvm.stacksave()
- store i8* %1, i8** getelementptr (i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**), i32 2)
+ store i8* %1, i8** getelementptr (i8*, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**), i32 2)
%2 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
%tobool = icmp ne i32 %2, 0
br i1 %tobool, label %if.then, label %if.else
@@ -128,7 +128,7 @@ if.end: ; preds = %if.else
br label %return
return: ; preds = %if.end, %if.then
- %3 = load i32* %retval
+ %3 = load i32, i32* %retval
ret i32 %3
; CHECK: @main2
diff --git a/test/CodeGen/PowerPC/small-arguments.ll b/test/CodeGen/PowerPC/small-arguments.ll
index 19ca0985eef1..3cef817689b3 100644
--- a/test/CodeGen/PowerPC/small-arguments.ll
+++ b/test/CodeGen/PowerPC/small-arguments.ll
@@ -26,14 +26,14 @@ UnifiedReturnBlock:
}
define i32 @test4(i16* %P) {
- %tmp.1 = load i16* %P
+ %tmp.1 = load i16, i16* %P
%tmp.2 = zext i16 %tmp.1 to i32
%tmp.3 = and i32 %tmp.2, 255
ret i32 %tmp.3
}
define i32 @test5(i16* %P) {
- %tmp.1 = load i16* %P
+ %tmp.1 = load i16, i16* %P
%tmp.2 = bitcast i16 %tmp.1 to i16
%tmp.3 = zext i16 %tmp.2 to i32
%tmp.4 = and i32 %tmp.3, 255
@@ -41,7 +41,7 @@ define i32 @test5(i16* %P) {
}
define i32 @test6(i32* %P) {
- %tmp.1 = load i32* %P
+ %tmp.1 = load i32, i32* %P
%tmp.2 = and i32 %tmp.1, 255
ret i32 %tmp.2
}
diff --git a/test/CodeGen/PowerPC/split-index-tc.ll b/test/CodeGen/PowerPC/split-index-tc.ll
index 03aff243b231..38be93f28a85 100644
--- a/test/CodeGen/PowerPC/split-index-tc.ll
+++ b/test/CodeGen/PowerPC/split-index-tc.ll
@@ -13,16 +13,16 @@ define void @_ZN4llvm17ScheduleDAGInstrs14addPhysRegDepsEPNS_5SUnitEj() #0 align
; CHECK-NOT: lhzu
entry:
- %0 = load %"class.llvm::MachineOperand"** undef, align 8
+ %0 = load %"class.llvm::MachineOperand"*, %"class.llvm::MachineOperand"** undef, align 8
br i1 undef, label %_ZNK4llvm14MachineOperand6getRegEv.exit, label %cond.false.i123
cond.false.i123: ; preds = %_ZN4llvm12MachineInstr10getOperandEj.exit
unreachable
_ZNK4llvm14MachineOperand6getRegEv.exit: ; preds = %_ZN4llvm12MachineInstr10getOperandEj.exit
- %IsDef.i = getelementptr inbounds %"class.llvm::MachineOperand"* %0, i64 undef, i32 1
+ %IsDef.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %0, i64 undef, i32 1
%1 = bitcast [3 x i8]* %IsDef.i to i24*
- %bf.load.i = load i24* %1, align 1
+ %bf.load.i = load i24, i24* %1, align 1
%2 = and i24 %bf.load.i, 128
br i1 undef, label %for.cond.cleanup, label %for.body.lr.ph
@@ -61,7 +61,7 @@ cond.false.i257: ; preds = %if.end55
unreachable
_ZNK4llvm14MachineOperand6isDeadEv.exit262: ; preds = %if.end55
- %bf.load.i259 = load i24* %1, align 1
+ %bf.load.i259 = load i24, i24* %1, align 1
br i1 undef, label %if.then57, label %if.else59
if.then57: ; preds = %_ZNK4llvm14MachineOperand6isDeadEv.exit262
diff --git a/test/CodeGen/PowerPC/stack-protector.ll b/test/CodeGen/PowerPC/stack-protector.ll
index b81d94181cdf..8d255bd9a43b 100644
--- a/test/CodeGen/PowerPC/stack-protector.ll
+++ b/test/CodeGen/PowerPC/stack-protector.ll
@@ -11,10 +11,10 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i8* %a, i8** %a_addr
%buf1 = bitcast [8 x i8]* %buf to i8* ; <i8*> [#uses=1]
- %0 = load i8** %a_addr, align 4 ; <i8*> [#uses=1]
+ %0 = load i8*, i8** %a_addr, align 4 ; <i8*> [#uses=1]
%1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind ; <i8*> [#uses=0]
%buf2 = bitcast [8 x i8]* %buf to i8* ; <i8*> [#uses=1]
- %2 = call i32 (i8*, ...)* @printf(i8* getelementptr ([11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind ; <i32> [#uses=0]
+ %2 = call i32 (i8*, ...) @printf(i8* getelementptr ([11 x i8], [11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
diff --git a/test/CodeGen/PowerPC/stack-realign.ll b/test/CodeGen/PowerPC/stack-realign.ll
index 762f50a9cbe0..e91b563af72e 100644
--- a/test/CodeGen/PowerPC/stack-realign.ll
+++ b/test/CodeGen/PowerPC/stack-realign.ll
@@ -14,14 +14,14 @@ declare void @bar(i32*)
define void @goo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
- %0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
+ %0 = load i32, i32* %a1, align 4
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
- %1 = load i32* %b, align 4
- %2 = load i32* @barbaz, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
+ %1 = load i32, i32* %b, align 4
+ %2 = load i32, i32* @barbaz, align 4
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %2, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx)
ret void
@@ -30,7 +30,7 @@ entry:
; CHECK-LABEL: @goo
; CHECK-DAG: mflr 0
-; CHECK-DAG: rldicl [[REG:[0-9]+]], 1, 0, 59
+; CHECK-DAG: clrldi [[REG:[0-9]+]], 1, 59
; CHECK-DAG: std 30, -16(1)
; CHECK-DAG: mr 30, 1
; CHECK-DAG: std 0, 16(1)
@@ -52,7 +52,7 @@ entry:
; CHECK-FP-LABEL: @goo
; CHECK-FP-DAG: mflr 0
-; CHECK-FP-DAG: rldicl [[REG:[0-9]+]], 1, 0, 59
+; CHECK-FP-DAG: clrldi [[REG:[0-9]+]], 1, 59
; CHECK-FP-DAG: std 31, -8(1)
; CHECK-FP-DAG: std 30, -16(1)
; CHECK-FP-DAG: mr 30, 1
@@ -78,7 +78,7 @@ entry:
; CHECK-32-LABEL: @goo
; CHECK-32-DAG: mflr 0
-; CHECK-32-DAG: rlwinm [[REG:[0-9]+]], 1, 0, 27, 31
+; CHECK-32-DAG: clrlwi [[REG:[0-9]+]], 1, 27
; CHECK-32-DAG: stw 30, -8(1)
; CHECK-32-DAG: mr 30, 1
; CHECK-32-DAG: stw 0, 4(1)
@@ -87,7 +87,7 @@ entry:
; CHECK-32-PIC-LABEL: @goo
; CHECK-32-PIC-DAG: mflr 0
-; CHECK-32-PIC-DAG: rlwinm [[REG:[0-9]+]], 1, 0, 27, 31
+; CHECK-32-PIC-DAG: clrlwi [[REG:[0-9]+]], 1, 27
; CHECK-32-PIC-DAG: stw 29, -12(1)
; CHECK-32-PIC-DAG: mr 29, 1
; CHECK-32-PIC-DAG: stw 0, 4(1)
@@ -98,13 +98,13 @@ entry:
define void @hoo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [200000 x i32], align 32
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
- %0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [200000 x i32]* %x, i64 0, i64 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
+ %0 = load i32, i32* %a1, align 4
+ %arrayidx = getelementptr inbounds [200000 x i32], [200000 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
- %1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [200000 x i32]* %x, i64 0, i64 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
+ %1 = load i32, i32* %b, align 4
+ %arrayidx2 = getelementptr inbounds [200000 x i32], [200000 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx)
ret void
@@ -113,7 +113,7 @@ entry:
; CHECK-LABEL: @hoo
; CHECK-DAG: lis [[REG1:[0-9]+]], -13
-; CHECK-DAG: rldicl [[REG3:[0-9]+]], 1, 0, 59
+; CHECK-DAG: clrldi [[REG3:[0-9]+]], 1, 59
; CHECK-DAG: mflr 0
; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 51808
; CHECK-DAG: std 30, -16(1)
@@ -129,7 +129,7 @@ entry:
; CHECK-32-LABEL: @hoo
; CHECK-32-DAG: lis [[REG1:[0-9]+]], -13
-; CHECK-32-DAG: rlwinm [[REG3:[0-9]+]], 1, 0, 27, 31
+; CHECK-32-DAG: clrlwi [[REG3:[0-9]+]], 1, 27
; CHECK-32-DAG: mflr 0
; CHECK-32-DAG: ori [[REG2:[0-9]+]], [[REG1]], 51904
; CHECK-32-DAG: stw 30, -8(1)
@@ -143,7 +143,7 @@ entry:
; CHECK-32-PIC-LABEL: @hoo
; CHECK-32-PIC-DAG: lis [[REG1:[0-9]+]], -13
-; CHECK-32-PIC-DAG: rlwinm [[REG3:[0-9]+]], 1, 0, 27, 31
+; CHECK-32-PIC-DAG: clrlwi [[REG3:[0-9]+]], 1, 27
; CHECK-32-PIC-DAG: mflr 0
; CHECK-32-PIC-DAG: ori [[REG2:[0-9]+]], [[REG1]], 51904
; CHECK-32-PIC-DAG: stw 29, -12(1)
@@ -159,13 +159,13 @@ entry:
define void @loo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
- %0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
+ %0 = load i32, i32* %a1, align 4
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
- %1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
+ %1 = load i32, i32* %b, align 4
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx)
call void asm sideeffect "", "~{f30}"() nounwind
@@ -175,7 +175,7 @@ entry:
; CHECK-LABEL: @loo
; CHECK-DAG: mflr 0
-; CHECK-DAG: rldicl [[REG:[0-9]+]], 1, 0, 59
+; CHECK-DAG: clrldi [[REG:[0-9]+]], 1, 59
; CHECK-DAG: std 30, -32(1)
; CHECK-DAG: mr 30, 1
; CHECK-DAG: std 0, 16(1)
@@ -191,7 +191,7 @@ entry:
; CHECK-FP-LABEL: @loo
; CHECK-FP-DAG: mflr 0
-; CHECK-FP-DAG: rldicl [[REG:[0-9]+]], 1, 0, 59
+; CHECK-FP-DAG: clrldi [[REG:[0-9]+]], 1, 59
; CHECK-FP-DAG: std 31, -24(1)
; CHECK-FP-DAG: std 30, -32(1)
; CHECK-FP-DAG: mr 30, 1
diff --git a/test/CodeGen/PowerPC/std-unal-fi.ll b/test/CodeGen/PowerPC/std-unal-fi.ll
index 8b9606e1624f..74ea8cd373bd 100644
--- a/test/CodeGen/PowerPC/std-unal-fi.ll
+++ b/test/CodeGen/PowerPC/std-unal-fi.ll
@@ -9,7 +9,7 @@ BB:
br label %CF
CF: ; preds = %CF80, %CF, %BB
- %L5 = load i64* undef
+ %L5 = load i64, i64* undef
store i8 %0, i8* %A4
%Shuff7 = shufflevector <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> %Shuff, <16 x i32> <i32 28, i32 30, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 undef, i32 20, i32 22, i32 24, i32 26>
%PC10 = bitcast i8* %A4 to ppc_fp128*
@@ -19,13 +19,13 @@ CF77: ; preds = %CF81, %CF83, %CF77,
br i1 undef, label %CF77, label %CF82
CF82: ; preds = %CF82, %CF77
- %L19 = load i64* undef
+ %L19 = load i64, i64* undef
store <1 x ppc_fp128> zeroinitializer, <1 x ppc_fp128>* %A
store i8 -65, i8* %A4
br i1 undef, label %CF82, label %CF83
CF83: ; preds = %CF82
- %L34 = load i64* undef
+ %L34 = load i64, i64* undef
br i1 undef, label %CF77, label %CF81
CF81: ; preds = %CF83
@@ -54,7 +54,7 @@ define void @autogen_SD88042(i8*, i32*, i8) {
BB:
%A4 = alloca <2 x i1>
%A = alloca <16 x float>
- %L = load i8* %0
+ %L = load i8, i8* %0
%Sl = select i1 false, <16 x float>* %A, <16 x float>* %A
%PC = bitcast <2 x i1>* %A4 to i64*
%Sl27 = select i1 false, i8 undef, i8 %L
@@ -66,7 +66,7 @@ CF: ; preds = %CF78, %CF, %BB
CF77: ; preds = %CF80, %CF77, %CF
store <16 x float> zeroinitializer, <16 x float>* %Sl
- %L58 = load i32* %PC33
+ %L58 = load i32, i32* %PC33
store i8 0, i8* %0
br i1 undef, label %CF77, label %CF80
@@ -90,7 +90,7 @@ BB:
%A1 = alloca i1
%I8 = insertelement <1 x i32> <i32 -1>, i32 454855, i32 0
%Cmp = icmp ult <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, undef
- %L10 = load i64* %2
+ %L10 = load i64, i64* %2
%E11 = extractelement <4 x i1> %Cmp, i32 2
br label %CF72
@@ -103,7 +103,7 @@ CF72: ; preds = %CF74, %CF72, %BB
CF74: ; preds = %CF72
store i8 0, i8* %0
%PC = bitcast i1* %A1 to i64*
- %L31 = load i64* %PC
+ %L31 = load i64, i64* %PC
store i64 477323, i64* %PC
%Sl37 = select i1 false, i32* undef, i32* %1
%Cmp38 = icmp ugt i1 undef, undef
diff --git a/test/CodeGen/PowerPC/stdux-constuse.ll b/test/CodeGen/PowerPC/stdux-constuse.ll
index e62d438014ee..d4d17956868a 100644
--- a/test/CodeGen/PowerPC/stdux-constuse.ll
+++ b/test/CodeGen/PowerPC/stdux-constuse.ll
@@ -4,7 +4,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define i32 @test1(i64 %add, i64* %ptr) nounwind {
entry:
- %p1 = getelementptr i64* %ptr, i64 144115188075855
+ %p1 = getelementptr i64, i64* %ptr, i64 144115188075855
br label %for.cond2.preheader
for.cond2.preheader:
@@ -14,10 +14,10 @@ for.cond2.preheader:
for.body4:
%lsr.iv = phi i32 [ %lsr.iv.next, %for.body4 ], [ 16000, %for.cond2.preheader ]
%i0 = phi i64* [ %p1, %for.cond2.preheader ], [ %i6, %for.body4 ]
- %i6 = getelementptr i64* %i0, i64 400000
- %i7 = getelementptr i64* %i6, i64 300000
- %i8 = getelementptr i64* %i6, i64 200000
- %i9 = getelementptr i64* %i6, i64 100000
+ %i6 = getelementptr i64, i64* %i0, i64 400000
+ %i7 = getelementptr i64, i64* %i6, i64 300000
+ %i8 = getelementptr i64, i64* %i6, i64 200000
+ %i9 = getelementptr i64, i64* %i6, i64 100000
store i64 %add, i64* %i6, align 32
store i64 %add, i64* %i7, align 32
store i64 %add, i64* %i8, align 32
diff --git a/test/CodeGen/PowerPC/stfiwx.ll b/test/CodeGen/PowerPC/stfiwx.ll
index 588e44fb28d3..5f90dcad032d 100644
--- a/test/CodeGen/PowerPC/stfiwx.ll
+++ b/test/CodeGen/PowerPC/stfiwx.ll
@@ -22,8 +22,8 @@ define void @test1(float %a, i32* %b) nounwind {
define void @test2(float %a, i32* %b, i32 %i) nounwind {
; CHECK-LABEL: @test2
; CHECK-LS-LABEL: @test2
- %tmp.2 = getelementptr i32* %b, i32 1 ; <i32*> [#uses=1]
- %tmp.5 = getelementptr i32* %b, i32 %i ; <i32*> [#uses=1]
+ %tmp.2 = getelementptr i32, i32* %b, i32 1 ; <i32*> [#uses=1]
+ %tmp.5 = getelementptr i32, i32* %b, i32 %i ; <i32*> [#uses=1]
%tmp.7 = fptosi float %a to i32 ; <i32> [#uses=3]
store i32 %tmp.7, i32* %tmp.5
store i32 %tmp.7, i32* %tmp.2
diff --git a/test/CodeGen/PowerPC/store-load-fwd.ll b/test/CodeGen/PowerPC/store-load-fwd.ll
index 25663c1ac68e..62dd79ec18a4 100644
--- a/test/CodeGen/PowerPC/store-load-fwd.ll
+++ b/test/CodeGen/PowerPC/store-load-fwd.ll
@@ -2,7 +2,7 @@
define i32 @test(i32* %P) {
store i32 1, i32* %P
- %V = load i32* %P ; <i32> [#uses=1]
+ %V = load i32, i32* %P ; <i32> [#uses=1]
ret i32 %V
}
diff --git a/test/CodeGen/PowerPC/store-update.ll b/test/CodeGen/PowerPC/store-update.ll
index 7b9e8f720a17..65f052869702 100644
--- a/test/CodeGen/PowerPC/store-update.ll
+++ b/test/CodeGen/PowerPC/store-update.ll
@@ -5,7 +5,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define i8* @test_stbu(i8* %base, i8 zeroext %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i8* %base, i64 16
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 16
store i8 %val, i8* %arrayidx, align 1
ret i8* %arrayidx
}
@@ -16,7 +16,7 @@ entry:
define i8* @test_stbux(i8* %base, i8 zeroext %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i8* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %offset
store i8 %val, i8* %arrayidx, align 1
ret i8* %arrayidx
}
@@ -27,7 +27,7 @@ entry:
define i16* @test_sthu(i16* %base, i16 zeroext %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i16* %base, i64 16
+ %arrayidx = getelementptr inbounds i16, i16* %base, i64 16
store i16 %val, i16* %arrayidx, align 2
ret i16* %arrayidx
}
@@ -38,7 +38,7 @@ entry:
define i16* @test_sthux(i16* %base, i16 zeroext %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i16* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i16, i16* %base, i64 %offset
store i16 %val, i16* %arrayidx, align 2
ret i16* %arrayidx
}
@@ -50,7 +50,7 @@ entry:
define i32* @test_stwu(i32* %base, i32 zeroext %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i32* %base, i64 16
+ %arrayidx = getelementptr inbounds i32, i32* %base, i64 16
store i32 %val, i32* %arrayidx, align 4
ret i32* %arrayidx
}
@@ -61,7 +61,7 @@ entry:
define i32* @test_stwux(i32* %base, i32 zeroext %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i32* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i32, i32* %base, i64 %offset
store i32 %val, i32* %arrayidx, align 4
ret i32* %arrayidx
}
@@ -74,7 +74,7 @@ entry:
define i8* @test_stbu8(i8* %base, i64 %val) nounwind {
entry:
%conv = trunc i64 %val to i8
- %arrayidx = getelementptr inbounds i8* %base, i64 16
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 16
store i8 %conv, i8* %arrayidx, align 1
ret i8* %arrayidx
}
@@ -86,7 +86,7 @@ entry:
define i8* @test_stbux8(i8* %base, i64 %val, i64 %offset) nounwind {
entry:
%conv = trunc i64 %val to i8
- %arrayidx = getelementptr inbounds i8* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %offset
store i8 %conv, i8* %arrayidx, align 1
ret i8* %arrayidx
}
@@ -98,7 +98,7 @@ entry:
define i16* @test_sthu8(i16* %base, i64 %val) nounwind {
entry:
%conv = trunc i64 %val to i16
- %arrayidx = getelementptr inbounds i16* %base, i64 16
+ %arrayidx = getelementptr inbounds i16, i16* %base, i64 16
store i16 %conv, i16* %arrayidx, align 2
ret i16* %arrayidx
}
@@ -110,7 +110,7 @@ entry:
define i16* @test_sthux8(i16* %base, i64 %val, i64 %offset) nounwind {
entry:
%conv = trunc i64 %val to i16
- %arrayidx = getelementptr inbounds i16* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i16, i16* %base, i64 %offset
store i16 %conv, i16* %arrayidx, align 2
ret i16* %arrayidx
}
@@ -123,7 +123,7 @@ entry:
define i32* @test_stwu8(i32* %base, i64 %val) nounwind {
entry:
%conv = trunc i64 %val to i32
- %arrayidx = getelementptr inbounds i32* %base, i64 16
+ %arrayidx = getelementptr inbounds i32, i32* %base, i64 16
store i32 %conv, i32* %arrayidx, align 4
ret i32* %arrayidx
}
@@ -135,7 +135,7 @@ entry:
define i32* @test_stwux8(i32* %base, i64 %val, i64 %offset) nounwind {
entry:
%conv = trunc i64 %val to i32
- %arrayidx = getelementptr inbounds i32* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i32, i32* %base, i64 %offset
store i32 %conv, i32* %arrayidx, align 4
ret i32* %arrayidx
}
@@ -147,7 +147,7 @@ entry:
define i64* @test_stdu(i64* %base, i64 %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i64* %base, i64 16
+ %arrayidx = getelementptr inbounds i64, i64* %base, i64 16
store i64 %val, i64* %arrayidx, align 8
ret i64* %arrayidx
}
@@ -158,7 +158,7 @@ entry:
define i64* @test_stdux(i64* %base, i64 %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i64* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i64, i64* %base, i64 %offset
store i64 %val, i64* %arrayidx, align 8
ret i64* %arrayidx
}
diff --git a/test/CodeGen/PowerPC/structsinmem.ll b/test/CodeGen/PowerPC/structsinmem.ll
index b5552af0eb51..bfada4c63714 100644
--- a/test/CodeGen/PowerPC/structsinmem.ll
+++ b/test/CodeGen/PowerPC/structsinmem.ll
@@ -43,7 +43,7 @@ entry:
%p6 = alloca %struct.s6, align 4
%p7 = alloca %struct.s7, align 4
%0 = bitcast %struct.s1* %p1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
%1 = bitcast %struct.s2* %p2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i32 2, i1 false)
%2 = bitcast %struct.s3* %p3 to i8*
@@ -88,28 +88,28 @@ entry:
store i32 %z6, i32* %z6.addr, align 4
store i32 %z7, i32* %z7.addr, align 4
store i32 %z8, i32* %z8.addr, align 4
- %a = getelementptr inbounds %struct.s1* %v1, i32 0, i32 0
- %0 = load i8* %a, align 1
+ %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
+ %0 = load i8, i8* %a, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.s2* %v2, i32 0, i32 0
- %1 = load i16* %a1, align 2
+ %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
+ %1 = load i16, i16* %a1, align 2
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.s3* %v3, i32 0, i32 0
- %2 = load i16* %a3, align 2
+ %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
+ %2 = load i16, i16* %a3, align 2
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.s4* %v4, i32 0, i32 0
- %3 = load i32* %a6, align 4
+ %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
+ %3 = load i32, i32* %a6, align 4
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.s5* %v5, i32 0, i32 0
- %4 = load i32* %a8, align 4
+ %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
+ %4 = load i32, i32* %a8, align 4
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.s6* %v6, i32 0, i32 0
- %5 = load i32* %a10, align 4
+ %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
+ %5 = load i32, i32* %a10, align 4
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.s7* %v7, i32 0, i32 0
- %6 = load i32* %a12, align 4
+ %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
+ %6 = load i32, i32* %a12, align 4
%add13 = add nsw i32 %add11, %6
ret i32 %add13
@@ -132,7 +132,7 @@ entry:
%p6 = alloca %struct.t6, align 1
%p7 = alloca %struct.t7, align 1
%0 = bitcast %struct.t1* %p1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
%1 = bitcast %struct.t2* %p2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i32 1, i1 false)
%2 = bitcast %struct.t3* %p3 to i8*
@@ -180,28 +180,28 @@ entry:
store i32 %z6, i32* %z6.addr, align 4
store i32 %z7, i32* %z7.addr, align 4
store i32 %z8, i32* %z8.addr, align 4
- %a = getelementptr inbounds %struct.t1* %v1, i32 0, i32 0
- %0 = load i8* %a, align 1
+ %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
+ %0 = load i8, i8* %a, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.t2* %v2, i32 0, i32 0
- %1 = load i16* %a1, align 1
+ %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
+ %1 = load i16, i16* %a1, align 1
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.t3* %v3, i32 0, i32 0
- %2 = load i16* %a3, align 1
+ %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
+ %2 = load i16, i16* %a3, align 1
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.t4* %v4, i32 0, i32 0
- %3 = load i32* %a6, align 1
+ %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
+ %3 = load i32, i32* %a6, align 1
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.t5* %v5, i32 0, i32 0
- %4 = load i32* %a8, align 1
+ %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
+ %4 = load i32, i32* %a8, align 1
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.t6* %v6, i32 0, i32 0
- %5 = load i32* %a10, align 1
+ %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
+ %5 = load i32, i32* %a10, align 1
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.t7* %v7, i32 0, i32 0
- %6 = load i32* %a12, align 1
+ %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
+ %6 = load i32, i32* %a12, align 1
%add13 = add nsw i32 %add11, %6
ret i32 %add13
diff --git a/test/CodeGen/PowerPC/structsinregs.ll b/test/CodeGen/PowerPC/structsinregs.ll
index cfe32e9560ae..0fb9895a6227 100644
--- a/test/CodeGen/PowerPC/structsinregs.ll
+++ b/test/CodeGen/PowerPC/structsinregs.ll
@@ -43,7 +43,7 @@ entry:
%p6 = alloca %struct.s6, align 4
%p7 = alloca %struct.s7, align 4
%0 = bitcast %struct.s1* %p1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
%1 = bitcast %struct.s2* %p2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i32 2, i1 false)
%2 = bitcast %struct.s3* %p3 to i8*
@@ -72,28 +72,28 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
define internal i32 @callee1(%struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind {
entry:
- %a = getelementptr inbounds %struct.s1* %v1, i32 0, i32 0
- %0 = load i8* %a, align 1
+ %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
+ %0 = load i8, i8* %a, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.s2* %v2, i32 0, i32 0
- %1 = load i16* %a1, align 2
+ %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
+ %1 = load i16, i16* %a1, align 2
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.s3* %v3, i32 0, i32 0
- %2 = load i16* %a3, align 2
+ %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
+ %2 = load i16, i16* %a3, align 2
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.s4* %v4, i32 0, i32 0
- %3 = load i32* %a6, align 4
+ %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
+ %3 = load i32, i32* %a6, align 4
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.s5* %v5, i32 0, i32 0
- %4 = load i32* %a8, align 4
+ %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
+ %4 = load i32, i32* %a8, align 4
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.s6* %v6, i32 0, i32 0
- %5 = load i32* %a10, align 4
+ %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
+ %5 = load i32, i32* %a10, align 4
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.s7* %v7, i32 0, i32 0
- %6 = load i32* %a12, align 4
+ %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
+ %6 = load i32, i32* %a12, align 4
%add13 = add nsw i32 %add11, %6
ret i32 %add13
@@ -123,7 +123,7 @@ entry:
%p6 = alloca %struct.t6, align 1
%p7 = alloca %struct.t7, align 1
%0 = bitcast %struct.t1* %p1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
%1 = bitcast %struct.t2* %p2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i32 1, i1 false)
%2 = bitcast %struct.t3* %p3 to i8*
@@ -159,28 +159,28 @@ entry:
define internal i32 @callee2(%struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind {
entry:
- %a = getelementptr inbounds %struct.t1* %v1, i32 0, i32 0
- %0 = load i8* %a, align 1
+ %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
+ %0 = load i8, i8* %a, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.t2* %v2, i32 0, i32 0
- %1 = load i16* %a1, align 1
+ %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
+ %1 = load i16, i16* %a1, align 1
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.t3* %v3, i32 0, i32 0
- %2 = load i16* %a3, align 1
+ %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
+ %2 = load i16, i16* %a3, align 1
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.t4* %v4, i32 0, i32 0
- %3 = load i32* %a6, align 1
+ %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
+ %3 = load i32, i32* %a6, align 1
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.t5* %v5, i32 0, i32 0
- %4 = load i32* %a8, align 1
+ %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
+ %4 = load i32, i32* %a8, align 1
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.t6* %v6, i32 0, i32 0
- %5 = load i32* %a10, align 1
+ %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
+ %5 = load i32, i32* %a10, align 1
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.t7* %v7, i32 0, i32 0
- %6 = load i32* %a12, align 1
+ %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
+ %6 = load i32, i32* %a12, align 1
%add13 = add nsw i32 %add11, %6
ret i32 %add13
diff --git a/test/CodeGen/PowerPC/stwu-gta.ll b/test/CodeGen/PowerPC/stwu-gta.ll
index 980c1d502853..2b420156f739 100644
--- a/test/CodeGen/PowerPC/stwu-gta.ll
+++ b/test/CodeGen/PowerPC/stwu-gta.ll
@@ -8,8 +8,8 @@ target triple = "powerpc-unknown-linux"
define void @_GLOBAL__I_a() nounwind section ".text.startup" {
entry:
- store i32 5, i32* getelementptr inbounds (%class.Two.0.5* @foo, i32 0, i32 0), align 4
- store i32 6, i32* getelementptr inbounds (%class.Two.0.5* @foo, i32 0, i32 1), align 4
+ store i32 5, i32* getelementptr inbounds (%class.Two.0.5, %class.Two.0.5* @foo, i32 0, i32 0), align 4
+ store i32 6, i32* getelementptr inbounds (%class.Two.0.5, %class.Two.0.5* @foo, i32 0, i32 1), align 4
ret void
}
diff --git a/test/CodeGen/PowerPC/stwu8.ll b/test/CodeGen/PowerPC/stwu8.ll
index b220af2df4a4..bb2748432d79 100644
--- a/test/CodeGen/PowerPC/stwu8.ll
+++ b/test/CodeGen/PowerPC/stwu8.ll
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define void @test1(%class.spell_checker.21.103.513.538* %this) unnamed_addr align 2 {
entry:
- %_M_header.i.i.i.i.i.i = getelementptr inbounds %class.spell_checker.21.103.513.538* %this, i64 0, i32 0, i32 0, i32 0, i32 1
+ %_M_header.i.i.i.i.i.i = getelementptr inbounds %class.spell_checker.21.103.513.538, %class.spell_checker.21.103.513.538* %this, i64 0, i32 0, i32 0, i32 0, i32 1
%0 = bitcast %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i to i8*
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 4, i1 false) nounwind
store %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i, %"struct.std::_Rb_tree_node_base.17.99.509.534"** undef, align 8
diff --git a/test/CodeGen/PowerPC/stwux.ll b/test/CodeGen/PowerPC/stwux.ll
index 737e9d9f0ecb..2ed630d8002d 100644
--- a/test/CodeGen/PowerPC/stwux.ll
+++ b/test/CodeGen/PowerPC/stwux.ll
@@ -27,7 +27,7 @@ while.end: ; preds = %if.end12
if.end15: ; preds = %while.end
%idxprom.i.i230 = sext i32 %i.1 to i64
- %arrayidx18 = getelementptr inbounds [100 x i32]* @multvec_i, i64 0, i64 %idxprom.i.i230
+ %arrayidx18 = getelementptr inbounds [100 x i32], [100 x i32]* @multvec_i, i64 0, i64 %idxprom.i.i230
store i32 0, i32* %arrayidx18, align 4
br i1 undef, label %while.body21, label %while.end90
diff --git a/test/CodeGen/PowerPC/subreg-postra-2.ll b/test/CodeGen/PowerPC/subreg-postra-2.ll
index 2faaa6129294..051536443413 100644
--- a/test/CodeGen/PowerPC/subreg-postra-2.ll
+++ b/test/CodeGen/PowerPC/subreg-postra-2.ll
@@ -134,8 +134,8 @@ while.body392.lr.ph: ; preds = %do.body378
br label %while.body392
while.body392: ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
- %0 = load i8** undef, align 8
- %add.ptr399 = getelementptr inbounds i8* %0, i64 -72
+ %0 = load i8*, i8** undef, align 8
+ %add.ptr399 = getelementptr inbounds i8, i8* %0, i64 -72
%b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
%tobool.i1316 = icmp eq i64 undef, 0
br i1 %tobool.i1316, label %wait_on_buffer.exit1319, label %if.then.i1317
@@ -144,7 +144,7 @@ if.then.i1317: ; preds = %while.body392
unreachable
wait_on_buffer.exit1319: ; preds = %while.body392
- %1 = load volatile i64* %b_state.i.i1314, align 8
+ %1 = load volatile i64, i64* %b_state.i.i1314, align 8
%conv.i.i1322 = and i64 %1, 1
%lnot404 = icmp eq i64 %conv.i.i1322, 0
%.err.4 = select i1 %lnot404, i32 -5, i32 undef
@@ -160,7 +160,7 @@ while.end418: ; preds = %wait_on_buffer.exit
; CHECK-LABEL: @jbd2_journal_commit_transaction
; CHECK: andi.
-; CHECK: cror [[REG:[0-9]+]], 1, 1
+; CHECK: crmove [[REG:[0-9]+]], 1
; CHECK: stdcx.
; CHECK: isel {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}, [[REG]]
diff --git a/test/CodeGen/PowerPC/subreg-postra.ll b/test/CodeGen/PowerPC/subreg-postra.ll
index b10fa668cb8d..ba1b967cf204 100644
--- a/test/CodeGen/PowerPC/subreg-postra.ll
+++ b/test/CodeGen/PowerPC/subreg-postra.ll
@@ -120,8 +120,8 @@ while.body392.lr.ph: ; preds = %do.body378
br label %while.body392
while.body392: ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
- %0 = load i8** undef, align 8
- %add.ptr399 = getelementptr inbounds i8* %0, i64 -72
+ %0 = load i8*, i8** undef, align 8
+ %add.ptr399 = getelementptr inbounds i8, i8* %0, i64 -72
%b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
%tobool.i1316 = icmp eq i64 undef, 0
br i1 %tobool.i1316, label %wait_on_buffer.exit1319, label %if.then.i1317
@@ -130,20 +130,20 @@ if.then.i1317: ; preds = %while.body392
unreachable
wait_on_buffer.exit1319: ; preds = %while.body392
- %1 = load volatile i64* %b_state.i.i1314, align 8
+ %1 = load volatile i64, i64* %b_state.i.i1314, align 8
%conv.i.i1322 = and i64 %1, 1
%lnot404 = icmp eq i64 %conv.i.i1322, 0
%.err.4 = select i1 %lnot404, i32 -5, i32 undef
%2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* %b_state.i.i1314) #1
- %prev.i.i.i1325 = getelementptr inbounds i8* %0, i64 8
- %3 = load i32** null, align 8
+ %prev.i.i.i1325 = getelementptr inbounds i8, i8* %0, i64 8
+ %3 = load i32*, i32** null, align 8
store i32* %3, i32** undef, align 8
call void @__brelse(i32* undef) #1
br i1 undef, label %while.end418, label %while.body392
; CHECK-LABEL: @jbd2_journal_commit_transaction
; CHECK: andi.
-; CHECK: cror [[REG:[0-9]+]], 1, 1
+; CHECK: crmove [[REG:[0-9]+]], 1
; CHECK: stdcx.
; CHECK: isel {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}, [[REG]]
diff --git a/test/CodeGen/PowerPC/subsumes-pred-regs.ll b/test/CodeGen/PowerPC/subsumes-pred-regs.ll
index c510e36cb413..5389c1318445 100644
--- a/test/CodeGen/PowerPC/subsumes-pred-regs.ll
+++ b/test/CodeGen/PowerPC/subsumes-pred-regs.ll
@@ -20,7 +20,7 @@ if.then: ; preds = %lor.end
br i1 undef, label %return, label %if.end.i24
if.end.i24: ; preds = %if.then
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
%lnot.i.i16.i23 = icmp eq i32 %0, 0
br i1 %lnot.i.i16.i23, label %if.end7.i37, label %test.exit27.i34
diff --git a/test/CodeGen/PowerPC/swaps-le-1.ll b/test/CodeGen/PowerPC/swaps-le-1.ll
new file mode 100644
index 000000000000..0c4163169034
--- /dev/null
+++ b/test/CodeGen/PowerPC/swaps-le-1.ll
@@ -0,0 +1,147 @@
+; RUN: llc -O3 -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -O3 -mcpu=pwr8 -disable-ppc-vsx-swap-removal -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck -check-prefix=NOOPTSWAP %s
+
+; This test was generated from the following source:
+;
+; #define N 4096
+; int ca[N] __attribute__((aligned(16)));
+; int cb[N] __attribute__((aligned(16)));
+; int cc[N] __attribute__((aligned(16)));
+; int cd[N] __attribute__((aligned(16)));
+;
+; void foo ()
+; {
+; int i;
+; for (i = 0; i < N; i++) {
+; ca[i] = (cb[i] + cc[i]) * cd[i];
+; }
+; }
+
+@cb = common global [4096 x i32] zeroinitializer, align 16
+@cc = common global [4096 x i32] zeroinitializer, align 16
+@cd = common global [4096 x i32] zeroinitializer, align 16
+@ca = common global [4096 x i32] zeroinitializer, align 16
+
+define void @foo() {
+entry:
+ br label %vector.body
+
+vector.body:
+ %index = phi i64 [ 0, %entry ], [ %index.next.3, %vector.body ]
+ %0 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index
+ %1 = bitcast i32* %0 to <4 x i32>*
+ %wide.load = load <4 x i32>, <4 x i32>* %1, align 16
+ %2 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index
+ %3 = bitcast i32* %2 to <4 x i32>*
+ %wide.load13 = load <4 x i32>, <4 x i32>* %3, align 16
+ %4 = add nsw <4 x i32> %wide.load13, %wide.load
+ %5 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index
+ %6 = bitcast i32* %5 to <4 x i32>*
+ %wide.load14 = load <4 x i32>, <4 x i32>* %6, align 16
+ %7 = mul nsw <4 x i32> %4, %wide.load14
+ %8 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index
+ %9 = bitcast i32* %8 to <4 x i32>*
+ store <4 x i32> %7, <4 x i32>* %9, align 16
+ %index.next = add nuw nsw i64 %index, 4
+ %10 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next
+ %11 = bitcast i32* %10 to <4 x i32>*
+ %wide.load.1 = load <4 x i32>, <4 x i32>* %11, align 16
+ %12 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next
+ %13 = bitcast i32* %12 to <4 x i32>*
+ %wide.load13.1 = load <4 x i32>, <4 x i32>* %13, align 16
+ %14 = add nsw <4 x i32> %wide.load13.1, %wide.load.1
+ %15 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next
+ %16 = bitcast i32* %15 to <4 x i32>*
+ %wide.load14.1 = load <4 x i32>, <4 x i32>* %16, align 16
+ %17 = mul nsw <4 x i32> %14, %wide.load14.1
+ %18 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next
+ %19 = bitcast i32* %18 to <4 x i32>*
+ store <4 x i32> %17, <4 x i32>* %19, align 16
+ %index.next.1 = add nuw nsw i64 %index.next, 4
+ %20 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next.1
+ %21 = bitcast i32* %20 to <4 x i32>*
+ %wide.load.2 = load <4 x i32>, <4 x i32>* %21, align 16
+ %22 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next.1
+ %23 = bitcast i32* %22 to <4 x i32>*
+ %wide.load13.2 = load <4 x i32>, <4 x i32>* %23, align 16
+ %24 = add nsw <4 x i32> %wide.load13.2, %wide.load.2
+ %25 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next.1
+ %26 = bitcast i32* %25 to <4 x i32>*
+ %wide.load14.2 = load <4 x i32>, <4 x i32>* %26, align 16
+ %27 = mul nsw <4 x i32> %24, %wide.load14.2
+ %28 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next.1
+ %29 = bitcast i32* %28 to <4 x i32>*
+ store <4 x i32> %27, <4 x i32>* %29, align 16
+ %index.next.2 = add nuw nsw i64 %index.next.1, 4
+ %30 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next.2
+ %31 = bitcast i32* %30 to <4 x i32>*
+ %wide.load.3 = load <4 x i32>, <4 x i32>* %31, align 16
+ %32 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next.2
+ %33 = bitcast i32* %32 to <4 x i32>*
+ %wide.load13.3 = load <4 x i32>, <4 x i32>* %33, align 16
+ %34 = add nsw <4 x i32> %wide.load13.3, %wide.load.3
+ %35 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next.2
+ %36 = bitcast i32* %35 to <4 x i32>*
+ %wide.load14.3 = load <4 x i32>, <4 x i32>* %36, align 16
+ %37 = mul nsw <4 x i32> %34, %wide.load14.3
+ %38 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next.2
+ %39 = bitcast i32* %38 to <4 x i32>*
+ store <4 x i32> %37, <4 x i32>* %39, align 16
+ %index.next.3 = add nuw nsw i64 %index.next.2, 4
+ %40 = icmp eq i64 %index.next.3, 4096
+ br i1 %40, label %for.end, label %vector.body
+
+for.end:
+ ret void
+}
+
+; CHECK-LABEL: @foo
+; CHECK-NOT: xxpermdi
+; CHECK-NOT: xxswapd
+
+; CHECK: lxvd2x
+; CHECK: lxvd2x
+; CHECK-DAG: lxvd2x
+; CHECK-DAG: vadduwm
+; CHECK: vmuluwm
+; CHECK: stxvd2x
+
+; CHECK: lxvd2x
+; CHECK: lxvd2x
+; CHECK-DAG: lxvd2x
+; CHECK-DAG: vadduwm
+; CHECK: vmuluwm
+; CHECK: stxvd2x
+
+; CHECK: lxvd2x
+; CHECK: lxvd2x
+; CHECK-DAG: lxvd2x
+; CHECK-DAG: vadduwm
+; CHECK: vmuluwm
+; CHECK: stxvd2x
+
+; CHECK: lxvd2x
+; CHECK: lxvd2x
+; CHECK-DAG: lxvd2x
+; CHECK-DAG: vadduwm
+; CHECK: vmuluwm
+; CHECK: stxvd2x
+
+
+; NOOPTSWAP-LABEL: @foo
+
+; NOOPTSWAP: lxvd2x
+; NOOPTSWAP-DAG: lxvd2x
+; NOOPTSWAP-DAG: lxvd2x
+; NOOPTSWAP-DAG: xxswapd
+; NOOPTSWAP-DAG: xxswapd
+; NOOPTSWAP-DAG: xxswapd
+; NOOPTSWAP-DAG: vadduwm
+; NOOPTSWAP: vmuluwm
+; NOOPTSWAP: xxswapd
+; NOOPTSWAP-DAG: xxswapd
+; NOOPTSWAP-DAG: xxswapd
+; NOOPTSWAP-DAG: stxvd2x
+; NOOPTSWAP-DAG: stxvd2x
+; NOOPTSWAP: stxvd2x
+
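The swap removal exercised by swaps-le-1.ll rests on a single algebraic fact: on little-endian POWER8, lxvd2x and stxvd2x effectively reorder the two doublewords of the vector, so the compiler normally pairs each of them with an xxswapd; when every operation in between is lane-insensitive (vadduwm, vmuluwm), the permutation commutes with the computation and the paired swaps cancel, which is what the CHECK and NOOPTSWAP lines above compare. A minimal C sketch of that commutation, illustrative only and not LLVM code (the type and helper names are invented here):

/* Sketch: a lane-insensitive op commutes with the doubleword-swap
 * permutation, so load-swap / op / swap-store == load / op / store. */
#include <assert.h>

typedef struct { int w[4]; } v4i32;

/* Swap the two 64-bit halves, i.e. lanes {0,1,2,3} -> {2,3,0,1}. */
static v4i32 dword_swap(v4i32 v) {
    v4i32 r = { { v.w[2], v.w[3], v.w[0], v.w[1] } };
    return r;
}

/* Element-wise add, the C analogue of vadduwm. */
static v4i32 add_lanes(v4i32 a, v4i32 b) {
    v4i32 r;
    for (int i = 0; i < 4; i++) r.w[i] = a.w[i] + b.w[i];
    return r;
}

int main(void) {
    v4i32 a = { { 1, 2, 3, 4 } }, b = { { 10, 20, 30, 40 } };
    v4i32 swapped_path = add_lanes(dword_swap(a), dword_swap(b));
    v4i32 direct_path  = dword_swap(add_lanes(a, b));
    /* add(swap(a), swap(b)) == swap(add(a, b)), so the swaps are removable. */
    for (int i = 0; i < 4; i++)
        assert(swapped_path.w[i] == direct_path.w[i]);
    return 0;
}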
diff --git a/test/CodeGen/PowerPC/swaps-le-2.ll b/test/CodeGen/PowerPC/swaps-le-2.ll
new file mode 100644
index 000000000000..08096ed20ddb
--- /dev/null
+++ b/test/CodeGen/PowerPC/swaps-le-2.ll
@@ -0,0 +1,91 @@
+; RUN: llc -O3 -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+
+; Test swap removal when a vector splat must be adjusted to make it legal.
+;
+; Test generated from following C code:
+;
+; vector char vc = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+; vector char vcr;
+; vector short vs = {0, 1, 2, 3, 4, 5, 6, 7};
+; vector short vsr;
+; vector int vi = {0, 1, 2, 3};
+; vector int vir;
+;
+; void cfoo ()
+; {
+; vcr = (vector char){vc[5], vc[5], vc[5], vc[5], vc[5], vc[5], vc[5], vc[5],
+; vc[5], vc[5], vc[5], vc[5], vc[5], vc[5], vc[5], vc[5]};
+; }
+;
+; void sfoo ()
+; {
+; vsr = (vector short){vs[6], vs[6], vs[6], vs[6],
+; vs[6], vs[6], vs[6], vs[6]};
+; }
+;
+; void ifoo ()
+; {
+; vir = (vector int){vi[1], vi[1], vi[1], vi[1]};
+; }
+
+@vc = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@vs = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@vi = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@vcr = common global <16 x i8> zeroinitializer, align 16
+@vsr = common global <8 x i16> zeroinitializer, align 16
+@vir = common global <4 x i32> zeroinitializer, align 16
+
+; Function Attrs: nounwind
+define void @cfoo() {
+entry:
+ %0 = load <16 x i8>, <16 x i8>* @vc, align 16
+ %vecinit30 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ store <16 x i8> %vecinit30, <16 x i8>* @vcr, align 16
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @sfoo() {
+entry:
+ %0 = load <8 x i16>, <8 x i16>* @vs, align 16
+ %vecinit14 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
+ store <8 x i16> %vecinit14, <8 x i16>* @vsr, align 16
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @ifoo() {
+entry:
+ %0 = load <4 x i32>, <4 x i32>* @vi, align 16
+ %vecinit6 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i32> %vecinit6, <4 x i32>* @vir, align 16
+ ret void
+}
+
+; Justification:
+; Byte splat of element 5 (BE) becomes element 15-5 = 10 (LE)
+; which becomes (10+8)%16 = 2 (LE swapped).
+;
+; Halfword splat of element 6 (BE) becomes element 7-6 = 1 (LE)
+; which becomes (1+4)%8 = 5 (LE swapped).
+;
+; Word splat of element 1 (BE) becomes element 3-1 = 2 (LE)
+; which becomes (2+2)%4 = 0 (LE swapped).
+
+; CHECK-NOT: xxpermdi
+; CHECK-NOT: xxswapd
+
+; CHECK-LABEL: @cfoo
+; CHECK: lxvd2x
+; CHECK: vspltb {{[0-9]+}}, {{[0-9]+}}, 2
+; CHECK: stxvd2x
+
+; CHECK-LABEL: @sfoo
+; CHECK: lxvd2x
+; CHECK: vsplth {{[0-9]+}}, {{[0-9]+}}, 5
+; CHECK: stxvd2x
+
+; CHECK-LABEL: @ifoo
+; CHECK: lxvd2x
+; CHECK: vspltw {{[0-9]+}}, {{[0-9]+}}, 0
+; CHECK: stxvd2x
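The index arithmetic in the Justification comments above follows one formula: a splat of big-endian element be in an n-element register is LE element (n-1)-be, and after accounting for the doubleword swap it becomes ((n-1-be) + n/2) % n. A small self-checking C sketch (hypothetical helper name, not part of LLVM) that reproduces the three immediates the CHECK lines expect:

/* Sketch of the splat-index adjustment spelled out in the comments above. */
#include <assert.h>

static unsigned swapped_splat_index(unsigned n, unsigned be) {
    unsigned le = (n - 1) - be;   /* BE lane number -> LE lane number      */
    return (le + n / 2) % n;      /* then account for the doubleword swap  */
}

int main(void) {
    assert(swapped_splat_index(16, 5) == 2); /* vspltb ..., 2 in @cfoo */
    assert(swapped_splat_index(8, 6) == 5);  /* vsplth ..., 5 in @sfoo */
    assert(swapped_splat_index(4, 1) == 0);  /* vspltw ..., 0 in @ifoo */
    return 0;
}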
diff --git a/test/CodeGen/PowerPC/tls-cse.ll b/test/CodeGen/PowerPC/tls-cse.ll
new file mode 100644
index 000000000000..7375e9ccbae3
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls-cse.ll
@@ -0,0 +1,52 @@
+; RUN: llc -march=ppc64 -mcpu=pwr7 -O2 -relocation-model=pic < %s | FileCheck %s
+; RUN: llc -march=ppc64 -mcpu=pwr7 -O2 -relocation-model=pic < %s | grep "__tls_get_addr" | count 1
+
+; This test was derived from LLVM's own
+; PrettyStackTraceEntry::~PrettyStackTraceEntry(). It demonstrates an
+; opportunity for CSE of calls to __tls_get_addr().
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+%"class.llvm::PrettyStackTraceEntry" = type { i32 (...)**, %"class.llvm::PrettyStackTraceEntry"* }
+
+@_ZTVN4llvm21PrettyStackTraceEntryE = unnamed_addr constant [5 x i8*] [i8* null, i8* null, i8* bitcast (void (%"class.llvm::PrettyStackTraceEntry"*)* @_ZN4llvm21PrettyStackTraceEntryD2Ev to i8*), i8* bitcast (void (%"class.llvm::PrettyStackTraceEntry"*)* @_ZN4llvm21PrettyStackTraceEntryD0Ev to i8*), i8* bitcast (void ()* @__cxa_pure_virtual to i8*)], align 8
+@_ZL20PrettyStackTraceHead = internal thread_local unnamed_addr global %"class.llvm::PrettyStackTraceEntry"* null, align 8
+@.str = private unnamed_addr constant [87 x i8] c"PrettyStackTraceHead == this && \22Pretty stack trace entry destruction is out of order\22\00", align 1
+@.str1 = private unnamed_addr constant [64 x i8] c"/home/wschmidt/llvm/llvm-test2/lib/Support/PrettyStackTrace.cpp\00", align 1
+@__PRETTY_FUNCTION__._ZN4llvm21PrettyStackTraceEntryD2Ev = private unnamed_addr constant [62 x i8] c"virtual llvm::PrettyStackTraceEntry::~PrettyStackTraceEntry()\00", align 1
+
+declare void @_ZN4llvm21PrettyStackTraceEntryD2Ev(%"class.llvm::PrettyStackTraceEntry"* %this) unnamed_addr
+declare void @__cxa_pure_virtual()
+declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*)
+declare void @_ZdlPv(i8*)
+
+define void @_ZN4llvm21PrettyStackTraceEntryD0Ev(%"class.llvm::PrettyStackTraceEntry"* %this) unnamed_addr align 2 {
+entry:
+ %0 = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 0
+ store i32 (...)** bitcast (i8** getelementptr inbounds ([5 x i8*], [5 x i8*]* @_ZTVN4llvm21PrettyStackTraceEntryE, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
+ %1 = load %"class.llvm::PrettyStackTraceEntry"*, %"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead, align 8
+ %cmp.i = icmp eq %"class.llvm::PrettyStackTraceEntry"* %1, %this
+ br i1 %cmp.i, label %_ZN4llvm21PrettyStackTraceEntryD2Ev.exit, label %cond.false.i
+
+cond.false.i: ; preds = %entry
+ tail call void @__assert_fail(i8* getelementptr inbounds ([87 x i8], [87 x i8]* @.str, i64 0, i64 0), i8* getelementptr inbounds ([64 x i8], [64 x i8]* @.str1, i64 0, i64 0), i32 zeroext 119, i8* getelementptr inbounds ([62 x i8], [62 x i8]* @__PRETTY_FUNCTION__._ZN4llvm21PrettyStackTraceEntryD2Ev, i64 0, i64 0))
+ unreachable
+
+_ZN4llvm21PrettyStackTraceEntryD2Ev.exit: ; preds = %entry
+ %NextEntry.i.i = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 1
+ %2 = bitcast %"class.llvm::PrettyStackTraceEntry"** %NextEntry.i.i to i64*
+ %3 = load i64, i64* %2, align 8
+ store i64 %3, i64* bitcast (%"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead to i64*), align 8
+ %4 = bitcast %"class.llvm::PrettyStackTraceEntry"* %this to i8*
+ tail call void @_ZdlPv(i8* %4)
+ ret void
+}
+
+; CHECK-LABEL: _ZN4llvm21PrettyStackTraceEntryD0Ev:
+; CHECK: addis [[REG1:[0-9]+]], 2, _ZL20PrettyStackTraceHead@got@tlsld@ha
+; CHECK: addi 3, [[REG1]], _ZL20PrettyStackTraceHead@got@tlsld@l
+; CHECK: bl __tls_get_addr(_ZL20PrettyStackTraceHead@tlsld)
+; CHECK: addis 3, 3, _ZL20PrettyStackTraceHead@dtprel@ha
+; CHECK: ld {{[0-9]+}}, _ZL20PrettyStackTraceHead@dtprel@l(3)
+; CHECK: std {{[0-9]+}}, _ZL20PrettyStackTraceHead@dtprel@l(3)
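tls-cse.ll above loads @_ZL20PrettyStackTraceHead, compares it against %this, and then stores back through the same thread-local variable; the RUN line with `count 1` verifies that both accesses share a single __tls_get_addr call under the local-dynamic model. A rough C analogue of that access pattern, assumed here for illustration only (a translation-unit fragment, not LLVM's source):

/* Two accesses to one internal-linkage TLS variable in one function:
 * with local-dynamic TLS they should reuse the address from one
 * __tls_get_addr call, then use ld/std with @dtprel offsets. */
static __thread void *head;   /* stands in for _ZL20PrettyStackTraceHead */

void pop_head(void *expected, void *next) {
    if (head == expected)     /* first access: load through the TLS address  */
        head = next;          /* second access: store through the same address */
}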
diff --git a/test/CodeGen/PowerPC/tls-pic.ll b/test/CodeGen/PowerPC/tls-pic.ll
index 9ba372591e6e..b7d9298685de 100644
--- a/test/CodeGen/PowerPC/tls-pic.ll
+++ b/test/CodeGen/PowerPC/tls-pic.ll
@@ -13,38 +13,38 @@ define signext i32 @main() nounwind {
entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
ret i32 %0
}
; OPT0-LABEL: main:
; OPT0: addis [[REG:[0-9]+]], 2, a@got@tlsld@ha
-; OPT0-NEXT: addi 3, [[REG]], a@got@tlsld@l
+; OPT0: addi 3, [[REG]], a@got@tlsld@l
; OPT0: bl __tls_get_addr(a@tlsld)
; OPT0-NEXT: nop
; OPT0: addis [[REG2:[0-9]+]], 3, a@dtprel@ha
-; OPT0-NEXT: addi {{[0-9]+}}, [[REG2]], a@dtprel@l
+; OPT0: addi {{[0-9]+}}, [[REG2]], a@dtprel@l
; OPT0-32-LABEL: main
; OPT0-32: addi {{[0-9]+}}, {{[0-9]+}}, a@got@tlsld
; OPT0-32: bl __tls_get_addr(a@tlsld)@PLT
; OPT0-32: addis [[REG:[0-9]+]], 3, a@dtprel@ha
-; OPT0-32-NEXT: addi {{[0-9]+}}, [[REG]], a@dtprel@l
+; OPT0-32: addi {{[0-9]+}}, [[REG]], a@dtprel@l
; OPT1-32-LABEL: main
; OPT1-32: addi 3, {{[0-9]+}}, a@got@tlsld
; OPT1-32: bl __tls_get_addr(a@tlsld)@PLT
; OPT1-32: addis [[REG:[0-9]+]], 3, a@dtprel@ha
-; OPT1-32-NEXT: addi {{[0-9]+}}, [[REG]], a@dtprel@l
+; OPT1-32: addi {{[0-9]+}}, [[REG]], a@dtprel@l
; Test peephole optimization for thread-local storage using the
; local dynamic model.
; OPT1-LABEL: main:
; OPT1: addis [[REG:[0-9]+]], 2, a@got@tlsld@ha
-; OPT1-NEXT: addi 3, [[REG]], a@got@tlsld@l
+; OPT1: addi 3, [[REG]], a@got@tlsld@l
; OPT1: bl __tls_get_addr(a@tlsld)
; OPT1-NEXT: nop
; OPT1: addis [[REG2:[0-9]+]], 3, a@dtprel@ha
-; OPT1-NEXT: lwa {{[0-9]+}}, a@dtprel@l([[REG2]])
+; OPT1: lwa {{[0-9]+}}, a@dtprel@l([[REG2]])
; Test correct assembly code generation for thread-local storage using
; the general dynamic model.
@@ -55,13 +55,13 @@ define signext i32 @main2() nounwind {
entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval
- %0 = load i32* @a2, align 4
+ %0 = load i32, i32* @a2, align 4
ret i32 %0
}
; OPT1-LABEL: main2
-; OPT1: addis [[REG:[0-9]+]], 2, a2@got@tlsgd@ha
-; OPT1-NEXT: addi 3, [[REG]], a2@got@tlsgd@l
+; OPT1: addis [[REG:[0-9]+]], 2, a2@got@tlsgd@ha
+; OPT1: addi 3, [[REG]], a2@got@tlsgd@l
; OPT1: bl __tls_get_addr(a2@tlsgd)
; OPT1-NEXT: nop
; OPT1-32-LABEL: main2
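tls-pic.ll pairs the two dynamic TLS models: @a is reached through a@got@tlsld (local dynamic) and @a2 through a2@got@tlsgd (general dynamic), each ending in a __tls_get_addr call. As a hedged C-level sketch, linkage is what typically drives that choice under -fPIC; the snippet below is meant to be compiled to assembly only (a2 is deliberately left undefined here), and the names merely echo the test:

/* Sketch only: compile with -fPIC -S on powerpc64le to see both sequences. */
static __thread int a;   /* defined locally: local-dynamic, a@got@tlsld@ha/@l    */
extern __thread int a2;  /* preemptible:     general-dynamic, a2@got@tlsgd@ha/@l */

int read_a(void)  { return a;  }
int read_a2(void) { return a2; }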
diff --git a/test/CodeGen/PowerPC/tls-store2.ll b/test/CodeGen/PowerPC/tls-store2.ll
index f884dd8a0a17..e9aa17e8c0ff 100644
--- a/test/CodeGen/PowerPC/tls-store2.ll
+++ b/test/CodeGen/PowerPC/tls-store2.ll
@@ -19,13 +19,14 @@ entry:
}
; CHECK-LABEL: call_once:
-; CHECK: addis 3, 2, __once_callable@got@tlsgd@ha
-; CHECK: addi 3, 3, __once_callable@got@tlsgd@l
+; CHECK: addi 3, {{[0-9]+}}, __once_callable@got@tlsgd@l
; CHECK: bl __tls_get_addr(__once_callable@tlsgd)
; CHECK-NEXT: nop
-; CHECK: std {{[0-9]+}}, 0(3)
-; CHECK: addis 3, 2, __once_call@got@tlsgd@ha
-; CHECK: addi 3, 3, __once_call@got@tlsgd@l
+; FIXME: We could check here for 'std {{[0-9]+}}, 0(3)', but that no longer
+; works because, with new scheduling freedom, we create a copy of R3 based on the
+; initial scheduling, but don't coalesce it again after we move the instructions
+; so that the copy is no longer necessary.
+; CHECK: addi 3, {{[0-9]+}}, __once_call@got@tlsgd@l
; CHECK: bl __tls_get_addr(__once_call@tlsgd)
; CHECK-NEXT: nop
; CHECK: std {{[0-9]+}}, 0(3)
diff --git a/test/CodeGen/PowerPC/tls.ll b/test/CodeGen/PowerPC/tls.ll
index 59b4de755988..c96e444e02c8 100644
--- a/test/CodeGen/PowerPC/tls.ll
+++ b/test/CodeGen/PowerPC/tls.ll
@@ -30,7 +30,7 @@ define signext i32 @main2() nounwind {
entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval
- %0 = load i32* @a2, align 4
+ %0 = load i32, i32* @a2, align 4
ret i32 %0
}
diff --git a/test/CodeGen/PowerPC/toc-load-sched-bug.ll b/test/CodeGen/PowerPC/toc-load-sched-bug.ll
index e92c4f4018b1..5ac4e3635023 100644
--- a/test/CodeGen/PowerPC/toc-load-sched-bug.ll
+++ b/test/CodeGen/PowerPC/toc-load-sched-bug.ll
@@ -176,51 +176,51 @@ entry:
%FileOrErr = alloca %"class.llvm::ErrorOr", align 8
%ref.tmp = alloca %"class.llvm::SMDiagnostic", align 8
%ref.tmp5 = alloca %"class.std::basic_string", align 8
- %_M_p.i.i.i = getelementptr inbounds %"class.std::basic_string"* %Filename, i64 0, i32 0, i32 0
- %0 = load i8** %_M_p.i.i.i, align 8, !tbaa !1
+ %_M_p.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %Filename, i64 0, i32 0, i32 0
+ %0 = load i8*, i8** %_M_p.i.i.i, align 8, !tbaa !1
%1 = ptrtoint i8* %0 to i64
- %arrayidx.i.i.i = getelementptr inbounds i8* %0, i64 -24
+ %arrayidx.i.i.i = getelementptr inbounds i8, i8* %0, i64 -24
%_M_length.i.i = bitcast i8* %arrayidx.i.i.i to i64*
- %2 = load i64* %_M_length.i.i, align 8, !tbaa !7
+ %2 = load i64, i64* %_M_length.i.i, align 8, !tbaa !7
%.fca.0.insert18 = insertvalue [2 x i64] undef, i64 %1, 0
%.fca.1.insert21 = insertvalue [2 x i64] %.fca.0.insert18, i64 %2, 1
call void @_ZN4llvm12MemoryBuffer14getFileOrSTDINENS_9StringRefEl(%"class.llvm::ErrorOr"* sret %FileOrErr, [2 x i64] %.fca.1.insert21, i64 -1) #3
- %HasError.i24 = getelementptr inbounds %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 1
- %bf.load.i25 = load i8* %HasError.i24, align 8
+ %HasError.i24 = getelementptr inbounds %"class.llvm::ErrorOr", %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 1
+ %bf.load.i25 = load i8, i8* %HasError.i24, align 8
%3 = and i8 %bf.load.i25, 1
%bf.cast.i26 = icmp eq i8 %3, 0
br i1 %bf.cast.i26, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit, label %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit: ; preds = %entry
%retval.sroa.0.0..sroa_cast.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to i64*
- %retval.sroa.0.0.copyload.i = load i64* %retval.sroa.0.0..sroa_cast.i, align 8
- %retval.sroa.3.0..sroa_idx.i = getelementptr inbounds %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 0, i32 0, i32 0, i32 0, i64 8
+ %retval.sroa.0.0.copyload.i = load i64, i64* %retval.sroa.0.0..sroa_cast.i, align 8
+ %retval.sroa.3.0..sroa_idx.i = getelementptr inbounds %"class.llvm::ErrorOr", %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 0, i32 0, i32 0, i32 0, i64 8
%retval.sroa.3.0..sroa_cast.i = bitcast i8* %retval.sroa.3.0..sroa_idx.i to i64*
- %retval.sroa.3.0.copyload.i = load i64* %retval.sroa.3.0..sroa_cast.i, align 8
+ %retval.sroa.3.0.copyload.i = load i64, i64* %retval.sroa.3.0..sroa_cast.i, align 8
%phitmp = trunc i64 %retval.sroa.0.0.copyload.i to i32
%cmp.i = icmp eq i32 %phitmp, 0
br i1 %cmp.i, label %cond.false.i.i, label %if.then
if.then: ; preds = %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
%.c = inttoptr i64 %retval.sroa.3.0.copyload.i to %"class.std::error_category"*
- %4 = load i8** %_M_p.i.i.i, align 8, !tbaa !1
- %arrayidx.i.i.i30 = getelementptr inbounds i8* %4, i64 -24
+ %4 = load i8*, i8** %_M_p.i.i.i, align 8, !tbaa !1
+ %arrayidx.i.i.i30 = getelementptr inbounds i8, i8* %4, i64 -24
%_M_length.i.i31 = bitcast i8* %arrayidx.i.i.i30 to i64*
- %5 = load i64* %_M_length.i.i31, align 8, !tbaa !7
+ %5 = load i64, i64* %_M_length.i.i31, align 8, !tbaa !7
%6 = inttoptr i64 %retval.sroa.3.0.copyload.i to void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)***
- %vtable.i = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*** %6, align 8, !tbaa !11
- %vfn.i = getelementptr inbounds void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vtable.i, i64 3
- %7 = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vfn.i, align 8
+ %vtable.i = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)**, void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*** %6, align 8, !tbaa !11
+ %vfn.i = getelementptr inbounds void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*, void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vtable.i, i64 3
+ %7 = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*, void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vfn.i, align 8
call void %7(%"class.std::basic_string"* sret %ref.tmp5, %"class.std::error_category"* %.c, i32 signext %phitmp) #3
- %call2.i.i = call dereferenceable(8) %"class.std::basic_string"* @_ZNSs6insertEmPKcm(%"class.std::basic_string"* %ref.tmp5, i64 0, i8* getelementptr inbounds ([28 x i8]* @.str, i64 0, i64 0), i64 27) #3
- %_M_p2.i.i.i.i = getelementptr inbounds %"class.std::basic_string"* %call2.i.i, i64 0, i32 0, i32 0
- %8 = load i8** %_M_p2.i.i.i.i, align 8, !tbaa !13
- store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p2.i.i.i.i, align 8, !tbaa !1
- %arrayidx.i.i.i36 = getelementptr inbounds i8* %8, i64 -24
+ %call2.i.i = call dereferenceable(8) %"class.std::basic_string"* @_ZNSs6insertEmPKcm(%"class.std::basic_string"* %ref.tmp5, i64 0, i8* getelementptr inbounds ([28 x i8], [28 x i8]* @.str, i64 0, i64 0), i64 27) #3
+ %_M_p2.i.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %call2.i.i, i64 0, i32 0, i32 0
+ %8 = load i8*, i8** %_M_p2.i.i.i.i, align 8, !tbaa !13
+ store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p2.i.i.i.i, align 8, !tbaa !1
+ %arrayidx.i.i.i36 = getelementptr inbounds i8, i8* %8, i64 -24
%_M_length.i.i37 = bitcast i8* %arrayidx.i.i.i36 to i64*
- %9 = load i64* %_M_length.i.i37, align 8, !tbaa !7
- %Filename.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 2
- %10 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i2.i, i64 0, i32 0
+ %9 = load i64, i64* %_M_length.i.i37, align 8, !tbaa !7
+ %Filename.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 2
+ %10 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i2.i, i64 0, i32 0
%11 = bitcast %"class.llvm::SMDiagnostic"* %ref.tmp to i8*
call void @llvm.memset.p0i8.i64(i8* %11, i8 0, i64 16, i32 8, i1 false) #3
call void @llvm.lifetime.start(i64 1, i8* %10) #3
@@ -228,8 +228,8 @@ if.then: ; preds = %_ZNK4llvm7ErrorOrIS
br i1 %tobool.i.i4.i, label %if.then.i.i6.i, label %if.end.i.i8.i
if.then.i.i6.i: ; preds = %if.then
- %_M_p.i.i.i.i.i.i5.i = getelementptr inbounds %"class.std::basic_string"* %Filename.i, i64 0, i32 0, i32 0
- store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i.i5.i, align 8, !tbaa !13
+ %_M_p.i.i.i.i.i.i5.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %Filename.i, i64 0, i32 0, i32 0
+ store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i.i5.i, align 8, !tbaa !13
br label %_ZNK4llvm9StringRefcvSsEv.exit9.i
if.end.i.i8.i: ; preds = %if.then
@@ -238,21 +238,21 @@ if.end.i.i8.i: ; preds = %if.then
_ZNK4llvm9StringRefcvSsEv.exit9.i: ; preds = %if.end.i.i8.i, %if.then.i.i6.i
call void @llvm.lifetime.end(i64 1, i8* %10) #3
- %LineNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 3
+ %LineNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 3
store i32 -1, i32* %LineNo.i, align 8, !tbaa !14
- %ColumnNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 4
+ %ColumnNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 4
store i32 -1, i32* %ColumnNo.i, align 4, !tbaa !21
- %Kind.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 5
+ %Kind.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 5
store i32 0, i32* %Kind.i, align 8, !tbaa !22
- %Message.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 6
- %12 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i.i, i64 0, i32 0
+ %Message.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 6
+ %12 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i.i, i64 0, i32 0
call void @llvm.lifetime.start(i64 1, i8* %12) #3
%tobool.i.i.i = icmp eq i8* %8, null
br i1 %tobool.i.i.i, label %if.then.i.i.i, label %if.end.i.i.i
if.then.i.i.i: ; preds = %_ZNK4llvm9StringRefcvSsEv.exit9.i
- %_M_p.i.i.i.i.i.i.i = getelementptr inbounds %"class.std::basic_string"* %Message.i, i64 0, i32 0, i32 0
- store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i.i.i, align 8, !tbaa !13
+ %_M_p.i.i.i.i.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %Message.i, i64 0, i32 0, i32 0
+ store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i.i.i, align 8, !tbaa !13
br label %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit
if.end.i.i.i: ; preds = %_ZNK4llvm9StringRefcvSsEv.exit9.i
@@ -261,49 +261,49 @@ if.end.i.i.i: ; preds = %_ZNK4llvm9StringRef
_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit: ; preds = %if.then.i.i.i, %if.end.i.i.i
call void @llvm.lifetime.end(i64 1, i8* %12) #3
- %_M_p.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7, i32 0, i32 0
- store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i, align 8, !tbaa !13
- %Ranges.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8
+ %_M_p.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7, i32 0, i32 0
+ store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i, align 8, !tbaa !13
+ %Ranges.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8
%13 = bitcast %"class.std::vector.79"* %Ranges.i to i8*
call void @llvm.memset.p0i8.i64(i8* %13, i8 0, i64 24, i32 8, i1 false) #3
- %14 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 0
- %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 0
+ %14 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 0
+ %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 0
store i8* %14, i8** %BeginX.i.i.i.i.i.i, align 8, !tbaa !23
- %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 1
+ %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 1
store i8* %14, i8** %EndX.i.i.i.i.i.i, align 8, !tbaa !25
- %CapacityX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 2
- %add.ptr.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 96
+ %CapacityX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 2
+ %add.ptr.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 96
store i8* %add.ptr.i.i.i.i.i.i, i8** %CapacityX.i.i.i.i.i.i, align 8, !tbaa !26
%15 = bitcast %"class.llvm::SMDiagnostic"* %Err to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %15, i8* %11, i64 16, i32 8, i1 false) #3
- %Filename.i38 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 2
+ %Filename.i38 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 2
call void @_ZNSs4swapERSs(%"class.std::basic_string"* %Filename.i38, %"class.std::basic_string"* dereferenceable(8) %Filename.i) #3
- %LineNo.i39 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 3
+ %LineNo.i39 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 3
%16 = bitcast i32* %LineNo.i39 to i8*
%17 = bitcast i32* %LineNo.i to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %16, i8* %17, i64 12, i32 4, i1 false) #3
- %Message.i40 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 6
+ %Message.i40 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 6
call void @_ZNSs4swapERSs(%"class.std::basic_string"* %Message.i40, %"class.std::basic_string"* dereferenceable(8) %Message.i) #3
- %LineContents.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 7
- %LineContents7.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7
+ %LineContents.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 7
+ %LineContents7.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7
call void @_ZNSs4swapERSs(%"class.std::basic_string"* %LineContents.i, %"class.std::basic_string"* dereferenceable(8) %LineContents7.i) #3
- %Ranges.i41 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8
- %_M_start.i7.i.i.i = getelementptr inbounds %"class.std::vector.79"* %Ranges.i41, i64 0, i32 0, i32 0, i32 0
- %18 = load %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
- %_M_finish.i9.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 1
- %_M_end_of_storage.i11.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 2
- %_M_start2.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 0
+ %Ranges.i41 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8
+ %_M_start.i7.i.i.i = getelementptr inbounds %"class.std::vector.79", %"class.std::vector.79"* %Ranges.i41, i64 0, i32 0, i32 0, i32 0
+ %18 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
+ %_M_finish.i9.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 1
+ %_M_end_of_storage.i11.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 2
+ %_M_start2.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 0
%19 = bitcast %"class.std::vector.79"* %Ranges.i41 to i8*
call void @llvm.memset.p0i8.i64(i8* %19, i8 0, i64 16, i32 8, i1 false) #3
- %20 = load %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
+ %20 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* %20, %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* null, %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
- %_M_finish3.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 1
- %21 = load %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
+ %_M_finish3.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 1
+ %21 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* %21, %"struct.std::pair"** %_M_finish.i9.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* null, %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
- %_M_end_of_storage4.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 2
- %22 = load %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
+ %_M_end_of_storage4.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 2
+ %22 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* %22, %"struct.std::pair"** %_M_end_of_storage.i11.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* null, %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
%tobool.i.i.i.i.i.i = icmp eq %"struct.std::pair"* %18, null
@@ -315,18 +315,18 @@ if.then.i.i.i.i.i.i: ; preds = %_ZN4llvm12SMDiagnos
br label %_ZN4llvm12SMDiagnosticaSEOS0_.exit
_ZN4llvm12SMDiagnosticaSEOS0_.exit: ; preds = %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit, %if.then.i.i.i.i.i.i
- %24 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 9, i32 0
- %25 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0
+ %24 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 9, i32 0
+ %25 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0
%call2.i.i42 = call dereferenceable(48) %"class.llvm::SmallVectorImpl.85"* @_ZN4llvm15SmallVectorImplINS_7SMFixItEEaSEOS2_(%"class.llvm::SmallVectorImpl.85"* %24, %"class.llvm::SmallVectorImpl.85"* dereferenceable(48) %25) #3
call void @_ZN4llvm12SMDiagnosticD2Ev(%"class.llvm::SMDiagnostic"* %ref.tmp) #3
- %26 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i, i64 0, i32 0
+ %26 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i, i64 0, i32 0
call void @llvm.lifetime.start(i64 1, i8* %26) #3
%27 = bitcast i8* %arrayidx.i.i.i36 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
%cmp.i.i.i = icmp eq i8* %arrayidx.i.i.i36, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
br i1 %cmp.i.i.i, label %_ZNSsD1Ev.exit, label %if.then.i.i.i45, !prof !28
if.then.i.i.i45: ; preds = %_ZN4llvm12SMDiagnosticaSEOS0_.exit
- %_M_refcount.i.i.i = getelementptr inbounds i8* %8, i64 -8
+ %_M_refcount.i.i.i = getelementptr inbounds i8, i8* %8, i64 -8
%28 = bitcast i8* %_M_refcount.i.i.i to i32*
br i1 icmp ne (i8* bitcast (i32 (i32*, void (i8*)*)* @__pthread_key_create to i8*), i8* null), label %if.then.i.i.i.i, label %if.else.i.i.i.i
@@ -335,12 +335,12 @@ if.then.i.i.i.i: ; preds = %if.then.i.i.i45
call void @llvm.lifetime.start(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
%29 = atomicrmw volatile add i32* %28, i32 -1 acq_rel
store i32 %29, i32* %.atomicdst.i.i.i.i.i, align 4
- %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i = load volatile i32* %.atomicdst.i.i.i.i.i, align 4
+ %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i = load volatile i32, i32* %.atomicdst.i.i.i.i.i, align 4
call void @llvm.lifetime.end(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
if.else.i.i.i.i: ; preds = %if.then.i.i.i45
- %30 = load i32* %28, align 4, !tbaa !29
+ %30 = load i32, i32* %28, align 4, !tbaa !29
%add.i.i.i.i.i = add nsw i32 %30, -1
store i32 %add.i.i.i.i.i, i32* %28, align 4, !tbaa !29
br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
@@ -356,17 +356,17 @@ if.then4.i.i.i: ; preds = %_ZN9__gnu_cxxL27__e
_ZNSsD1Ev.exit: ; preds = %_ZN4llvm12SMDiagnosticaSEOS0_.exit, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i, %if.then4.i.i.i
call void @llvm.lifetime.end(i64 1, i8* %26) #3
- %31 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i47, i64 0, i32 0
+ %31 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i47, i64 0, i32 0
call void @llvm.lifetime.start(i64 1, i8* %31) #3
- %_M_p.i.i.i.i48 = getelementptr inbounds %"class.std::basic_string"* %ref.tmp5, i64 0, i32 0, i32 0
- %32 = load i8** %_M_p.i.i.i.i48, align 8, !tbaa !1
- %arrayidx.i.i.i49 = getelementptr inbounds i8* %32, i64 -24
+ %_M_p.i.i.i.i48 = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %ref.tmp5, i64 0, i32 0, i32 0
+ %32 = load i8*, i8** %_M_p.i.i.i.i48, align 8, !tbaa !1
+ %arrayidx.i.i.i49 = getelementptr inbounds i8, i8* %32, i64 -24
%33 = bitcast i8* %arrayidx.i.i.i49 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
%cmp.i.i.i50 = icmp eq i8* %arrayidx.i.i.i49, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
br i1 %cmp.i.i.i50, label %_ZNSsD1Ev.exit62, label %if.then.i.i.i52, !prof !28
if.then.i.i.i52: ; preds = %_ZNSsD1Ev.exit
- %_M_refcount.i.i.i51 = getelementptr inbounds i8* %32, i64 -8
+ %_M_refcount.i.i.i51 = getelementptr inbounds i8, i8* %32, i64 -8
%34 = bitcast i8* %_M_refcount.i.i.i51 to i32*
br i1 icmp ne (i8* bitcast (i32 (i32*, void (i8*)*)* @__pthread_key_create to i8*), i8* null), label %if.then.i.i.i.i55, label %if.else.i.i.i.i57
@@ -375,12 +375,12 @@ if.then.i.i.i.i55: ; preds = %if.then.i.i.i52
call void @llvm.lifetime.start(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
%35 = atomicrmw volatile add i32* %34, i32 -1 acq_rel
store i32 %35, i32* %.atomicdst.i.i.i.i.i46, align 4
- %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i54 = load volatile i32* %.atomicdst.i.i.i.i.i46, align 4
+ %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i54 = load volatile i32, i32* %.atomicdst.i.i.i.i.i46, align 4
call void @llvm.lifetime.end(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
if.else.i.i.i.i57: ; preds = %if.then.i.i.i52
- %36 = load i32* %34, align 4, !tbaa !29
+ %36 = load i32, i32* %34, align 4, !tbaa !29
%add.i.i.i.i.i56 = add nsw i32 %36, -1
store i32 %add.i.i.i.i.i56, i32* %34, align 4, !tbaa !29
br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
@@ -399,33 +399,33 @@ _ZNSsD1Ev.exit62: ; preds = %_ZNSsD1Ev.exit, %_Z
br label %cleanup
cond.false.i.i: ; preds = %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
- call void @__assert_fail(i8* getelementptr inbounds ([54 x i8]* @.str1, i64 0, i64 0), i8* getelementptr inbounds ([61 x i8]* @.str2, i64 0, i64 0), i32 zeroext 242, i8* getelementptr inbounds ([206 x i8]* @__PRETTY_FUNCTION__._ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv, i64 0, i64 0)) #7
+ call void @__assert_fail(i8* getelementptr inbounds ([54 x i8], [54 x i8]* @.str1, i64 0, i64 0), i8* getelementptr inbounds ([61 x i8], [61 x i8]* @.str2, i64 0, i64 0), i32 zeroext 242, i8* getelementptr inbounds ([206 x i8], [206 x i8]* @__PRETTY_FUNCTION__._ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv, i64 0, i64 0)) #7
unreachable
_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit: ; preds = %entry
%_M_head_impl.i.i.i.i.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to %"class.llvm::MemoryBuffer"**
- %37 = load %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i, align 8, !tbaa !27
+ %37 = load %"class.llvm::MemoryBuffer"*, %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i, align 8, !tbaa !27
%call9 = call %"class.llvm::Module"* @_ZN4llvm7ParseIREPNS_12MemoryBufferERNS_12SMDiagnosticERNS_11LLVMContextE(%"class.llvm::MemoryBuffer"* %37, %"class.llvm::SMDiagnostic"* dereferenceable(200) %Err, %"class.llvm::LLVMContext"* dereferenceable(8) %Context)
br label %cleanup
cleanup: ; preds = %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit, %_ZNSsD1Ev.exit62
%retval.0 = phi %"class.llvm::Module"* [ null, %_ZNSsD1Ev.exit62 ], [ %call9, %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit ]
- %bf.load.i = load i8* %HasError.i24, align 8
+ %bf.load.i = load i8, i8* %HasError.i24, align 8
%38 = and i8 %bf.load.i, 1
%bf.cast.i = icmp eq i8 %38, 0
br i1 %bf.cast.i, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEED2Ev.exit
_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i: ; preds = %cleanup
%_M_head_impl.i.i.i.i.i.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to %"class.llvm::MemoryBuffer"**
- %39 = load %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i.i, align 8, !tbaa !27
+ %39 = load %"class.llvm::MemoryBuffer"*, %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i.i, align 8, !tbaa !27
%cmp.i.i = icmp eq %"class.llvm::MemoryBuffer"* %39, null
br i1 %cmp.i.i, label %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i, label %_ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i
_ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i: ; preds = %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i
%40 = bitcast %"class.llvm::MemoryBuffer"* %39 to void (%"class.llvm::MemoryBuffer"*)***
- %vtable.i.i.i = load void (%"class.llvm::MemoryBuffer"*)*** %40, align 8, !tbaa !11
- %vfn.i.i.i = getelementptr inbounds void (%"class.llvm::MemoryBuffer"*)** %vtable.i.i.i, i64 1
- %41 = load void (%"class.llvm::MemoryBuffer"*)** %vfn.i.i.i, align 8
+ %vtable.i.i.i = load void (%"class.llvm::MemoryBuffer"*)**, void (%"class.llvm::MemoryBuffer"*)*** %40, align 8, !tbaa !11
+ %vfn.i.i.i = getelementptr inbounds void (%"class.llvm::MemoryBuffer"*)*, void (%"class.llvm::MemoryBuffer"*)** %vtable.i.i.i, i64 1
+ %41 = load void (%"class.llvm::MemoryBuffer"*)*, void (%"class.llvm::MemoryBuffer"*)** %vfn.i.i.i, align 8
call void %41(%"class.llvm::MemoryBuffer"* %39) #3
br label %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i
diff --git a/test/CodeGen/PowerPC/trampoline.ll b/test/CodeGen/PowerPC/trampoline.ll
index 3ea46f50e0c0..e1a26dae7291 100644
--- a/test/CodeGen/PowerPC/trampoline.ll
+++ b/test/CodeGen/PowerPC/trampoline.ll
@@ -29,20 +29,20 @@ module asm "\09.globl .objc_class_name_NSBitmapImageRep"
%struct.objc_super = type opaque
@_NSConcreteStackBlock = external global i8* ; <i8**> [#uses=1]
@"\01L_OBJC_SELECTOR_REFERENCES_1" = internal global %struct.objc_selector* bitcast ([34 x i8]* @"\01L_OBJC_METH_VAR_NAME_1" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip" ; <%struct.objc_selector**> [#uses=2]
-@"\01L_OBJC_CLASS_NSBitmapImageRep" = internal global %struct._objc_class { %struct._objc_class* @"\01L_OBJC_METACLASS_NSBitmapImageRep", %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), i8* getelementptr ([17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i32 0, i32 1, i32 0, %struct._objc_ivar_list* null, %struct._objc_method_list* bitcast ({ i8*, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" to %struct._objc_method_list*), %struct.objc_cache* null, %struct._objc_protocol** null, i8* null, %struct._objc_class_ext* null }, section "__OBJC,__class,regular,no_dead_strip" ; <%struct._objc_class*> [#uses=3]
+@"\01L_OBJC_CLASS_NSBitmapImageRep" = internal global %struct._objc_class { %struct._objc_class* @"\01L_OBJC_METACLASS_NSBitmapImageRep", %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), i8* getelementptr ([17 x i8], [17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i32 0, i32 1, i32 0, %struct._objc_ivar_list* null, %struct._objc_method_list* bitcast ({ i8*, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" to %struct._objc_method_list*), %struct.objc_cache* null, %struct._objc_protocol** null, i8* null, %struct._objc_class_ext* null }, section "__OBJC,__class,regular,no_dead_strip" ; <%struct._objc_class*> [#uses=3]
@"\01L_OBJC_SELECTOR_REFERENCES_0" = internal global %struct.objc_selector* bitcast ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip" ; <%struct.objc_selector**> [#uses=2]
@"\01L_OBJC_SYMBOLS" = internal global { i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] } { i32 0, %struct.objc_selector** null, i16 1, i16 0, [1 x %struct._objc_class*] [ %struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep" ] }, section "__OBJC,__symbols,regular,no_dead_strip" ; <{ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }*> [#uses=2]
@"\01L_OBJC_METH_VAR_NAME_0" = internal global [14 x i8] c"copyWithZone:\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[14 x i8]*> [#uses=2]
@"\01L_OBJC_METH_VAR_TYPE_0" = internal global [20 x i8] c"@12@0:4^{_NSZone=}8\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[20 x i8]*> [#uses=1]
-@"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" = internal global { i8*, i32, [1 x %struct._objc_method] } { i8* null, i32 1, [1 x %struct._objc_method] [ %struct._objc_method { %struct.objc_selector* bitcast ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*), i8* getelementptr ([20 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0), i8* bitcast (%struct.objc_object* (%struct.NSBitmapImageRep*, %struct.objc_selector*, %struct.NSZone*)* @"-[NSBitmapImageRep copyWithZone:]" to i8*) } ] }, section "__OBJC,__inst_meth,regular,no_dead_strip" ; <{ i8*, i32, [1 x %struct._objc_method] }*> [#uses=2]
+@"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" = internal global { i8*, i32, [1 x %struct._objc_method] } { i8* null, i32 1, [1 x %struct._objc_method] [ %struct._objc_method { %struct.objc_selector* bitcast ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*), i8* getelementptr ([20 x i8], [20 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0), i8* bitcast (%struct.objc_object* (%struct.NSBitmapImageRep*, %struct.objc_selector*, %struct.NSZone*)* @"-[NSBitmapImageRep copyWithZone:]" to i8*) } ] }, section "__OBJC,__inst_meth,regular,no_dead_strip" ; <{ i8*, i32, [1 x %struct._objc_method] }*> [#uses=2]
@"\01L_OBJC_CLASS_NAME_0" = internal global [17 x i8] c"NSBitmapImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[17 x i8]*> [#uses=1]
@"\01L_OBJC_CLASS_NAME_1" = internal global [11 x i8] c"NSImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[11 x i8]*> [#uses=2]
-@"\01L_OBJC_METACLASS_NSBitmapImageRep" = internal global %struct._objc_class { %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), i8* getelementptr ([17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i32 0, i32 2, i32 48, %struct._objc_ivar_list* null, %struct._objc_method_list* null, %struct.objc_cache* null, %struct._objc_protocol** null, i8* null, %struct._objc_class_ext* null }, section "__OBJC,__meta_class,regular,no_dead_strip" ; <%struct._objc_class*> [#uses=2]
+@"\01L_OBJC_METACLASS_NSBitmapImageRep" = internal global %struct._objc_class { %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), i8* getelementptr ([17 x i8], [17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i32 0, i32 2, i32 48, %struct._objc_ivar_list* null, %struct._objc_method_list* null, %struct.objc_cache* null, %struct._objc_protocol** null, i8* null, %struct._objc_class_ext* null }, section "__OBJC,__meta_class,regular,no_dead_strip" ; <%struct._objc_class*> [#uses=2]
@"\01L_OBJC_METH_VAR_NAME_1" = internal global [34 x i8] c"_performBlockUsingBackingCGImage:\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[34 x i8]*> [#uses=2]
@"\01L_OBJC_IMAGE_INFO" = internal constant [2 x i32] zeroinitializer, section "__OBJC, __image_info,regular" ; <[2 x i32]*> [#uses=1]
@"\01L_OBJC_CLASS_NAME_2" = internal global [1 x i8] zeroinitializer, section "__TEXT,__cstring,cstring_literals", align 4 ; <[1 x i8]*> [#uses=1]
-@"\01L_OBJC_MODULES" = internal global %struct._objc_module { i32 7, i32 16, i8* getelementptr ([1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), %struct._objc_symtab* bitcast ({ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }* @"\01L_OBJC_SYMBOLS" to %struct._objc_symtab*) }, section "__OBJC,__module_info,regular,no_dead_strip" ; <%struct._objc_module*> [#uses=1]
-@llvm.used = appending global [14 x i8*] [ i8* bitcast (%struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1" to i8*), i8* bitcast (%struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep" to i8*), i8* bitcast (%struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0" to i8*), i8* bitcast ({ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }* @"\01L_OBJC_SYMBOLS" to i8*), i8* getelementptr ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0", i32 0, i32 0), i8* getelementptr ([20 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0), i8* bitcast ({ i8*, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" to i8*), i8* getelementptr ([17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i8* getelementptr ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1", i32 0, i32 0), i8* bitcast (%struct._objc_class* @"\01L_OBJC_METACLASS_NSBitmapImageRep" to i8*), i8* getelementptr ([34 x i8]* @"\01L_OBJC_METH_VAR_NAME_1", i32 0, i32 0), i8* bitcast ([2 x i32]* @"\01L_OBJC_IMAGE_INFO" to i8*), i8* getelementptr ([1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), i8* bitcast (%struct._objc_module* @"\01L_OBJC_MODULES" to i8*) ], section "llvm.metadata" ; <[14 x i8*]*> [#uses=0]
+@"\01L_OBJC_MODULES" = internal global %struct._objc_module { i32 7, i32 16, i8* getelementptr ([1 x i8], [1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), %struct._objc_symtab* bitcast ({ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }* @"\01L_OBJC_SYMBOLS" to %struct._objc_symtab*) }, section "__OBJC,__module_info,regular,no_dead_strip" ; <%struct._objc_module*> [#uses=1]
+@llvm.used = appending global [14 x i8*] [ i8* bitcast (%struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1" to i8*), i8* bitcast (%struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep" to i8*), i8* bitcast (%struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0" to i8*), i8* bitcast ({ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }* @"\01L_OBJC_SYMBOLS" to i8*), i8* getelementptr ([14 x i8], [14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0", i32 0, i32 0), i8* getelementptr ([20 x i8], [20 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0), i8* bitcast ({ i8*, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" to i8*), i8* getelementptr ([17 x i8], [17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i8* getelementptr ([11 x i8], [11 x i8]* @"\01L_OBJC_CLASS_NAME_1", i32 0, i32 0), i8* bitcast (%struct._objc_class* @"\01L_OBJC_METACLASS_NSBitmapImageRep" to i8*), i8* getelementptr ([34 x i8], [34 x i8]* @"\01L_OBJC_METH_VAR_NAME_1", i32 0, i32 0), i8* bitcast ([2 x i32]* @"\01L_OBJC_IMAGE_INFO" to i8*), i8* getelementptr ([1 x i8], [1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), i8* bitcast (%struct._objc_module* @"\01L_OBJC_MODULES" to i8*) ], section "llvm.metadata" ; <[14 x i8*]*> [#uses=0]
define internal %struct.objc_object* @"-[NSBitmapImageRep copyWithZone:]"(%struct.NSBitmapImageRep* %self, %struct.objc_selector* %_cmd, %struct.NSZone* %zone) nounwind {
entry:
@@ -62,55 +62,55 @@ entry:
store %struct.NSBitmapImageRep* %self, %struct.NSBitmapImageRep** %self_addr
store %struct.objc_selector* %_cmd, %struct.objc_selector** %_cmd_addr
store %struct.NSZone* %zone, %struct.NSZone** %zone_addr
- %3 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
- %4 = load %struct.NSBitmapImageRep** %self_addr, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
+ %3 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
+ %4 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %self_addr, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
store %struct.NSBitmapImageRep* %4, %struct.NSBitmapImageRep** %3, align 4
%TRAMP.91 = bitcast %struct.__builtin_trampoline* %TRAMP.9 to i8* ; <i8*> [#uses=1]
%FRAME.72 = bitcast %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7 to i8* ; <i8*> [#uses=1]
call void @llvm.init.trampoline(i8* %TRAMP.91, i8* bitcast (void (%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %struct.__block_1*, %struct.CGImage*)* @__helper_1.1632 to i8*), i8* %FRAME.72) ; <i8*> [#uses=1]
%tramp = call i8* @llvm.adjust.trampoline(i8* %TRAMP.91)
store i8* %tramp, i8** %0, align 4
- %5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
- %6 = load i8** %0, align 4 ; <i8*> [#uses=1]
+ %5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
+ %6 = load i8*, i8** %0, align 4 ; <i8*> [#uses=1]
%7 = bitcast i8* %6 to void (%struct.__block_1*, %struct.CGImage*)* ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
store void (%struct.__block_1*, %struct.CGImage*)* %7, void (%struct.__block_1*, %struct.CGImage*)** %5, align 4
store %struct.NSBitmapImageRep* null, %struct.NSBitmapImageRep** %new, align 4
- %8 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %9 = getelementptr %struct.__invoke_impl* %8, i32 0, i32 0 ; <i8**> [#uses=1]
+ %8 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
+ %9 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %8, i32 0, i32 0 ; <i8**> [#uses=1]
store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %9, align 4
- %10 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %11 = getelementptr %struct.__invoke_impl* %10, i32 0, i32 1 ; <i32*> [#uses=1]
+ %10 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
+ %11 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %10, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 67108864, i32* %11, align 4
- %12 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %13 = getelementptr %struct.__invoke_impl* %12, i32 0, i32 2 ; <i32*> [#uses=1]
+ %12 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
+ %13 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %12, i32 0, i32 2 ; <i32*> [#uses=1]
store i32 24, i32* %13, align 4
- %14 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
- %15 = load void (%struct.__block_1*, %struct.CGImage*)** %14, align 4 ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
+ %14 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
+ %15 = load void (%struct.__block_1*, %struct.CGImage*)*, void (%struct.__block_1*, %struct.CGImage*)** %14, align 4 ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
store void (%struct.__block_1*, %struct.CGImage*)* %15, void (%struct.__block_1*, %struct.CGImage*)** %1, align 4
- %16 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %17 = getelementptr %struct.__invoke_impl* %16, i32 0, i32 3 ; <i8**> [#uses=1]
- %18 = load void (%struct.__block_1*, %struct.CGImage*)** %1, align 4 ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
+ %16 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
+ %17 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %16, i32 0, i32 3 ; <i8**> [#uses=1]
+ %18 = load void (%struct.__block_1*, %struct.CGImage*)*, void (%struct.__block_1*, %struct.CGImage*)** %1, align 4 ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
%19 = bitcast void (%struct.__block_1*, %struct.CGImage*)* %18 to i8* ; <i8*> [#uses=1]
store i8* %19, i8** %17, align 4
- %20 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
- %21 = load %struct.NSZone** %zone_addr, align 4 ; <%struct.NSZone*> [#uses=1]
+ %20 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
+ %21 = load %struct.NSZone*, %struct.NSZone** %zone_addr, align 4 ; <%struct.NSZone*> [#uses=1]
store %struct.NSZone* %21, %struct.NSZone** %20, align 4
- %22 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
+ %22 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
store %struct.NSBitmapImageRep** %new, %struct.NSBitmapImageRep*** %22, align 4
- %23 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
- %24 = load %struct.NSBitmapImageRep** %23, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
+ %23 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
+ %24 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %23, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
store %struct.NSBitmapImageRep* %24, %struct.NSBitmapImageRep** %2, align 4
- %25 = load %struct.NSBitmapImageRep** %2, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
+ %25 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %2, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
%26 = bitcast %struct.NSBitmapImageRep* %25 to %struct.objc_object* ; <%struct.objc_object*> [#uses=1]
store %struct.objc_object* %26, %struct.objc_object** %self.1, align 4
- %27 = load %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1", align 4 ; <%struct.objc_selector*> [#uses=1]
+ %27 = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1", align 4 ; <%struct.objc_selector*> [#uses=1]
%__block_holder_tmp_1.03 = bitcast %struct.__block_1* %__block_holder_tmp_1.0 to void (%struct.CGImage*)* ; <void (%struct.CGImage*)*> [#uses=1]
- %28 = load %struct.objc_object** %self.1, align 4 ; <%struct.objc_object*> [#uses=1]
- %29 = call %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...)* inttoptr (i64 4294901504 to %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...)*)(%struct.objc_object* %28, %struct.objc_selector* %27, void (%struct.CGImage*)* %__block_holder_tmp_1.03) nounwind ; <%struct.objc_object*> [#uses=0]
+ %28 = load %struct.objc_object*, %struct.objc_object** %self.1, align 4 ; <%struct.objc_object*> [#uses=1]
+ %29 = call %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...) inttoptr (i64 4294901504 to %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...)*)(%struct.objc_object* %28, %struct.objc_selector* %27, void (%struct.CGImage*)* %__block_holder_tmp_1.03) nounwind ; <%struct.objc_object*> [#uses=0]
br label %return
return: ; preds = %entry
- %retval5 = load %struct.objc_object** %retval ; <%struct.objc_object*> [#uses=1]
+ %retval5 = load %struct.objc_object*, %struct.objc_object** %retval ; <%struct.objc_object*> [#uses=1]
ret %struct.objc_object* %retval5
}
@@ -131,33 +131,33 @@ entry:
store %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %CHAIN.8, %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr
store %struct.__block_1* %_self, %struct.__block_1** %_self_addr
store %struct.CGImage* %cgImage, %struct.CGImage** %cgImage_addr
- %1 = load %struct.__block_1** %_self_addr, align 4 ; <%struct.__block_1*> [#uses=1]
- %2 = getelementptr %struct.__block_1* %1, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
- %3 = load %struct.NSBitmapImageRep*** %2, align 4 ; <%struct.NSBitmapImageRep**> [#uses=1]
+ %1 = load %struct.__block_1*, %struct.__block_1** %_self_addr, align 4 ; <%struct.__block_1*> [#uses=1]
+ %2 = getelementptr %struct.__block_1, %struct.__block_1* %1, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
+ %3 = load %struct.NSBitmapImageRep**, %struct.NSBitmapImageRep*** %2, align 4 ; <%struct.NSBitmapImageRep**> [#uses=1]
store %struct.NSBitmapImageRep** %3, %struct.NSBitmapImageRep*** %new, align 4
- %4 = load %struct.__block_1** %_self_addr, align 4 ; <%struct.__block_1*> [#uses=1]
- %5 = getelementptr %struct.__block_1* %4, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
- %6 = load %struct.NSZone** %5, align 4 ; <%struct.NSZone*> [#uses=1]
+ %4 = load %struct.__block_1*, %struct.__block_1** %_self_addr, align 4 ; <%struct.__block_1*> [#uses=1]
+ %5 = getelementptr %struct.__block_1, %struct.__block_1* %4, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
+ %6 = load %struct.NSZone*, %struct.NSZone** %5, align 4 ; <%struct.NSZone*> [#uses=1]
store %struct.NSZone* %6, %struct.NSZone** %zone, align 4
- %7 = load %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr, align 4 ; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=1]
- %8 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
- %9 = load %struct.NSBitmapImageRep** %8, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
+ %7 = load %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr, align 4 ; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=1]
+ %8 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
+ %9 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %8, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
store %struct.NSBitmapImageRep* %9, %struct.NSBitmapImageRep** %0, align 4
- %10 = load %struct.NSBitmapImageRep** %0, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
+ %10 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %0, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
%11 = bitcast %struct.NSBitmapImageRep* %10 to %struct.objc_object* ; <%struct.objc_object*> [#uses=1]
- %12 = getelementptr %struct._objc_super* %objc_super, i32 0, i32 0 ; <%struct.objc_object**> [#uses=1]
+ %12 = getelementptr %struct._objc_super, %struct._objc_super* %objc_super, i32 0, i32 0 ; <%struct.objc_object**> [#uses=1]
store %struct.objc_object* %11, %struct.objc_object** %12, align 4
- %13 = load %struct._objc_class** getelementptr (%struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep", i32 0, i32 1), align 4 ; <%struct._objc_class*> [#uses=1]
- %14 = getelementptr %struct._objc_super* %objc_super, i32 0, i32 1 ; <%struct._objc_class**> [#uses=1]
+ %13 = load %struct._objc_class*, %struct._objc_class** getelementptr (%struct._objc_class, %struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep", i32 0, i32 1), align 4 ; <%struct._objc_class*> [#uses=1]
+ %14 = getelementptr %struct._objc_super, %struct._objc_super* %objc_super, i32 0, i32 1 ; <%struct._objc_class**> [#uses=1]
store %struct._objc_class* %13, %struct._objc_class** %14, align 4
%objc_super1 = bitcast %struct._objc_super* %objc_super to %struct.objc_super* ; <%struct.objc_super*> [#uses=1]
store %struct.objc_super* %objc_super1, %struct.objc_super** %objc_super.5, align 4
- %15 = load %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0", align 4 ; <%struct.objc_selector*> [#uses=1]
- %16 = load %struct.objc_super** %objc_super.5, align 4 ; <%struct.objc_super*> [#uses=1]
- %17 = load %struct.NSZone** %zone, align 4 ; <%struct.NSZone*> [#uses=1]
- %18 = call %struct.objc_object* (%struct.objc_super*, %struct.objc_selector*, ...)* @objc_msgSendSuper(%struct.objc_super* %16, %struct.objc_selector* %15, %struct.NSZone* %17) nounwind ; <%struct.objc_object*> [#uses=1]
+ %15 = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0", align 4 ; <%struct.objc_selector*> [#uses=1]
+ %16 = load %struct.objc_super*, %struct.objc_super** %objc_super.5, align 4 ; <%struct.objc_super*> [#uses=1]
+ %17 = load %struct.NSZone*, %struct.NSZone** %zone, align 4 ; <%struct.NSZone*> [#uses=1]
+ %18 = call %struct.objc_object* (%struct.objc_super*, %struct.objc_selector*, ...) @objc_msgSendSuper(%struct.objc_super* %16, %struct.objc_selector* %15, %struct.NSZone* %17) nounwind ; <%struct.objc_object*> [#uses=1]
%19 = bitcast %struct.objc_object* %18 to %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep*> [#uses=1]
- %20 = load %struct.NSBitmapImageRep*** %new, align 4 ; <%struct.NSBitmapImageRep**> [#uses=1]
+ %20 = load %struct.NSBitmapImageRep**, %struct.NSBitmapImageRep*** %new, align 4 ; <%struct.NSBitmapImageRep**> [#uses=1]
store %struct.NSBitmapImageRep* %19, %struct.NSBitmapImageRep** %20, align 4
br label %return
diff --git a/test/CodeGen/PowerPC/unal-altivec-wint.ll b/test/CodeGen/PowerPC/unal-altivec-wint.ll
index 7e0963f54b33..b71a98bc83bb 100644
--- a/test/CodeGen/PowerPC/unal-altivec-wint.ll
+++ b/test/CodeGen/PowerPC/unal-altivec-wint.ll
@@ -6,11 +6,11 @@ declare <4 x i32> @llvm.ppc.altivec.lvx(i8*) #1
define <4 x i32> @test1(<4 x i32>* %h) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
%vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
- %v0 = load <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, <4 x i32>* %h, align 8
%a = add <4 x i32> %v0, %vl
ret <4 x i32> %a
@@ -27,11 +27,11 @@ declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*) #0
define <4 x i32> @test2(<4 x i32>* %h, <4 x i32> %d) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
- %v0 = load <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, <4 x i32>* %h, align 8
ret <4 x i32> %v0
diff --git a/test/CodeGen/PowerPC/unal-altivec.ll b/test/CodeGen/PowerPC/unal-altivec.ll
index 7f333a1c508b..02f7ab40f049 100644
--- a/test/CodeGen/PowerPC/unal-altivec.ll
+++ b/test/CodeGen/PowerPC/unal-altivec.ll
@@ -8,20 +8,20 @@ vector.ph:
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds float* %b, i64 %index
+ %0 = getelementptr inbounds float, float* %b, i64 %index
%1 = bitcast float* %0 to <4 x float>*
- %wide.load = load <4 x float>* %1, align 4
+ %wide.load = load <4 x float>, <4 x float>* %1, align 4
%.sum11 = or i64 %index, 4
- %2 = getelementptr float* %b, i64 %.sum11
+ %2 = getelementptr float, float* %b, i64 %.sum11
%3 = bitcast float* %2 to <4 x float>*
- %wide.load8 = load <4 x float>* %3, align 4
+ %wide.load8 = load <4 x float>, <4 x float>* %3, align 4
%4 = fadd <4 x float> %wide.load, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
%5 = fadd <4 x float> %wide.load8, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
- %6 = getelementptr inbounds float* %a, i64 %index
+ %6 = getelementptr inbounds float, float* %a, i64 %index
%7 = bitcast float* %6 to <4 x float>*
store <4 x float> %4, <4 x float>* %7, align 4
%.sum12 = or i64 %index, 4
- %8 = getelementptr float* %a, i64 %.sum12
+ %8 = getelementptr float, float* %a, i64 %.sum12
%9 = bitcast float* %8 to <4 x float>*
store <4 x float> %5, <4 x float>* %9, align 4
%index.next = add i64 %index, 8
diff --git a/test/CodeGen/PowerPC/unal-altivec2.ll b/test/CodeGen/PowerPC/unal-altivec2.ll
index 7464675470f9..0d15b977ca18 100644
--- a/test/CodeGen/PowerPC/unal-altivec2.ll
+++ b/test/CodeGen/PowerPC/unal-altivec2.ll
@@ -12,131 +12,131 @@ vector.body: ; preds = %vector.body, %entry
; CHECK: lvsl
; CHECK: blr
%index = phi i64 [ 0, %entry ], [ %index.next.15, %vector.body ]
- %0 = getelementptr inbounds float* %y, i64 %index
+ %0 = getelementptr inbounds float, float* %y, i64 %index
%1 = bitcast float* %0 to <4 x float>*
- %wide.load = load <4 x float>* %1, align 4
+ %wide.load = load <4 x float>, <4 x float>* %1, align 4
%2 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load)
- %3 = getelementptr inbounds float* %x, i64 %index
+ %3 = getelementptr inbounds float, float* %x, i64 %index
%4 = bitcast float* %3 to <4 x float>*
store <4 x float> %2, <4 x float>* %4, align 4
%index.next = add i64 %index, 4
- %5 = getelementptr inbounds float* %y, i64 %index.next
+ %5 = getelementptr inbounds float, float* %y, i64 %index.next
%6 = bitcast float* %5 to <4 x float>*
- %wide.load.1 = load <4 x float>* %6, align 4
+ %wide.load.1 = load <4 x float>, <4 x float>* %6, align 4
%7 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.1)
- %8 = getelementptr inbounds float* %x, i64 %index.next
+ %8 = getelementptr inbounds float, float* %x, i64 %index.next
%9 = bitcast float* %8 to <4 x float>*
store <4 x float> %7, <4 x float>* %9, align 4
%index.next.1 = add i64 %index.next, 4
- %10 = getelementptr inbounds float* %y, i64 %index.next.1
+ %10 = getelementptr inbounds float, float* %y, i64 %index.next.1
%11 = bitcast float* %10 to <4 x float>*
- %wide.load.2 = load <4 x float>* %11, align 4
+ %wide.load.2 = load <4 x float>, <4 x float>* %11, align 4
%12 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.2)
- %13 = getelementptr inbounds float* %x, i64 %index.next.1
+ %13 = getelementptr inbounds float, float* %x, i64 %index.next.1
%14 = bitcast float* %13 to <4 x float>*
store <4 x float> %12, <4 x float>* %14, align 4
%index.next.2 = add i64 %index.next.1, 4
- %15 = getelementptr inbounds float* %y, i64 %index.next.2
+ %15 = getelementptr inbounds float, float* %y, i64 %index.next.2
%16 = bitcast float* %15 to <4 x float>*
- %wide.load.3 = load <4 x float>* %16, align 4
+ %wide.load.3 = load <4 x float>, <4 x float>* %16, align 4
%17 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.3)
- %18 = getelementptr inbounds float* %x, i64 %index.next.2
+ %18 = getelementptr inbounds float, float* %x, i64 %index.next.2
%19 = bitcast float* %18 to <4 x float>*
store <4 x float> %17, <4 x float>* %19, align 4
%index.next.3 = add i64 %index.next.2, 4
- %20 = getelementptr inbounds float* %y, i64 %index.next.3
+ %20 = getelementptr inbounds float, float* %y, i64 %index.next.3
%21 = bitcast float* %20 to <4 x float>*
- %wide.load.4 = load <4 x float>* %21, align 4
+ %wide.load.4 = load <4 x float>, <4 x float>* %21, align 4
%22 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.4)
- %23 = getelementptr inbounds float* %x, i64 %index.next.3
+ %23 = getelementptr inbounds float, float* %x, i64 %index.next.3
%24 = bitcast float* %23 to <4 x float>*
store <4 x float> %22, <4 x float>* %24, align 4
%index.next.4 = add i64 %index.next.3, 4
- %25 = getelementptr inbounds float* %y, i64 %index.next.4
+ %25 = getelementptr inbounds float, float* %y, i64 %index.next.4
%26 = bitcast float* %25 to <4 x float>*
- %wide.load.5 = load <4 x float>* %26, align 4
+ %wide.load.5 = load <4 x float>, <4 x float>* %26, align 4
%27 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.5)
- %28 = getelementptr inbounds float* %x, i64 %index.next.4
+ %28 = getelementptr inbounds float, float* %x, i64 %index.next.4
%29 = bitcast float* %28 to <4 x float>*
store <4 x float> %27, <4 x float>* %29, align 4
%index.next.5 = add i64 %index.next.4, 4
- %30 = getelementptr inbounds float* %y, i64 %index.next.5
+ %30 = getelementptr inbounds float, float* %y, i64 %index.next.5
%31 = bitcast float* %30 to <4 x float>*
- %wide.load.6 = load <4 x float>* %31, align 4
+ %wide.load.6 = load <4 x float>, <4 x float>* %31, align 4
%32 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.6)
- %33 = getelementptr inbounds float* %x, i64 %index.next.5
+ %33 = getelementptr inbounds float, float* %x, i64 %index.next.5
%34 = bitcast float* %33 to <4 x float>*
store <4 x float> %32, <4 x float>* %34, align 4
%index.next.6 = add i64 %index.next.5, 4
- %35 = getelementptr inbounds float* %y, i64 %index.next.6
+ %35 = getelementptr inbounds float, float* %y, i64 %index.next.6
%36 = bitcast float* %35 to <4 x float>*
- %wide.load.7 = load <4 x float>* %36, align 4
+ %wide.load.7 = load <4 x float>, <4 x float>* %36, align 4
%37 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.7)
- %38 = getelementptr inbounds float* %x, i64 %index.next.6
+ %38 = getelementptr inbounds float, float* %x, i64 %index.next.6
%39 = bitcast float* %38 to <4 x float>*
store <4 x float> %37, <4 x float>* %39, align 4
%index.next.7 = add i64 %index.next.6, 4
- %40 = getelementptr inbounds float* %y, i64 %index.next.7
+ %40 = getelementptr inbounds float, float* %y, i64 %index.next.7
%41 = bitcast float* %40 to <4 x float>*
- %wide.load.8 = load <4 x float>* %41, align 4
+ %wide.load.8 = load <4 x float>, <4 x float>* %41, align 4
%42 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.8)
- %43 = getelementptr inbounds float* %x, i64 %index.next.7
+ %43 = getelementptr inbounds float, float* %x, i64 %index.next.7
%44 = bitcast float* %43 to <4 x float>*
store <4 x float> %42, <4 x float>* %44, align 4
%index.next.8 = add i64 %index.next.7, 4
- %45 = getelementptr inbounds float* %y, i64 %index.next.8
+ %45 = getelementptr inbounds float, float* %y, i64 %index.next.8
%46 = bitcast float* %45 to <4 x float>*
- %wide.load.9 = load <4 x float>* %46, align 4
+ %wide.load.9 = load <4 x float>, <4 x float>* %46, align 4
%47 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.9)
- %48 = getelementptr inbounds float* %x, i64 %index.next.8
+ %48 = getelementptr inbounds float, float* %x, i64 %index.next.8
%49 = bitcast float* %48 to <4 x float>*
store <4 x float> %47, <4 x float>* %49, align 4
%index.next.9 = add i64 %index.next.8, 4
- %50 = getelementptr inbounds float* %y, i64 %index.next.9
+ %50 = getelementptr inbounds float, float* %y, i64 %index.next.9
%51 = bitcast float* %50 to <4 x float>*
- %wide.load.10 = load <4 x float>* %51, align 4
+ %wide.load.10 = load <4 x float>, <4 x float>* %51, align 4
%52 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.10)
- %53 = getelementptr inbounds float* %x, i64 %index.next.9
+ %53 = getelementptr inbounds float, float* %x, i64 %index.next.9
%54 = bitcast float* %53 to <4 x float>*
store <4 x float> %52, <4 x float>* %54, align 4
%index.next.10 = add i64 %index.next.9, 4
- %55 = getelementptr inbounds float* %y, i64 %index.next.10
+ %55 = getelementptr inbounds float, float* %y, i64 %index.next.10
%56 = bitcast float* %55 to <4 x float>*
- %wide.load.11 = load <4 x float>* %56, align 4
+ %wide.load.11 = load <4 x float>, <4 x float>* %56, align 4
%57 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.11)
- %58 = getelementptr inbounds float* %x, i64 %index.next.10
+ %58 = getelementptr inbounds float, float* %x, i64 %index.next.10
%59 = bitcast float* %58 to <4 x float>*
store <4 x float> %57, <4 x float>* %59, align 4
%index.next.11 = add i64 %index.next.10, 4
- %60 = getelementptr inbounds float* %y, i64 %index.next.11
+ %60 = getelementptr inbounds float, float* %y, i64 %index.next.11
%61 = bitcast float* %60 to <4 x float>*
- %wide.load.12 = load <4 x float>* %61, align 4
+ %wide.load.12 = load <4 x float>, <4 x float>* %61, align 4
%62 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.12)
- %63 = getelementptr inbounds float* %x, i64 %index.next.11
+ %63 = getelementptr inbounds float, float* %x, i64 %index.next.11
%64 = bitcast float* %63 to <4 x float>*
store <4 x float> %62, <4 x float>* %64, align 4
%index.next.12 = add i64 %index.next.11, 4
- %65 = getelementptr inbounds float* %y, i64 %index.next.12
+ %65 = getelementptr inbounds float, float* %y, i64 %index.next.12
%66 = bitcast float* %65 to <4 x float>*
- %wide.load.13 = load <4 x float>* %66, align 4
+ %wide.load.13 = load <4 x float>, <4 x float>* %66, align 4
%67 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.13)
- %68 = getelementptr inbounds float* %x, i64 %index.next.12
+ %68 = getelementptr inbounds float, float* %x, i64 %index.next.12
%69 = bitcast float* %68 to <4 x float>*
store <4 x float> %67, <4 x float>* %69, align 4
%index.next.13 = add i64 %index.next.12, 4
- %70 = getelementptr inbounds float* %y, i64 %index.next.13
+ %70 = getelementptr inbounds float, float* %y, i64 %index.next.13
%71 = bitcast float* %70 to <4 x float>*
- %wide.load.14 = load <4 x float>* %71, align 4
+ %wide.load.14 = load <4 x float>, <4 x float>* %71, align 4
%72 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.14)
- %73 = getelementptr inbounds float* %x, i64 %index.next.13
+ %73 = getelementptr inbounds float, float* %x, i64 %index.next.13
%74 = bitcast float* %73 to <4 x float>*
store <4 x float> %72, <4 x float>* %74, align 4
%index.next.14 = add i64 %index.next.13, 4
- %75 = getelementptr inbounds float* %y, i64 %index.next.14
+ %75 = getelementptr inbounds float, float* %y, i64 %index.next.14
%76 = bitcast float* %75 to <4 x float>*
- %wide.load.15 = load <4 x float>* %76, align 4
+ %wide.load.15 = load <4 x float>, <4 x float>* %76, align 4
%77 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.15)
- %78 = getelementptr inbounds float* %x, i64 %index.next.14
+ %78 = getelementptr inbounds float, float* %x, i64 %index.next.14
%79 = bitcast float* %78 to <4 x float>*
store <4 x float> %77, <4 x float>* %79, align 4
%index.next.15 = add i64 %index.next.14, 4
@@ -153,7 +153,7 @@ declare <4 x float> @llvm_cos_v4f32(<4 x float>) #1
define <2 x double> @bar(double* %x) {
entry:
%p = bitcast double* %x to <2 x double>*
- %r = load <2 x double>* %p, align 8
+ %r = load <2 x double>, <2 x double>* %p, align 8
; CHECK-LABEL: @bar
; CHECK-NOT: lvsl
diff --git a/test/CodeGen/PowerPC/unaligned.ll b/test/CodeGen/PowerPC/unaligned.ll
index 64c03cdda35e..6b23b18762d3 100644
--- a/test/CodeGen/PowerPC/unaligned.ll
+++ b/test/CodeGen/PowerPC/unaligned.ll
@@ -5,7 +5,7 @@ target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
define void @foo1(i16* %p, i16* %r) nounwind {
entry:
- %v = load i16* %p, align 1
+ %v = load i16, i16* %p, align 1
store i16 %v, i16* %r, align 1
ret void
@@ -20,7 +20,7 @@ entry:
define void @foo2(i32* %p, i32* %r) nounwind {
entry:
- %v = load i32* %p, align 1
+ %v = load i32, i32* %p, align 1
store i32 %v, i32* %r, align 1
ret void
@@ -35,7 +35,7 @@ entry:
define void @foo3(i64* %p, i64* %r) nounwind {
entry:
- %v = load i64* %p, align 1
+ %v = load i64, i64* %p, align 1
store i64 %v, i64* %r, align 1
ret void
@@ -50,7 +50,7 @@ entry:
define void @foo4(float* %p, float* %r) nounwind {
entry:
- %v = load float* %p, align 1
+ %v = load float, float* %p, align 1
store float %v, float* %r, align 1
ret void
@@ -65,7 +65,7 @@ entry:
define void @foo5(double* %p, double* %r) nounwind {
entry:
- %v = load double* %p, align 1
+ %v = load double, double* %p, align 1
store double %v, double* %r, align 1
ret void
@@ -80,7 +80,7 @@ entry:
define void @foo6(<4 x float>* %p, <4 x float>* %r) nounwind {
entry:
- %v = load <4 x float>* %p, align 1
+ %v = load <4 x float>, <4 x float>* %p, align 1
store <4 x float> %v, <4 x float>* %r, align 1
ret void
diff --git a/test/CodeGen/PowerPC/unwind-dw2-g.ll b/test/CodeGen/PowerPC/unwind-dw2-g.ll
index 4ae6ff24a038..8bd158867c79 100644
--- a/test/CodeGen/PowerPC/unwind-dw2-g.ll
+++ b/test/CodeGen/PowerPC/unwind-dw2-g.ll
@@ -21,15 +21,15 @@ attributes #0 = { nounwind }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !11}
-!0 = !{!"0x11\0012\00clang version 3.4\000\00\000\00\000", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ] [/tmp/unwind-dw2.c] [DW_LANG_C99]
-!1 = !{!"/tmp/unwind-dw2.c", !"/tmp"}
+!0 = !DICompileUnit(language: DW_LANG_C99, producer: "clang version 3.4", isOptimized: false, emissionKind: 0, file: !1, enums: !2, retainedTypes: !2, subprograms: !3, globals: !2, imports: !2)
+!1 = !DIFile(filename: "/tmp/unwind-dw2.c", directory: "/tmp")
!2 = !{}
!3 = !{!4}
-!4 = !{!"0x2e\00foo\00foo\00\001\000\001\000\006\000\000\001", !1, !5, !6, null, void ()* @foo, null, null, !2} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
-!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/tmp/unwind-dw2.c]
-!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !DISubprogram(name: "foo", line: 1, isLocal: false, isDefinition: true, virtualIndex: 6, isOptimized: false, scopeLine: 1, file: !1, scope: !5, type: !6, function: void ()* @foo, variables: !2)
+!5 = !DIFile(filename: "/tmp/unwind-dw2.c", directory: "/tmp")
+!6 = !DISubroutineType(types: !7)
!7 = !{null}
!8 = !{i32 2, !"Dwarf Version", i32 3}
-!9 = !MDLocation(line: 2, scope: !4)
-!10 = !MDLocation(line: 3, scope: !4)
-!11 = !{i32 1, !"Debug Info Version", i32 2}
+!9 = !DILocation(line: 2, scope: !4)
+!10 = !DILocation(line: 3, scope: !4)
+!11 = !{i32 1, !"Debug Info Version", i32 3}
diff --git a/test/CodeGen/PowerPC/vaddsplat.ll b/test/CodeGen/PowerPC/vaddsplat.ll
index 4236fabea0a6..70a7ea0c5533 100644
--- a/test/CodeGen/PowerPC/vaddsplat.ll
+++ b/test/CodeGen/PowerPC/vaddsplat.ll
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux-gnu"
%v16i8 = type <16 x i8>
define void @test_v4i32_pos_even(%v4i32* %P, %v4i32* %S) {
- %p = load %v4i32* %P
+ %p = load %v4i32, %v4i32* %P
%r = add %v4i32 %p, < i32 18, i32 18, i32 18, i32 18 >
store %v4i32 %r, %v4i32* %S
ret void
@@ -21,7 +21,7 @@ define void @test_v4i32_pos_even(%v4i32* %P, %v4i32* %S) {
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]
define void @test_v4i32_neg_even(%v4i32* %P, %v4i32* %S) {
- %p = load %v4i32* %P
+ %p = load %v4i32, %v4i32* %P
%r = add %v4i32 %p, < i32 -28, i32 -28, i32 -28, i32 -28 >
store %v4i32 %r, %v4i32* %S
ret void
@@ -32,7 +32,7 @@ define void @test_v4i32_neg_even(%v4i32* %P, %v4i32* %S) {
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]
define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
- %p = load %v8i16* %P
+ %p = load %v8i16, %v8i16* %P
%r = add %v8i16 %p, < i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30 >
store %v8i16 %r, %v8i16* %S
ret void
@@ -43,7 +43,7 @@ define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]
define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
- %p = load %v8i16* %P
+ %p = load %v8i16, %v8i16* %P
%r = add %v8i16 %p, < i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32 >
store %v8i16 %r, %v8i16* %S
ret void
@@ -54,7 +54,7 @@ define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]
define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
- %p = load %v16i8* %P
+ %p = load %v16i8, %v16i8* %P
%r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16 >
store %v16i8 %r, %v16i8* %S
ret void
@@ -65,7 +65,7 @@ define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]
define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
- %p = load %v16i8* %P
+ %p = load %v16i8, %v16i8* %P
%r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18 >
store %v16i8 %r, %v16i8* %S
ret void
@@ -76,7 +76,7 @@ define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]
define void @test_v4i32_pos_odd(%v4i32* %P, %v4i32* %S) {
- %p = load %v4i32* %P
+ %p = load %v4i32, %v4i32* %P
%r = add %v4i32 %p, < i32 27, i32 27, i32 27, i32 27 >
store %v4i32 %r, %v4i32* %S
ret void
@@ -88,7 +88,7 @@ define void @test_v4i32_pos_odd(%v4i32* %P, %v4i32* %S) {
; CHECK: vsubuwm {{[0-9]+}}, [[REG1]], [[REG2]]
define void @test_v4i32_neg_odd(%v4i32* %P, %v4i32* %S) {
- %p = load %v4i32* %P
+ %p = load %v4i32, %v4i32* %P
%r = add %v4i32 %p, < i32 -27, i32 -27, i32 -27, i32 -27 >
store %v4i32 %r, %v4i32* %S
ret void
@@ -100,7 +100,7 @@ define void @test_v4i32_neg_odd(%v4i32* %P, %v4i32* %S) {
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG2]]
define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
- %p = load %v8i16* %P
+ %p = load %v8i16, %v8i16* %P
%r = add %v8i16 %p, < i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31 >
store %v8i16 %r, %v8i16* %S
ret void
@@ -112,7 +112,7 @@ define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
; CHECK: vsubuhm {{[0-9]+}}, [[REG1]], [[REG2]]
define void @test_v8i16_neg_odd(%v8i16* %P, %v8i16* %S) {
- %p = load %v8i16* %P
+ %p = load %v8i16, %v8i16* %P
%r = add %v8i16 %p, < i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31 >
store %v8i16 %r, %v8i16* %S
ret void
@@ -124,7 +124,7 @@ define void @test_v8i16_neg_odd(%v8i16* %P, %v8i16* %S) {
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG2]]
define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
- %p = load %v16i8* %P
+ %p = load %v16i8, %v16i8* %P
%r = add %v16i8 %p, < i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17 >
store %v16i8 %r, %v16i8* %S
ret void
@@ -136,7 +136,7 @@ define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
; CHECK: vsububm {{[0-9]+}}, [[REG1]], [[REG2]]
define void @test_v16i8_neg_odd(%v16i8* %P, %v16i8* %S) {
- %p = load %v16i8* %P
+ %p = load %v16i8, %v16i8* %P
%r = add %v16i8 %p, < i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17 >
store %v16i8 %r, %v16i8* %S
ret void
diff --git a/test/CodeGen/PowerPC/varargs-struct-float.ll b/test/CodeGen/PowerPC/varargs-struct-float.ll
index 0fd9fc50892e..7bb5a3444cf5 100644
--- a/test/CodeGen/PowerPC/varargs-struct-float.ll
+++ b/test/CodeGen/PowerPC/varargs-struct-float.ll
@@ -8,11 +8,11 @@ target triple = "powerpc64-unknown-linux-gnu"
define void @foo(float inreg %s.coerce) nounwind {
entry:
%s = alloca %struct.Sf1, align 4
- %coerce.dive = getelementptr %struct.Sf1* %s, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0
store float %s.coerce, float* %coerce.dive, align 1
- %coerce.dive1 = getelementptr %struct.Sf1* %s, i32 0, i32 0
- %0 = load float* %coerce.dive1, align 1
- call void (i32, ...)* @testvaSf1(i32 1, float inreg %0)
+ %coerce.dive1 = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0
+ %0 = load float, float* %coerce.dive1, align 1
+ call void (i32, ...) @testvaSf1(i32 1, float inreg %0)
ret void
}
diff --git a/test/CodeGen/PowerPC/vcmp-fold.ll b/test/CodeGen/PowerPC/vcmp-fold.ll
index 7a42c27d2b4a..ee167083d4d6 100644
--- a/test/CodeGen/PowerPC/vcmp-fold.ll
+++ b/test/CodeGen/PowerPC/vcmp-fold.ll
@@ -5,11 +5,11 @@
define void @test(<4 x float>* %x, <4 x float>* %y, i32* %P) {
entry:
- %tmp = load <4 x float>* %x ; <<4 x float>> [#uses=1]
- %tmp2 = load <4 x float>* %y ; <<4 x float>> [#uses=1]
+ %tmp = load <4 x float>, <4 x float>* %x ; <<4 x float>> [#uses=1]
+ %tmp2 = load <4 x float>, <4 x float>* %y ; <<4 x float>> [#uses=1]
%tmp.upgrd.1 = call i32 @llvm.ppc.altivec.vcmpbfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp2 ) ; <i32> [#uses=1]
- %tmp4 = load <4 x float>* %x ; <<4 x float>> [#uses=1]
- %tmp6 = load <4 x float>* %y ; <<4 x float>> [#uses=1]
+ %tmp4 = load <4 x float>, <4 x float>* %x ; <<4 x float>> [#uses=1]
+ %tmp6 = load <4 x float>, <4 x float>* %y ; <<4 x float>> [#uses=1]
%tmp.upgrd.2 = call <4 x i32> @llvm.ppc.altivec.vcmpbfp( <4 x float> %tmp4, <4 x float> %tmp6 ) ; <<4 x i32>> [#uses=1]
%tmp7 = bitcast <4 x i32> %tmp.upgrd.2 to <4 x float> ; <<4 x float>> [#uses=1]
store <4 x float> %tmp7, <4 x float>* %x
diff --git a/test/CodeGen/PowerPC/vec-abi-align.ll b/test/CodeGen/PowerPC/vec-abi-align.ll
index 5075ff2b8c07..48f1adbe5e59 100644
--- a/test/CodeGen/PowerPC/vec-abi-align.ll
+++ b/test/CodeGen/PowerPC/vec-abi-align.ll
@@ -26,52 +26,52 @@ entry:
; Function Attrs: nounwind
define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, %struct.s2* byval nocapture readonly %vs) #0 {
entry:
- %m = getelementptr inbounds %struct.s2* %vs, i64 0, i32 0
- %0 = load i64* %m, align 8
+ %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
+ %0 = load i64, i64* %m, align 8
store i64 %0, i64* @n, align 8
- %v = getelementptr inbounds %struct.s2* %vs, i64 0, i32 1
- %1 = load <4 x float>* %v, align 16
+ %v = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 1
+ %1 = load <4 x float>, <4 x float>* %v, align 16
store <4 x float> %1, <4 x float>* @ve, align 16
ret void
; CHECK-LABEL: @test2
-; CHECK: ld {{[0-9]+}}, 112(1)
-; CHECK: li [[REG16:[0-9]+]], 16
-; CHECK: addi [[REGB:[0-9]+]], 1, 112
-; CHECK: lvx 2, [[REGB]], [[REG16]]
+; CHECK-DAG: ld {{[0-9]+}}, 112(1)
+; CHECK-DAG: li [[REG16:[0-9]+]], 16
+; CHECK-DAG: addi [[REGB:[0-9]+]], 1, 112
+; CHECK-DAG: lvx 2, [[REGB]], [[REG16]]
; CHECK: blr
; CHECK-VSX-LABEL: @test2
-; CHECK-VSX: ld {{[0-9]+}}, 112(1)
-; CHECK-VSX: li [[REG16:[0-9]+]], 16
-; CHECK-VSX: addi [[REGB:[0-9]+]], 1, 112
-; CHECK-VSX: lxvw4x {{[0-9]+}}, [[REGB]], [[REG16]]
+; CHECK-VSX-DAG: ld {{[0-9]+}}, 112(1)
+; CHECK-VSX-DAG: li [[REG16:[0-9]+]], 16
+; CHECK-VSX-DAG: addi [[REGB:[0-9]+]], 1, 112
+; CHECK-VSX-DAG: lxvw4x {{[0-9]+}}, [[REGB]], [[REG16]]
; CHECK-VSX: blr
}
; Function Attrs: nounwind
define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, %struct.s2* byval nocapture readonly %vs) #0 {
entry:
- %m = getelementptr inbounds %struct.s2* %vs, i64 0, i32 0
- %0 = load i64* %m, align 8
+ %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
+ %0 = load i64, i64* %m, align 8
store i64 %0, i64* @n, align 8
- %v = getelementptr inbounds %struct.s2* %vs, i64 0, i32 1
- %1 = load <4 x float>* %v, align 16
+ %v = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 1
+ %1 = load <4 x float>, <4 x float>* %v, align 16
store <4 x float> %1, <4 x float>* @ve, align 16
ret void
; CHECK-LABEL: @test3
-; CHECK: ld {{[0-9]+}}, 128(1)
-; CHECK: li [[REG16:[0-9]+]], 16
-; CHECK: addi [[REGB:[0-9]+]], 1, 128
-; CHECK: lvx 2, [[REGB]], [[REG16]]
+; CHECK-DAG: ld {{[0-9]+}}, 128(1)
+; CHECK-DAG: li [[REG16:[0-9]+]], 16
+; CHECK-DAG: addi [[REGB:[0-9]+]], 1, 128
+; CHECK-DAG: lvx 2, [[REGB]], [[REG16]]
; CHECK: blr
; CHECK-VSX-LABEL: @test3
-; CHECK-VSX: ld {{[0-9]+}}, 128(1)
-; CHECK-VSX: li [[REG16:[0-9]+]], 16
-; CHECK-VSX: addi [[REGB:[0-9]+]], 1, 128
-; CHECK-VSX: lxvw4x {{[0-9]+}}, [[REGB]], [[REG16]]
+; CHECK-VSX-DAG: ld {{[0-9]+}}, 128(1)
+; CHECK-VSX-DAG: li [[REG16:[0-9]+]], 16
+; CHECK-VSX-DAG: addi [[REGB:[0-9]+]], 1, 128
+; CHECK-VSX-DAG: lxvw4x {{[0-9]+}}, [[REGB]], [[REG16]]
; CHECK-VSX: blr
}
diff --git a/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll b/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
new file mode 100644
index 000000000000..6b41141163ad
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
@@ -0,0 +1,62 @@
+; Check VMX 64-bit integer operations
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+define <2 x i64> @test_add(<2 x i64> %x, <2 x i64> %y) nounwind {
+ %result = add <2 x i64> %x, %y
+ ret <2 x i64> %result
+; CHECK: vaddudm 2, 2, 3
+}
+
+define <2 x i64> @increment_by_one(<2 x i64> %x) nounwind {
+ %result = add <2 x i64> %x, <i64 1, i64 1>
+ ret <2 x i64> %result
+; CHECK: vaddudm 2, 2, 3
+}
+
+define <2 x i64> @increment_by_val(<2 x i64> %x, i64 %val) nounwind {
+ %tmpvec = insertelement <2 x i64> <i64 0, i64 0>, i64 %val, i32 0
+ %tmpvec2 = insertelement <2 x i64> %tmpvec, i64 %val, i32 1
+ %result = add <2 x i64> %x, %tmpvec2
+ ret <2 x i64> %result
+; CHECK: vaddudm 2, 2, 3
+; FIXME: This is currently generating the following instruction sequence
+;
+; std 5, -8(1)
+; std 5, -16(1)
+; addi 3, 1, -16
+; ori 2, 2, 0
+; lxvd2x 35, 0, 3
+; vaddudm 2, 2, 3
+; blr
+;
+; This will almost certainly cause a load-hit-store hazard.
+; Since val is a value parameter, it should not need to be
+; saved onto the stack at all (unless we're using this to set
+; up the vector register). Instead, it would be better to splat
+; the value into a vector register.
+}
+
+define <2 x i64> @test_sub(<2 x i64> %x, <2 x i64> %y) nounwind {
+ %result = sub <2 x i64> %x, %y
+ ret <2 x i64> %result
+; CHECK: vsubudm 2, 2, 3
+}
+
+define <2 x i64> @decrement_by_one(<2 x i64> %x) nounwind {
+ %result = sub <2 x i64> %x, <i64 -1, i64 -1>
+ ret <2 x i64> %result
+; CHECK: vsubudm 2, 2, 3
+}
+
+define <2 x i64> @decrement_by_val(<2 x i64> %x, i64 %val) nounwind {
+ %tmpvec = insertelement <2 x i64> <i64 0, i64 0>, i64 %val, i32 0
+ %tmpvec2 = insertelement <2 x i64> %tmpvec, i64 %val, i32 1
+ %result = sub <2 x i64> %x, %tmpvec2
+ ret <2 x i64> %result
+; CHECK: vsubudm 2, 2, 3
+}
+
+
+
diff --git a/test/CodeGen/PowerPC/vec_add_sub_quadword.ll b/test/CodeGen/PowerPC/vec_add_sub_quadword.ll
new file mode 100644
index 000000000000..f7ebf479755c
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_add_sub_quadword.ll
@@ -0,0 +1,130 @@
+; Check VMX 128-bit integer operations
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+define <1 x i128> @test_add(<1 x i128> %x, <1 x i128> %y) nounwind {
+ %result = add <1 x i128> %x, %y
+ ret <1 x i128> %result
+; CHECK-LABEL: @test_add
+; CHECK: vadduqm 2, 2, 3
+}
+
+define <1 x i128> @increment_by_one(<1 x i128> %x) nounwind {
+ %result = add <1 x i128> %x, <i128 1>
+ ret <1 x i128> %result
+; CHECK-LABEL: @increment_by_one
+; CHECK: vadduqm 2, 2, 3
+}
+
+define <1 x i128> @increment_by_val(<1 x i128> %x, i128 %val) nounwind {
+ %tmpvec = insertelement <1 x i128> <i128 0>, i128 %val, i32 0
+ %tmpvec2 = insertelement <1 x i128> %tmpvec, i128 %val, i32 1
+ %result = add <1 x i128> %x, %tmpvec2
+ ret <1 x i128> %result
+; CHECK-LABEL: @increment_by_val
+; CHECK: vadduqm 2, 2, 3
+}
+
+define <1 x i128> @test_sub(<1 x i128> %x, <1 x i128> %y) nounwind {
+ %result = sub <1 x i128> %x, %y
+ ret <1 x i128> %result
+; CHECK-LABEL: @test_sub
+; CHECK: vsubuqm 2, 2, 3
+}
+
+define <1 x i128> @decrement_by_one(<1 x i128> %x) nounwind {
+ %result = sub <1 x i128> %x, <i128 1>
+ ret <1 x i128> %result
+; CHECK-LABEL: @decrement_by_one
+; CHECK: vsubuqm 2, 2, 3
+}
+
+define <1 x i128> @decrement_by_val(<1 x i128> %x, i128 %val) nounwind {
+ %tmpvec = insertelement <1 x i128> <i128 0>, i128 %val, i32 0
+ %tmpvec2 = shufflevector <1 x i128> %tmpvec, <1 x i128> undef, <1 x i32> zeroinitializer
+ %result = sub <1 x i128> %x, %tmpvec2
+ ret <1 x i128> %result
+; CHECK-LABEL: @decrement_by_val
+; CHECK: vsubuqm 2, 2, 3
+}
+
+declare <1 x i128> @llvm.ppc.altivec.vaddeuqm(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z) nounwind readnone
+declare <1 x i128> @llvm.ppc.altivec.vaddcuq(<1 x i128> %x,
+ <1 x i128> %y) nounwind readnone
+declare <1 x i128> @llvm.ppc.altivec.vaddecuq(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z) nounwind readnone
+declare <1 x i128> @llvm.ppc.altivec.vsubeuqm(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z) nounwind readnone
+declare <1 x i128> @llvm.ppc.altivec.vsubcuq(<1 x i128> %x,
+ <1 x i128> %y) nounwind readnone
+declare <1 x i128> @llvm.ppc.altivec.vsubecuq(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z) nounwind readnone
+
+define <1 x i128> @test_vaddeuqm(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z) nounwind {
+ %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddeuqm(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z)
+ ret <1 x i128> %tmp
+; CHECK-LABEL: @test_vaddeuqm
+; CHECK: vaddeuqm 2, 2, 3, 4
+}
+
+define <1 x i128> @test_vaddcuq(<1 x i128> %x,
+ <1 x i128> %y) nounwind {
+ %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddcuq(<1 x i128> %x,
+ <1 x i128> %y)
+ ret <1 x i128> %tmp
+; CHECK-LABEL: @test_vaddcuq
+; CHECK: vaddcuq 2, 2, 3
+}
+
+define <1 x i128> @test_vaddecuq(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z) nounwind {
+ %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddecuq(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z)
+ ret <1 x i128> %tmp
+; CHECK-LABEL: @test_vaddecuq
+; CHECK: vaddecuq 2, 2, 3, 4
+}
+
+define <1 x i128> @test_vsubeuqm(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z) nounwind {
+ %tmp = tail call <1 x i128> @llvm.ppc.altivec.vsubeuqm(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z)
+ ret <1 x i128> %tmp
+; CHECK-LABEL: test_vsubeuqm
+; CHECK: vsubeuqm 2, 2, 3, 4
+}
+
+define <1 x i128> @test_vsubcuq(<1 x i128> %x,
+ <1 x i128> %y) nounwind {
+ %tmp = tail call <1 x i128> @llvm.ppc.altivec.vsubcuq(<1 x i128> %x,
+ <1 x i128> %y)
+ ret <1 x i128> %tmp
+; CHECK-LABEL: test_vsubcuq
+; CHECK: vsubcuq 2, 2, 3
+}
+
+define <1 x i128> @test_vsubecuq(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z) nounwind {
+ %tmp = tail call <1 x i128> @llvm.ppc.altivec.vsubecuq(<1 x i128> %x,
+ <1 x i128> %y,
+ <1 x i128> %z)
+ ret <1 x i128> %tmp
+; CHECK-LABEL: test_vsubecuq
+; CHECK: vsubecuq 2, 2, 3, 4
+}
+
diff --git a/test/CodeGen/PowerPC/vec_auto_constant.ll b/test/CodeGen/PowerPC/vec_auto_constant.ll
index 973f0890b139..ba8ef531014f 100644
--- a/test/CodeGen/PowerPC/vec_auto_constant.ll
+++ b/test/CodeGen/PowerPC/vec_auto_constant.ll
@@ -25,8 +25,8 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store <16 x i8> %x, <16 x i8>* %x_addr
store <16 x i8> <i8 22, i8 21, i8 20, i8 3, i8 25, i8 24, i8 23, i8 3, i8 28, i8 27, i8 26, i8 3, i8 31, i8 30, i8 29, i8 3>, <16 x i8>* %temp, align 16
- %0 = load <16 x i8>* %x_addr, align 16 ; <<16 x i8>> [#uses=1]
- %1 = load <16 x i8>* %temp, align 16 ; <<16 x i8>> [#uses=1]
+ %0 = load <16 x i8>, <16 x i8>* %x_addr, align 16 ; <<16 x i8>> [#uses=1]
+ %1 = load <16 x i8>, <16 x i8>* %temp, align 16 ; <<16 x i8>> [#uses=1]
%tmp = add <16 x i8> %0, %1 ; <<16 x i8>> [#uses=1]
store <16 x i8> %tmp, <16 x i8>* @baz, align 16
br label %return
diff --git a/test/CodeGen/PowerPC/vec_br_cmp.ll b/test/CodeGen/PowerPC/vec_br_cmp.ll
index c34d850c0ac7..14c9620143a1 100644
--- a/test/CodeGen/PowerPC/vec_br_cmp.ll
+++ b/test/CodeGen/PowerPC/vec_br_cmp.ll
@@ -5,8 +5,8 @@
; A predicate compare used immediately by a branch should not generate an mfcr.
define void @test(<4 x float>* %A, <4 x float>* %B) {
- %tmp = load <4 x float>* %A ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>* %B ; <<4 x float>> [#uses=1]
+ %tmp = load <4 x float>, <4 x float>* %A ; <<4 x float>> [#uses=1]
+ %tmp3 = load <4 x float>, <4 x float>* %B ; <<4 x float>> [#uses=1]
%tmp.upgrd.1 = tail call i32 @llvm.ppc.altivec.vcmpeqfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp3 ) ; <i32> [#uses=1]
%tmp.upgrd.2 = icmp eq i32 %tmp.upgrd.1, 0 ; <i1> [#uses=1]
br i1 %tmp.upgrd.2, label %cond_true, label %UnifiedReturnBlock
diff --git a/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll b/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
index 7e58ec0bdef4..3b8507728aea 100644
--- a/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
+++ b/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
@@ -8,7 +8,7 @@ define void @foo() nounwind ssp {
; CHECK: _foo:
; CHECK-NOT: stw
entry:
- %tmp0 = load <16 x i8>* @a, align 16
+ %tmp0 = load <16 x i8>, <16 x i8>* @a, align 16
%tmp180.i = extractelement <16 x i8> %tmp0, i32 0 ; <i8> [#uses=1]
%tmp181.i = insertelement <16 x i8> <i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp180.i, i32 2 ; <<16 x i8>> [#uses=1]
%tmp182.i = extractelement <16 x i8> %tmp0, i32 1 ; <i8> [#uses=1]
diff --git a/test/CodeGen/PowerPC/vec_clz.ll b/test/CodeGen/PowerPC/vec_clz.ll
new file mode 100644
index 000000000000..01cdecdbb762
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_clz.ll
@@ -0,0 +1,40 @@
+; Check the vclz* instructions that were added in P8
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>) nounwind readnone
+
+define <16 x i8> @test_v16i8(<16 x i8> %x) nounwind readnone {
+ %vcnt = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %x)
+ ret <16 x i8> %vcnt
+; CHECK: @test_v16i8
+; CHECK: vclzb 2, 2
+; CHECK: blr
+}
+
+define <8 x i16> @test_v8i16(<8 x i16> %x) nounwind readnone {
+ %vcnt = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %x)
+ ret <8 x i16> %vcnt
+; CHECK: @test_v8i16
+; CHECK: vclzh 2, 2
+; CHECK: blr
+}
+
+define <4 x i32> @test_v4i32(<4 x i32> %x) nounwind readnone {
+ %vcnt = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x)
+ ret <4 x i32> %vcnt
+; CHECK: @test_v4i32
+; CHECK: vclzw 2, 2
+; CHECK: blr
+}
+
+define <2 x i64> @test_v2i64(<2 x i64> %x) nounwind readnone {
+ %vcnt = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %x)
+ ret <2 x i64> %vcnt
+; CHECK: @test_v2i64
+; CHECK: vclzd 2, 2
+; CHECK: blr
+}
diff --git a/test/CodeGen/PowerPC/vec_cmpd.ll b/test/CodeGen/PowerPC/vec_cmpd.ll
new file mode 100644
index 000000000000..4a06ed9ffafd
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_cmpd.ll
@@ -0,0 +1,258 @@
+; Test the doubleword comparison instructions that were added in POWER8
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+define <2 x i64> @v2si64_cmp(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+ %cmp = icmp eq <2 x i64> %x, %y
+ %result = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %result
+; CHECK-LABEL: v2si64_cmp:
+; CHECK: vcmpequd 2, 2, 3
+}
+
+define <4 x i64> @v4si64_cmp(<4 x i64> %x, <4 x i64> %y) nounwind readnone {
+ %cmp = icmp eq <4 x i64> %x, %y
+ %result = sext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %result
+; CHECK-LABEL: v4si64_cmp
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <8 x i64> @v8si64_cmp(<8 x i64> %x, <8 x i64> %y) nounwind readnone {
+ %cmp = icmp eq <8 x i64> %x, %y
+ %result = sext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %result
+; CHECK-LABEL: v8si64_cmp
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <16 x i64> @v16si64_cmp(<16 x i64> %x, <16 x i64> %y) nounwind readnone {
+ %cmp = icmp eq <16 x i64> %x, %y
+ %result = sext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %result
+; CHECK-LABEL: v16si64_cmp
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <32 x i64> @v32si64_cmp(<32 x i64> %x, <32 x i64> %y) nounwind readnone {
+ %cmp = icmp eq <32 x i64> %x, %y
+ %result = sext <32 x i1> %cmp to <32 x i64>
+ ret <32 x i64> %result
+; CHECK-LABEL: v32si64_cmp
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+; Greater than signed
+define <2 x i64> @v2si64_cmp_gt(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+ %cmp = icmp sgt <2 x i64> %x, %y
+ %result = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %result
+; CHECK-LABEL: v2si64_cmp_gt
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <4 x i64> @v4si64_cmp_gt(<4 x i64> %x, <4 x i64> %y) nounwind readnone {
+ %cmp = icmp sgt <4 x i64> %x, %y
+ %result = sext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %result
+; CHECK-LABEL: v4si64_cmp_gt
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <8 x i64> @v8si64_cmp_gt(<8 x i64> %x, <8 x i64> %y) nounwind readnone {
+ %cmp = icmp sgt <8 x i64> %x, %y
+ %result = sext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %result
+; CHECK-LABEL: v8si64_cmp_gt
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <16 x i64> @v16si64_cmp_gt(<16 x i64> %x, <16 x i64> %y) nounwind readnone {
+ %cmp = icmp sgt <16 x i64> %x, %y
+ %result = sext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %result
+; CHECK-LABEL: v16si64_cmp_gt
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <32 x i64> @v32si64_cmp_gt(<32 x i64> %x, <32 x i64> %y) nounwind readnone {
+ %cmp = icmp sgt <32 x i64> %x, %y
+ %result = sext <32 x i1> %cmp to <32 x i64>
+ ret <32 x i64> %result
+; CHECK-LABEL: v32si64_cmp_gt
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+; Greater than unsigned
+define <2 x i64> @v2ui64_cmp_gt(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+ %cmp = icmp ugt <2 x i64> %x, %y
+ %result = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %result
+; CHECK-LABEL: v2ui64_cmp_gt
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <4 x i64> @v4ui64_cmp_gt(<4 x i64> %x, <4 x i64> %y) nounwind readnone {
+ %cmp = icmp ugt <4 x i64> %x, %y
+ %result = sext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %result
+; CHECK-LABEL: v4ui64_cmp_gt
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <8 x i64> @v8ui64_cmp_gt(<8 x i64> %x, <8 x i64> %y) nounwind readnone {
+ %cmp = icmp ugt <8 x i64> %x, %y
+ %result = sext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %result
+; CHECK-LABEL: v8ui64_cmp_gt
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <16 x i64> @v16ui64_cmp_gt(<16 x i64> %x, <16 x i64> %y) nounwind readnone {
+ %cmp = icmp ugt <16 x i64> %x, %y
+ %result = sext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %result
+; CHECK-LABEL: v16ui64_cmp_gt
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <32 x i64> @v32ui64_cmp_gt(<32 x i64> %x, <32 x i64> %y) nounwind readnone {
+ %cmp = icmp ugt <32 x i64> %x, %y
+ %result = sext <32 x i1> %cmp to <32 x i64>
+ ret <32 x i64> %result
+; CHECK-LABEL: v32ui64_cmp_gt
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+; Check the intrinsics also
+declare <2 x i64> @llvm.ppc.altivec.vcmpequd(<2 x i64>, <2 x i64>) nounwind readnone
+declare i32 @llvm.ppc.altivec.vcmpequd.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64>, <2 x i64>) nounwind readnone
+declare i32 @llvm.ppc.altivec.vcmpgtsd.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64>, <2 x i64>) nounwind readnone
+declare i32 @llvm.ppc.altivec.vcmpgtud.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @test_vcmpequd(<2 x i64> %x, <2 x i64> %y) {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vcmpequd(<2 x i64> %x, <2 x i64> %y)
+ ret <2 x i64> %tmp
+; CHECK-LABEL: test_vcmpequd:
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define i32 @test_vcmpequd_p(<2 x i64> %x, <2 x i64> %y) {
+ %tmp = tail call i32 @llvm.ppc.altivec.vcmpequd.p(i32 2, <2 x i64> %x, <2 x i64> %y)
+ ret i32 %tmp
+; CHECK-LABEL: test_vcmpequd_p:
+; CHECK: vcmpequd. {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <2 x i64> @test_vcmpgtsd(<2 x i64> %x, <2 x i64> %y) {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %x, <2 x i64> %y)
+ ret <2 x i64> %tmp
+; CHECK-LABEL: test_vcmpgtsd
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define i32 @test_vcmpgtsd_p(<2 x i64> %x, <2 x i64> %y) {
+ %tmp = tail call i32 @llvm.ppc.altivec.vcmpgtsd.p(i32 2, <2 x i64> %x, <2 x i64> %y)
+ ret i32 %tmp
+; CHECK-LABEL: test_vcmpgtsd_p
+; CHECK: vcmpgtsd. {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <2 x i64> @test_vcmpgtud(<2 x i64> %x, <2 x i64> %y) {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %x, <2 x i64> %y)
+ ret <2 x i64> %tmp
+; CHECK-LABEL: test_vcmpgtud
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define i32 @test_vcmpgtud_p(<2 x i64> %x, <2 x i64> %y) {
+ %tmp = tail call i32 @llvm.ppc.altivec.vcmpgtud.p(i32 2, <2 x i64> %x, <2 x i64> %y)
+ ret i32 %tmp
+; CHECK-LABEL: test_vcmpgtud_p
+; CHECK: vcmpgtud. {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+
+
+
diff --git a/test/CodeGen/PowerPC/vec_constants.ll b/test/CodeGen/PowerPC/vec_constants.ll
index f16b9f511f53..858b85dce543 100644
--- a/test/CodeGen/PowerPC/vec_constants.ll
+++ b/test/CodeGen/PowerPC/vec_constants.ll
@@ -1,16 +1,14 @@
-; RUN: llc -O0 -mcpu=pwr7 < %s | FileCheck %s
-
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
-target triple = "powerpc64-unknown-linux-gnu"
+; RUN: llc -O0 -mcpu=pwr7 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -O0 -mcpu=pwr7 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
define void @test1(<4 x i32>* %P1, <4 x i32>* %P2, <4 x float>* %P3) nounwind {
- %tmp = load <4 x i32>* %P1 ; <<4 x i32>> [#uses=1]
+ %tmp = load <4 x i32>, <4 x i32>* %P1 ; <<4 x i32>> [#uses=1]
%tmp4 = and <4 x i32> %tmp, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
store <4 x i32> %tmp4, <4 x i32>* %P1
- %tmp7 = load <4 x i32>* %P2 ; <<4 x i32>> [#uses=1]
+ %tmp7 = load <4 x i32>, <4 x i32>* %P2 ; <<4 x i32>> [#uses=1]
%tmp9 = and <4 x i32> %tmp7, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 > ; <<4 x i32>> [#uses=1]
store <4 x i32> %tmp9, <4 x i32>* %P2
- %tmp.upgrd.1 = load <4 x float>* %P3 ; <<4 x float>> [#uses=1]
+ %tmp.upgrd.1 = load <4 x float>, <4 x float>* %P3 ; <<4 x float>> [#uses=1]
%tmp11 = bitcast <4 x float> %tmp.upgrd.1 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp12 = and <4 x i32> %tmp11, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 > ; <<4 x i32>> [#uses=1]
%tmp13 = bitcast <4 x i32> %tmp12 to <4 x float> ; <<4 x float>> [#uses=1]
diff --git a/test/CodeGen/PowerPC/vec_conv.ll b/test/CodeGen/PowerPC/vec_conv.ll
index a39ae9100355..6e19f5a010d2 100644
--- a/test/CodeGen/PowerPC/vec_conv.ll
+++ b/test/CodeGen/PowerPC/vec_conv.ll
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define void @v4f32_to_v4i32(<4 x float> %x, <4 x i32>* nocapture %y) nounwind {
entry:
- %0 = load <4 x float>* @cte_float, align 16
+ %0 = load <4 x float>, <4 x float>* @cte_float, align 16
%mul = fmul <4 x float> %0, %x
%1 = fptosi <4 x float> %mul to <4 x i32>
store <4 x i32> %1, <4 x i32>* %y, align 16
@@ -23,7 +23,7 @@ entry:
define void @v4f32_to_v4u32(<4 x float> %x, <4 x i32>* nocapture %y) nounwind {
entry:
- %0 = load <4 x float>* @cte_float, align 16
+ %0 = load <4 x float>, <4 x float>* @cte_float, align 16
%mul = fmul <4 x float> %0, %x
%1 = fptoui <4 x float> %mul to <4 x i32>
store <4 x i32> %1, <4 x i32>* %y, align 16
@@ -35,7 +35,7 @@ entry:
define void @v4i32_to_v4f32(<4 x i32> %x, <4 x float>* nocapture %y) nounwind {
entry:
- %0 = load <4 x i32>* @cte_int, align 16
+ %0 = load <4 x i32>, <4 x i32>* @cte_int, align 16
%mul = mul <4 x i32> %0, %x
%1 = sitofp <4 x i32> %mul to <4 x float>
store <4 x float> %1, <4 x float>* %y, align 16
@@ -47,7 +47,7 @@ entry:
define void @v4u32_to_v4f32(<4 x i32> %x, <4 x float>* nocapture %y) nounwind {
entry:
- %0 = load <4 x i32>* @cte_int, align 16
+ %0 = load <4 x i32>, <4 x i32>* @cte_int, align 16
%mul = mul <4 x i32> %0, %x
%1 = uitofp <4 x i32> %mul to <4 x float>
store <4 x float> %1, <4 x float>* %y, align 16
diff --git a/test/CodeGen/PowerPC/vec_fneg.ll b/test/CodeGen/PowerPC/vec_fneg.ll
index e01e65979f6f..d6f6def64ea2 100644
--- a/test/CodeGen/PowerPC/vec_fneg.ll
+++ b/test/CodeGen/PowerPC/vec_fneg.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vsubfp
define void @t(<4 x float>* %A) {
- %tmp2 = load <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %A
%tmp3 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp2
store <4 x float> %tmp3, <4 x float>* %A
ret void
diff --git a/test/CodeGen/PowerPC/vec_minmax.ll b/test/CodeGen/PowerPC/vec_minmax.ll
new file mode 100644
index 000000000000..e9ba6a01a9b8
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_minmax.ll
@@ -0,0 +1,34 @@
+; Test the vector min/max doubleword instructions added for P8
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+declare <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @test_vmaxsd(<2 x i64> %x, <2 x i64> %y) {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %x, <2 x i64> %y)
+ ret <2 x i64> %tmp
+; CHECK: vmaxsd 2, 2, 3
+}
+
+define <2 x i64> @test_vmaxud(<2 x i64> %x, <2 x i64> %y) {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64> %x, <2 x i64> %y)
+ ret <2 x i64> %tmp
+; CHECK: vmaxud 2, 2, 3
+}
+
+define <2 x i64> @test_vminsd(<2 x i64> %x, <2 x i64> %y) {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64> %x, <2 x i64> %y)
+ ret <2 x i64> %tmp
+; CHECK: vminsd 2, 2, 3
+}
+
+define <2 x i64> @test_vminud(<2 x i64> %x, <2 x i64> %y) {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64> %x, <2 x i64> %y)
+ ret <2 x i64> %tmp
+; CHECK: vminud 2, 2, 3
+}
+
+
diff --git a/test/CodeGen/PowerPC/vec_misaligned.ll b/test/CodeGen/PowerPC/vec_misaligned.ll
index 49f11e4e2604..ac639d719911 100644
--- a/test/CodeGen/PowerPC/vec_misaligned.ll
+++ b/test/CodeGen/PowerPC/vec_misaligned.ll
@@ -19,18 +19,18 @@ entry:
store i32 %x, i32* %x_addr
%ap1 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
call void @llvm.va_start( i8* %ap1 )
- %tmp = load i8** %ap, align 4 ; <i8*> [#uses=1]
+ %tmp = load i8*, i8** %ap, align 4 ; <i8*> [#uses=1]
store i8* %tmp, i8** %ap.0, align 4
- %tmp2 = load i8** %ap.0, align 4 ; <i8*> [#uses=1]
- %tmp3 = getelementptr i8* %tmp2, i64 16 ; <i8*> [#uses=1]
+ %tmp2 = load i8*, i8** %ap.0, align 4 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr i8, i8* %tmp2, i64 16 ; <i8*> [#uses=1]
store i8* %tmp3, i8** %ap, align 4
- %tmp4 = load i8** %ap.0, align 4 ; <i8*> [#uses=1]
+ %tmp4 = load i8*, i8** %ap.0, align 4 ; <i8*> [#uses=1]
%tmp45 = bitcast i8* %tmp4 to %struct.S2203* ; <%struct.S2203*> [#uses=1]
- %tmp6 = getelementptr %struct.S2203* @s, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
- %tmp7 = getelementptr %struct.S2203* %tmp45, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
- %tmp8 = getelementptr %struct.u16qi* %tmp6, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
- %tmp9 = getelementptr %struct.u16qi* %tmp7, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
- %tmp10 = load <16 x i8>* %tmp9, align 4 ; <<16 x i8>> [#uses=1]
+ %tmp6 = getelementptr %struct.S2203, %struct.S2203* @s, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
+ %tmp7 = getelementptr %struct.S2203, %struct.S2203* %tmp45, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
+ %tmp8 = getelementptr %struct.u16qi, %struct.u16qi* %tmp6, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
+ %tmp9 = getelementptr %struct.u16qi, %struct.u16qi* %tmp7, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
+ %tmp10 = load <16 x i8>, <16 x i8>* %tmp9, align 4 ; <<16 x i8>> [#uses=1]
; CHECK: lvsl
; CHECK: vperm
; CHECK-LE: lvsr
diff --git a/test/CodeGen/PowerPC/vec_mul.ll b/test/CodeGen/PowerPC/vec_mul.ll
index 86596d4b0a87..e1c9217ff135 100644
--- a/test/CodeGen/PowerPC/vec_mul.ll
+++ b/test/CodeGen/PowerPC/vec_mul.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -march=ppc32 -mattr=+altivec -mattr=-vsx | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -march=ppc32 -mattr=+altivec -mattr=-vsx -mattr=-power8-altivec | FileCheck %s
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=-vsx -mcpu=pwr7 | FileCheck %s
-; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=-vsx -mcpu=pwr8 | FileCheck %s -check-prefix=CHECK-LE
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=-vsx -mcpu=pwr8 -mattr=-power8-altivec | FileCheck %s -check-prefix=CHECK-LE
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=+vsx -mcpu=pwr7 | FileCheck %s -check-prefix=CHECK-VSX
-; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=+vsx -mcpu=pwr8 | FileCheck %s -check-prefix=CHECK-LE-VSX
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=+vsx -mcpu=pwr8 -mattr=-power8-altivec | FileCheck %s -check-prefix=CHECK-LE-VSX
define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
- %tmp = load <4 x i32>* %X ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x i32>* %Y ; <<4 x i32>> [#uses=1]
+ %tmp = load <4 x i32>, <4 x i32>* %X ; <<4 x i32>> [#uses=1]
+ %tmp2 = load <4 x i32>, <4 x i32>* %Y ; <<4 x i32>> [#uses=1]
%tmp3 = mul <4 x i32> %tmp, %tmp2 ; <<4 x i32>> [#uses=1]
ret <4 x i32> %tmp3
}
@@ -24,8 +24,8 @@ define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
; CHECK-LE-VSX-NOT: mullw
define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
- %tmp = load <8 x i16>* %X ; <<8 x i16>> [#uses=1]
- %tmp2 = load <8 x i16>* %Y ; <<8 x i16>> [#uses=1]
+ %tmp = load <8 x i16>, <8 x i16>* %X ; <<8 x i16>> [#uses=1]
+ %tmp2 = load <8 x i16>, <8 x i16>* %Y ; <<8 x i16>> [#uses=1]
%tmp3 = mul <8 x i16> %tmp, %tmp2 ; <<8 x i16>> [#uses=1]
ret <8 x i16> %tmp3
}
@@ -43,8 +43,8 @@ define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
; CHECK-LE-VSX-NOT: mullw
define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
- %tmp = load <16 x i8>* %X ; <<16 x i8>> [#uses=1]
- %tmp2 = load <16 x i8>* %Y ; <<16 x i8>> [#uses=1]
+ %tmp = load <16 x i8>, <16 x i8>* %X ; <<16 x i8>> [#uses=1]
+ %tmp2 = load <16 x i8>, <16 x i8>* %Y ; <<16 x i8>> [#uses=1]
%tmp3 = mul <16 x i8> %tmp, %tmp2 ; <<16 x i8>> [#uses=1]
ret <16 x i8> %tmp3
}
@@ -68,8 +68,8 @@ define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
; CHECK-LE-VSX-NOT: mullw
define <4 x float> @test_float(<4 x float>* %X, <4 x float>* %Y) {
- %tmp = load <4 x float>* %X
- %tmp2 = load <4 x float>* %Y
+ %tmp = load <4 x float>, <4 x float>* %X
+ %tmp2 = load <4 x float>, <4 x float>* %Y
%tmp3 = fmul <4 x float> %tmp, %tmp2
ret <4 x float> %tmp3
}
diff --git a/test/CodeGen/PowerPC/vec_mul_even_odd.ll b/test/CodeGen/PowerPC/vec_mul_even_odd.ll
new file mode 100644
index 000000000000..b24bafd196dd
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_mul_even_odd.ll
@@ -0,0 +1,42 @@
+; Check the vector multiply even/odd word instructions that were added in P8
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+declare <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32>, <4 x i32>) nounwind readnone
+declare <4 x i32> @llvm.ppc.altivec.vmuluwm(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @test_vmuleuw(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32> %x, <4 x i32> %y)
+ ret <2 x i64> %tmp
+; CHECK: vmuleuw 2, 2, 3
+}
+
+define <2 x i64> @test_vmulesw(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32> %x, <4 x i32> %y)
+ ret <2 x i64> %tmp
+; CHECK: vmulesw 2, 2, 3
+}
+
+define <2 x i64> @test_vmulouw(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32> %x, <4 x i32> %y)
+ ret <2 x i64> %tmp
+; CHECK: vmulouw 2, 2, 3
+}
+
+define <2 x i64> @test_vmulosw(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32> %x, <4 x i32> %y)
+ ret <2 x i64> %tmp
+; CHECK: vmulosw 2, 2, 3
+}
+
+define <4 x i32> @test_vmuluwm(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
+ %tmp = mul <4 x i32> %x, %y
+ ret <4 x i32> %tmp
+; CHECK-LABEL: test_vmuluwm
+; CHECK: vmuluwm 2, 2, 3
+}
+
diff --git a/test/CodeGen/PowerPC/vec_perf_shuffle.ll b/test/CodeGen/PowerPC/vec_perf_shuffle.ll
index 2c3594d224fe..f8b37fae7c23 100644
--- a/test/CodeGen/PowerPC/vec_perf_shuffle.ll
+++ b/test/CodeGen/PowerPC/vec_perf_shuffle.ll
@@ -1,36 +1,36 @@
; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep vperm
define <4 x float> @test_uu72(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>* %P2 ; <<4 x float>> [#uses=1]
+ %V1 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
+ %V2 = load <4 x float>, <4 x float>* %P2 ; <<4 x float>> [#uses=1]
%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 undef, i32 undef, i32 7, i32 2 > ; <<4 x float>> [#uses=1]
ret <4 x float> %V3
}
define <4 x float> @test_30u5(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>* %P2 ; <<4 x float>> [#uses=1]
+ %V1 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
+ %V2 = load <4 x float>, <4 x float>* %P2 ; <<4 x float>> [#uses=1]
%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 0, i32 undef, i32 5 > ; <<4 x float>> [#uses=1]
ret <4 x float> %V3
}
define <4 x float> @test_3u73(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>* %P2 ; <<4 x float>> [#uses=1]
+ %V1 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
+ %V2 = load <4 x float>, <4 x float>* %P2 ; <<4 x float>> [#uses=1]
%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 undef, i32 7, i32 3 > ; <<4 x float>> [#uses=1]
ret <4 x float> %V3
}
define <4 x float> @test_3774(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>* %P2 ; <<4 x float>> [#uses=1]
+ %V1 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
+ %V2 = load <4 x float>, <4 x float>* %P2 ; <<4 x float>> [#uses=1]
%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 7, i32 7, i32 4 > ; <<4 x float>> [#uses=1]
ret <4 x float> %V3
}
define <4 x float> @test_4450(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>* %P2 ; <<4 x float>> [#uses=1]
+ %V1 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
+ %V2 = load <4 x float>, <4 x float>* %P2 ; <<4 x float>> [#uses=1]
%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 4, i32 4, i32 5, i32 0 > ; <<4 x float>> [#uses=1]
ret <4 x float> %V3
}
diff --git a/test/CodeGen/PowerPC/vec_popcnt.ll b/test/CodeGen/PowerPC/vec_popcnt.ll
new file mode 100644
index 000000000000..0ce9dfac1df6
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_popcnt.ll
@@ -0,0 +1,72 @@
+; Check the vpopcnt* instructions that were added in P8.
+; In addition, check conversions to/from v2i64 in VMX registers, support for which was also added in P8.
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
+
+define <16 x i8> @test_v16i8_v2i64(<2 x i64> %x) nounwind readnone {
+ %tmp = bitcast <2 x i64> %x to <16 x i8>
+ %vcnt = tail call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp)
+ ret <16 x i8> %vcnt
+; CHECK: @test_v16i8_v2i64
+; CHECK: vpopcntb 2, 2
+; CHECK: blr
+}
+
+define <8 x i16> @test_v8i16_v2i64(<2 x i64> %x) nounwind readnone {
+ %tmp = bitcast <2 x i64> %x to <8 x i16>
+ %vcnt = tail call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %tmp)
+ ret <8 x i16> %vcnt
+; CHECK: @test_v8i16_v2i64
+; CHECK: vpopcnth 2, 2
+; CHECK: blr
+}
+
+define <4 x i32> @test_v4i32_v2i64(<2 x i64> %x) nounwind readnone {
+ %tmp = bitcast <2 x i64> %x to <4 x i32>
+ %vcnt = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %tmp)
+ ret <4 x i32> %vcnt
+; CHECK: @test_v4i32_v2i64
+; CHECK: vpopcntw 2, 2
+; CHECK: blr
+}
+
+define <2 x i64> @test_v2i64_v2i64(<2 x i64> %x) nounwind readnone {
+ %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
+ ret <2 x i64> %vcnt
+; CHECK: @test_v2i64_v2i64
+; CHECK: vpopcntd 2, 2
+; CHECK: blr
+}
+
+define <2 x i64> @test_v2i64_v4i32(<4 x i32> %x) nounwind readnone {
+ %tmp = bitcast <4 x i32> %x to <2 x i64>
+ %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
+ ret <2 x i64> %vcnt
+; CHECK: @test_v2i64_v4i32
+; CHECK: vpopcntd 2, 2
+; CHECK: blr
+}
+
+
+define <2 x i64> @test_v2i64_v8i16(<8 x i16> %x) nounwind readnone {
+ %tmp = bitcast <8 x i16> %x to <2 x i64>
+ %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
+ ret <2 x i64> %vcnt
+; CHECK: @test_v2i64_v8i16
+; CHECK: vpopcntd 2, 2
+; CHECK: blr
+}
+
+define <2 x i64> @test_v2i64_v16i8(<16 x i8> %x) nounwind readnone {
+ %tmp = bitcast <16 x i8> %x to <2 x i64>
+ %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
+ ret <2 x i64> %vcnt
+; CHECK: @test_v2i64_v16i8
+; CHECK: vpopcntd 2, 2
+; CHECK: blr
+}
diff --git a/test/CodeGen/PowerPC/vec_rotate_shift.ll b/test/CodeGen/PowerPC/vec_rotate_shift.ll
new file mode 100644
index 000000000000..1a2e9578e039
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_rotate_shift.ll
@@ -0,0 +1,36 @@
+; Test the vector rotate and shift doubleword instructions that were added in P8
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+declare <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vsld(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vsrd(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vsrad(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @test_vrld(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+ %tmp = tail call <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64> %x, <2 x i64> %y)
+ ret <2 x i64> %tmp
+; CHECK: vrld 2, 2, 3
+}
+
+define <2 x i64> @test_vsld(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+ %tmp = shl <2 x i64> %x, %y
+ ret <2 x i64> %tmp
+; CHECK-LABEL: @test_vsld
+; CHECK: vsld 2, 2, 3
+}
+
+define <2 x i64> @test_vsrd(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+ %tmp = lshr <2 x i64> %x, %y
+ ret <2 x i64> %tmp
+; CHECK-LABEL: @test_vsrd
+; CHECK: vsrd 2, 2, 3
+}
+
+define <2 x i64> @test_vsrad(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+ %tmp = ashr <2 x i64> %x, %y
+ ret <2 x i64> %tmp
+; CHECK-LABEL: @test_vsrad
+; CHECK: vsrad 2, 2, 3
+}
+
diff --git a/test/CodeGen/PowerPC/vec_shuffle.ll b/test/CodeGen/PowerPC/vec_shuffle.ll
index 82706321c1c1..a942dd1c41c9 100644
--- a/test/CodeGen/PowerPC/vec_shuffle.ll
+++ b/test/CodeGen/PowerPC/vec_shuffle.ll
@@ -9,8 +9,8 @@
define void @VSLDOI_xy(<8 x i16>* %A, <8 x i16>* %B) {
entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=1]
- %tmp2 = load <8 x i16>* %B ; <<8 x i16>> [#uses=1]
+ %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=1]
+ %tmp2 = load <8 x i16>, <8 x i16>* %B ; <<8 x i16>> [#uses=1]
%tmp.upgrd.1 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=11]
%tmp2.upgrd.2 = bitcast <8 x i16> %tmp2 to <16 x i8> ; <<16 x i8>> [#uses=5]
%tmp.upgrd.3 = extractelement <16 x i8> %tmp.upgrd.1, i32 5 ; <i8> [#uses=1]
@@ -51,8 +51,8 @@ entry:
}
define void @VSLDOI_xx(<8 x i16>* %A, <8 x i16>* %B) {
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=1]
- %tmp2 = load <8 x i16>* %A ; <<8 x i16>> [#uses=1]
+ %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=1]
+ %tmp2 = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=1]
%tmp.upgrd.5 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=11]
%tmp2.upgrd.6 = bitcast <8 x i16> %tmp2 to <16 x i8> ; <<16 x i8>> [#uses=5]
%tmp.upgrd.7 = extractelement <16 x i8> %tmp.upgrd.5, i32 5 ; <i8> [#uses=1]
@@ -94,9 +94,9 @@ define void @VSLDOI_xx(<8 x i16>* %A, <8 x i16>* %B) {
define void @VPERM_promote(<8 x i16>* %A, <8 x i16>* %B) {
entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=1]
+ %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=1]
%tmp.upgrd.9 = bitcast <8 x i16> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp2 = load <8 x i16>* %B ; <<8 x i16>> [#uses=1]
+ %tmp2 = load <8 x i16>, <8 x i16>* %B ; <<8 x i16>> [#uses=1]
%tmp2.upgrd.10 = bitcast <8 x i16> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp3 = call <4 x i32> @llvm.ppc.altivec.vperm( <4 x i32> %tmp.upgrd.9, <4 x i32> %tmp2.upgrd.10, <16 x i8> < i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14 > ) ; <<4 x i32>> [#uses=1]
%tmp3.upgrd.11 = bitcast <4 x i32> %tmp3 to <8 x i16> ; <<8 x i16>> [#uses=1]
@@ -108,8 +108,8 @@ declare <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32>, <4 x i32>, <16 x i8>)
define void @tb_l(<16 x i8>* %A, <16 x i8>* %B) {
entry:
- %tmp = load <16 x i8>* %A ; <<16 x i8>> [#uses=8]
- %tmp2 = load <16 x i8>* %B ; <<16 x i8>> [#uses=8]
+ %tmp = load <16 x i8>, <16 x i8>* %A ; <<16 x i8>> [#uses=8]
+ %tmp2 = load <16 x i8>, <16 x i8>* %B ; <<16 x i8>> [#uses=8]
%tmp.upgrd.12 = extractelement <16 x i8> %tmp, i32 8 ; <i8> [#uses=1]
%tmp3 = extractelement <16 x i8> %tmp2, i32 8 ; <i8> [#uses=1]
%tmp4 = extractelement <16 x i8> %tmp, i32 9 ; <i8> [#uses=1]
@@ -148,8 +148,8 @@ entry:
define void @th_l(<8 x i16>* %A, <8 x i16>* %B) {
entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=4]
- %tmp2 = load <8 x i16>* %B ; <<8 x i16>> [#uses=4]
+ %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=4]
+ %tmp2 = load <8 x i16>, <8 x i16>* %B ; <<8 x i16>> [#uses=4]
%tmp.upgrd.13 = extractelement <8 x i16> %tmp, i32 4 ; <i16> [#uses=1]
%tmp3 = extractelement <8 x i16> %tmp2, i32 4 ; <i16> [#uses=1]
%tmp4 = extractelement <8 x i16> %tmp, i32 5 ; <i16> [#uses=1]
@@ -172,8 +172,8 @@ entry:
define void @tw_l(<4 x i32>* %A, <4 x i32>* %B) {
entry:
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=2]
- %tmp2 = load <4 x i32>* %B ; <<4 x i32>> [#uses=2]
+ %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=2]
+ %tmp2 = load <4 x i32>, <4 x i32>* %B ; <<4 x i32>> [#uses=2]
%tmp.upgrd.14 = extractelement <4 x i32> %tmp, i32 2 ; <i32> [#uses=1]
%tmp3 = extractelement <4 x i32> %tmp2, i32 2 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
@@ -188,8 +188,8 @@ entry:
define void @tb_h(<16 x i8>* %A, <16 x i8>* %B) {
entry:
- %tmp = load <16 x i8>* %A ; <<16 x i8>> [#uses=8]
- %tmp2 = load <16 x i8>* %B ; <<16 x i8>> [#uses=8]
+ %tmp = load <16 x i8>, <16 x i8>* %A ; <<16 x i8>> [#uses=8]
+ %tmp2 = load <16 x i8>, <16 x i8>* %B ; <<16 x i8>> [#uses=8]
%tmp.upgrd.15 = extractelement <16 x i8> %tmp, i32 0 ; <i8> [#uses=1]
%tmp3 = extractelement <16 x i8> %tmp2, i32 0 ; <i8> [#uses=1]
%tmp4 = extractelement <16 x i8> %tmp, i32 1 ; <i8> [#uses=1]
@@ -228,8 +228,8 @@ entry:
define void @th_h(<8 x i16>* %A, <8 x i16>* %B) {
entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=4]
- %tmp2 = load <8 x i16>* %B ; <<8 x i16>> [#uses=4]
+ %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=4]
+ %tmp2 = load <8 x i16>, <8 x i16>* %B ; <<8 x i16>> [#uses=4]
%tmp.upgrd.16 = extractelement <8 x i16> %tmp, i32 0 ; <i16> [#uses=1]
%tmp3 = extractelement <8 x i16> %tmp2, i32 0 ; <i16> [#uses=1]
%tmp4 = extractelement <8 x i16> %tmp, i32 1 ; <i16> [#uses=1]
@@ -252,8 +252,8 @@ entry:
define void @tw_h(<4 x i32>* %A, <4 x i32>* %B) {
entry:
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=2]
- %tmp2 = load <4 x i32>* %B ; <<4 x i32>> [#uses=2]
+ %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=2]
+ %tmp2 = load <4 x i32>, <4 x i32>* %B ; <<4 x i32>> [#uses=2]
%tmp.upgrd.17 = extractelement <4 x i32> %tmp2, i32 0 ; <i32> [#uses=1]
%tmp3 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp2, i32 1 ; <i32> [#uses=1]
@@ -267,8 +267,8 @@ entry:
}
define void @tw_h_flop(<4 x i32>* %A, <4 x i32>* %B) {
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=2]
- %tmp2 = load <4 x i32>* %B ; <<4 x i32>> [#uses=2]
+ %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=2]
+ %tmp2 = load <4 x i32>, <4 x i32>* %B ; <<4 x i32>> [#uses=2]
%tmp.upgrd.18 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
%tmp3 = extractelement <4 x i32> %tmp2, i32 0 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp, i32 1 ; <i32> [#uses=1]
@@ -283,7 +283,7 @@ define void @tw_h_flop(<4 x i32>* %A, <4 x i32>* %B) {
define void @VMRG_UNARY_tb_l(<16 x i8>* %A, <16 x i8>* %B) {
entry:
- %tmp = load <16 x i8>* %A ; <<16 x i8>> [#uses=16]
+ %tmp = load <16 x i8>, <16 x i8>* %A ; <<16 x i8>> [#uses=16]
%tmp.upgrd.19 = extractelement <16 x i8> %tmp, i32 8 ; <i8> [#uses=1]
%tmp3 = extractelement <16 x i8> %tmp, i32 8 ; <i8> [#uses=1]
%tmp4 = extractelement <16 x i8> %tmp, i32 9 ; <i8> [#uses=1]
@@ -322,7 +322,7 @@ entry:
define void @VMRG_UNARY_th_l(<8 x i16>* %A, <8 x i16>* %B) {
entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=8]
+ %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=8]
%tmp.upgrd.20 = extractelement <8 x i16> %tmp, i32 4 ; <i16> [#uses=1]
%tmp3 = extractelement <8 x i16> %tmp, i32 4 ; <i16> [#uses=1]
%tmp4 = extractelement <8 x i16> %tmp, i32 5 ; <i16> [#uses=1]
@@ -345,7 +345,7 @@ entry:
define void @VMRG_UNARY_tw_l(<4 x i32>* %A, <4 x i32>* %B) {
entry:
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=4]
+ %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=4]
%tmp.upgrd.21 = extractelement <4 x i32> %tmp, i32 2 ; <i32> [#uses=1]
%tmp3 = extractelement <4 x i32> %tmp, i32 2 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
@@ -360,7 +360,7 @@ entry:
define void @VMRG_UNARY_tb_h(<16 x i8>* %A, <16 x i8>* %B) {
entry:
- %tmp = load <16 x i8>* %A ; <<16 x i8>> [#uses=16]
+ %tmp = load <16 x i8>, <16 x i8>* %A ; <<16 x i8>> [#uses=16]
%tmp.upgrd.22 = extractelement <16 x i8> %tmp, i32 0 ; <i8> [#uses=1]
%tmp3 = extractelement <16 x i8> %tmp, i32 0 ; <i8> [#uses=1]
%tmp4 = extractelement <16 x i8> %tmp, i32 1 ; <i8> [#uses=1]
@@ -399,7 +399,7 @@ entry:
define void @VMRG_UNARY_th_h(<8 x i16>* %A, <8 x i16>* %B) {
entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=8]
+ %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=8]
%tmp.upgrd.23 = extractelement <8 x i16> %tmp, i32 0 ; <i16> [#uses=1]
%tmp3 = extractelement <8 x i16> %tmp, i32 0 ; <i16> [#uses=1]
%tmp4 = extractelement <8 x i16> %tmp, i32 1 ; <i16> [#uses=1]
@@ -422,7 +422,7 @@ entry:
define void @VMRG_UNARY_tw_h(<4 x i32>* %A, <4 x i32>* %B) {
entry:
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=4]
+ %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=4]
%tmp.upgrd.24 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
%tmp3 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp, i32 1 ; <i32> [#uses=1]
@@ -437,7 +437,7 @@ entry:
define void @VPCKUHUM_unary(<8 x i16>* %A, <8 x i16>* %B) {
entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=2]
+ %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=2]
%tmp.upgrd.25 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=8]
%tmp3 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=8]
%tmp.upgrd.26 = extractelement <16 x i8> %tmp.upgrd.25, i32 1 ; <i8> [#uses=1]
@@ -479,7 +479,7 @@ entry:
define void @VPCKUWUM_unary(<4 x i32>* %A, <4 x i32>* %B) {
entry:
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=2]
+ %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=2]
%tmp.upgrd.28 = bitcast <4 x i32> %tmp to <8 x i16> ; <<8 x i16>> [#uses=4]
%tmp3 = bitcast <4 x i32> %tmp to <8 x i16> ; <<8 x i16>> [#uses=4]
%tmp.upgrd.29 = extractelement <8 x i16> %tmp.upgrd.28, i32 1 ; <i16> [#uses=1]
diff --git a/test/CodeGen/PowerPC/vec_shuffle_le.ll b/test/CodeGen/PowerPC/vec_shuffle_le.ll
index c7fc1c60c5ea..46d451ff1573 100644
--- a/test/CodeGen/PowerPC/vec_shuffle_le.ll
+++ b/test/CodeGen/PowerPC/vec_shuffle_le.ll
@@ -3,8 +3,8 @@
define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VPKUHUM_xy:
- %tmp = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
@@ -16,7 +16,7 @@ entry:
define void @VPKUHUM_xx(<16 x i8>* %A) {
entry:
; CHECK: VPKUHUM_xx:
- %tmp = load <16 x i8>* %A
+ %tmp = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK: vpkuhum
store <16 x i8> %tmp2, <16 x i8>* %A
@@ -26,8 +26,8 @@ entry:
define void @VPKUWUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VPKUWUM_xy:
- %tmp = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
@@ -39,7 +39,7 @@ entry:
define void @VPKUWUM_xx(<16 x i8>* %A) {
entry:
; CHECK: VPKUWUM_xx:
- %tmp = load <16 x i8>* %A
+ %tmp = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
; CHECK: vpkuwum
store <16 x i8> %tmp2, <16 x i8>* %A
@@ -49,8 +49,8 @@ entry:
define void @VMRGLB_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLB_xy:
- %tmp = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
@@ -62,7 +62,7 @@ entry:
define void @VMRGLB_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLB_xx:
- %tmp = load <16 x i8>* %A
+ %tmp = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
; CHECK: vmrglb
store <16 x i8> %tmp2, <16 x i8>* %A
@@ -72,8 +72,8 @@ entry:
define void @VMRGHB_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHB_xy:
- %tmp = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
@@ -85,7 +85,7 @@ entry:
define void @VMRGHB_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHB_xx:
- %tmp = load <16 x i8>* %A
+ %tmp = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
; CHECK: vmrghb
store <16 x i8> %tmp2, <16 x i8>* %A
@@ -95,8 +95,8 @@ entry:
define void @VMRGLH_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLH_xy:
- %tmp = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 2, i32 3, i32 18, i32 19, i32 4, i32 5, i32 20, i32 21, i32 6, i32 7, i32 22, i32 23>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
@@ -108,7 +108,7 @@ entry:
define void @VMRGLH_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLH_xx:
- %tmp = load <16 x i8>* %A
+ %tmp = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 4, i32 5, i32 4, i32 5, i32 6, i32 7, i32 6, i32 7>
; CHECK: vmrglh
store <16 x i8> %tmp2, <16 x i8>* %A
@@ -118,8 +118,8 @@ entry:
define void @VMRGHH_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHH_xy:
- %tmp = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 24, i32 25, i32 10, i32 11, i32 26, i32 27, i32 12, i32 13, i32 28, i32 29, i32 14, i32 15, i32 30, i32 31>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
@@ -131,7 +131,7 @@ entry:
define void @VMRGHH_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHH_xx:
- %tmp = load <16 x i8>* %A
+ %tmp = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 8, i32 9, i32 10, i32 11, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13, i32 14, i32 15, i32 14, i32 15>
; CHECK: vmrghh
store <16 x i8> %tmp2, <16 x i8>* %A
@@ -141,8 +141,8 @@ entry:
define void @VMRGLW_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLW_xy:
- %tmp = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
@@ -154,7 +154,7 @@ entry:
define void @VMRGLW_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLW_xx:
- %tmp = load <16 x i8>* %A
+ %tmp = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
; CHECK: vmrglw
store <16 x i8> %tmp2, <16 x i8>* %A
@@ -164,8 +164,8 @@ entry:
define void @VMRGHW_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHW_xy:
- %tmp = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
@@ -177,7 +177,7 @@ entry:
define void @VMRGHW_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHW_xx:
- %tmp = load <16 x i8>* %A
+ %tmp = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15>
; CHECK: vmrghw
store <16 x i8> %tmp2, <16 x i8>* %A
@@ -187,8 +187,8 @@ entry:
define void @VSLDOI_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VSLDOI_xy:
- %tmp = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
@@ -200,7 +200,7 @@ entry:
define void @VSLDOI_xx(<16 x i8>* %A) {
entry:
; CHECK: VSLDOI_xx:
- %tmp = load <16 x i8>* %A
+ %tmp = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
; CHECK: vsldoi
store <16 x i8> %tmp2, <16 x i8>* %A
diff --git a/test/CodeGen/PowerPC/vec_shuffle_p8vector.ll b/test/CodeGen/PowerPC/vec_shuffle_p8vector.ll
new file mode 100644
index 000000000000..77802348d8e3
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_shuffle_p8vector.ll
@@ -0,0 +1,54 @@
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64-unknown-linux-gnu -mattr=+power8-vector < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck -check-prefix=CHECK-PWR7 %s
+
+define void @VPKUDUM_unary(<2 x i64>* %A) {
+entry:
+ %tmp = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
+ %tmp3 = extractelement <4 x i32> %tmp2, i32 1
+ %tmp4 = extractelement <4 x i32> %tmp2, i32 3
+ %tmp5 = insertelement <4 x i32> undef, i32 %tmp3, i32 0
+ %tmp6 = insertelement <4 x i32> %tmp5, i32 %tmp4, i32 1
+ %tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 2
+ %tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 3
+ %tmp9 = bitcast <4 x i32> %tmp8 to <2 x i64>
+ store <2 x i64> %tmp9, <2 x i64>* %A
+ ret void
+}
+
+; CHECK-LABEL: @VPKUDUM_unary
+; CHECK-NOT: vperm
+; CHECK-NOT: vmrglw
+; CHECK-NOT: vmrghw
+; CHECK: vpkudum
+; CHECK-PWR7: vmrglw
+; CHECK-PWR7: vmrghw
+; CHECK-PWR7: vmrglw
+
+define void @VPKUDUM(<2 x i64>* %A, <2 x i64>* %B) {
+entry:
+ %tmp = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
+ %tmp3 = load <2 x i64>, <2 x i64>* %B
+ %tmp4 = bitcast <2 x i64> %tmp3 to <4 x i32>
+ %tmp5 = extractelement <4 x i32> %tmp2, i32 1
+ %tmp6 = extractelement <4 x i32> %tmp2, i32 3
+ %tmp7 = extractelement <4 x i32> %tmp4, i32 1
+ %tmp8 = extractelement <4 x i32> %tmp4, i32 3
+ %tmp9 = insertelement <4 x i32> undef, i32 %tmp5, i32 0
+ %tmp10 = insertelement <4 x i32> %tmp9, i32 %tmp6, i32 1
+ %tmp11 = insertelement <4 x i32> %tmp10, i32 %tmp7, i32 2
+ %tmp12 = insertelement <4 x i32> %tmp11, i32 %tmp8, i32 3
+ %tmp13 = bitcast <4 x i32> %tmp12 to <2 x i64>
+ store <2 x i64> %tmp13, <2 x i64>* %A
+ ret void
+}
+
+; CHECK-LABEL: @VPKUDUM
+; CHECK-NOT: vperm
+; CHECK-NOT: vmrglw
+; CHECK-NOT: vmrghw
+; CHECK: vpkudum
+; CHECK-PWR7: vmrglw
+; CHECK-PWR7: vmrghw
+; CHECK-PWR7: vmrglw
diff --git a/test/CodeGen/PowerPC/vec_shuffle_p8vector_le.ll b/test/CodeGen/PowerPC/vec_shuffle_p8vector_le.ll
new file mode 100644
index 000000000000..709388675f64
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_shuffle_p8vector_le.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -mattr=+power8-vector < %s | FileCheck %s
+
+define void @VPKUDUM_unary(<2 x i64>* %A) {
+entry:
+ %tmp = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
+ %tmp3 = extractelement <4 x i32> %tmp2, i32 0
+ %tmp4 = extractelement <4 x i32> %tmp2, i32 2
+ %tmp5 = insertelement <4 x i32> undef, i32 %tmp3, i32 0
+ %tmp6 = insertelement <4 x i32> %tmp5, i32 %tmp4, i32 1
+ %tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 2
+ %tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 3
+ %tmp9 = bitcast <4 x i32> %tmp8 to <2 x i64>
+ store <2 x i64> %tmp9, <2 x i64>* %A
+ ret void
+}
+
+; CHECK-LABEL: @VPKUDUM_unary
+; CHECK-NOT: vperm
+; CHECK: vpkudum
+
+define void @VPKUDUM(<2 x i64>* %A, <2 x i64>* %B) {
+entry:
+ %tmp = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
+ %tmp3 = load <2 x i64>, <2 x i64>* %B
+ %tmp4 = bitcast <2 x i64> %tmp3 to <4 x i32>
+ %tmp5 = extractelement <4 x i32> %tmp2, i32 0
+ %tmp6 = extractelement <4 x i32> %tmp2, i32 2
+ %tmp7 = extractelement <4 x i32> %tmp4, i32 0
+ %tmp8 = extractelement <4 x i32> %tmp4, i32 2
+ %tmp9 = insertelement <4 x i32> undef, i32 %tmp5, i32 0
+ %tmp10 = insertelement <4 x i32> %tmp9, i32 %tmp6, i32 1
+ %tmp11 = insertelement <4 x i32> %tmp10, i32 %tmp7, i32 2
+ %tmp12 = insertelement <4 x i32> %tmp11, i32 %tmp8, i32 3
+ %tmp13 = bitcast <4 x i32> %tmp12 to <2 x i64>
+ store <2 x i64> %tmp13, <2 x i64>* %A
+ ret void
+}
+
+; CHECK-LABEL: @VPKUDUM
+; CHECK-NOT: vperm
+; CHECK: vpkudum
diff --git a/test/CodeGen/PowerPC/vec_splat.ll b/test/CodeGen/PowerPC/vec_splat.ll
index 61237284d36c..aeed94c91f40 100644
--- a/test/CodeGen/PowerPC/vec_splat.ll
+++ b/test/CodeGen/PowerPC/vec_splat.ll
@@ -14,7 +14,7 @@ define void @splat(%f4* %P, %f4* %Q, float %X) nounwind {
%tmp2 = insertelement %f4 %tmp, float %X, i32 1 ; <%f4> [#uses=1]
%tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
%tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
- %q = load %f4* %Q ; <%f4> [#uses=1]
+ %q = load %f4, %f4* %Q ; <%f4> [#uses=1]
%R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
store %f4 %R, %f4* %P
ret void
@@ -25,21 +25,21 @@ define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) nounwind {
%tmp2 = insertelement %i4 %tmp, i32 %X, i32 1 ; <%i4> [#uses=1]
%tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1]
%tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1]
- %q = load %i4* %Q ; <%i4> [#uses=1]
+ %q = load %i4, %i4* %Q ; <%i4> [#uses=1]
%R = add %i4 %q, %tmp6 ; <%i4> [#uses=1]
store %i4 %R, %i4* %P
ret void
}
define void @splat_imm_i32(%i4* %P, %i4* %Q, i32 %X) nounwind {
- %q = load %i4* %Q ; <%i4> [#uses=1]
+ %q = load %i4, %i4* %Q ; <%i4> [#uses=1]
%R = add %i4 %q, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <%i4> [#uses=1]
store %i4 %R, %i4* %P
ret void
}
define void @splat_imm_i16(%i4* %P, %i4* %Q, i32 %X) nounwind {
- %q = load %i4* %Q ; <%i4> [#uses=1]
+ %q = load %i4, %i4* %Q ; <%i4> [#uses=1]
%R = add %i4 %q, < i32 65537, i32 65537, i32 65537, i32 65537 > ; <%i4> [#uses=1]
store %i4 %R, %i4* %P
ret void
@@ -60,7 +60,7 @@ define void @splat_h(i16 %tmp, <16 x i8>* %dst) nounwind {
}
define void @spltish(<16 x i8>* %A, <16 x i8>* %B) nounwind {
- %tmp = load <16 x i8>* %B ; <<16 x i8>> [#uses=1]
+ %tmp = load <16 x i8>, <16 x i8>* %B ; <<16 x i8>> [#uses=1]
%tmp.s = bitcast <16 x i8> %tmp to <16 x i8> ; <<16 x i8>> [#uses=1]
%tmp4 = sub <16 x i8> %tmp.s, bitcast (<8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16
15, i16 15, i16 15 > to <16 x i8>) ; <<16 x i8>> [#uses=1]
diff --git a/test/CodeGen/PowerPC/vec_splat_constant.ll b/test/CodeGen/PowerPC/vec_splat_constant.ll
index b227794421f2..53676fcbba4a 100644
--- a/test/CodeGen/PowerPC/vec_splat_constant.ll
+++ b/test/CodeGen/PowerPC/vec_splat_constant.ll
@@ -12,8 +12,8 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store <16 x i8> %x, <16 x i8>* %x_addr
store <16 x i8> <i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14>, <16 x i8>* %temp, align 16
- %0 = load <16 x i8>* %x_addr, align 16 ; <<16 x i8>> [#uses=1]
- %1 = load <16 x i8>* %temp, align 16 ; <<16 x i8>> [#uses=1]
+ %0 = load <16 x i8>, <16 x i8>* %x_addr, align 16 ; <<16 x i8>> [#uses=1]
+ %1 = load <16 x i8>, <16 x i8>* %temp, align 16 ; <<16 x i8>> [#uses=1]
%tmp = add <16 x i8> %0, %1 ; <<16 x i8>> [#uses=1]
store <16 x i8> %tmp, <16 x i8>* @baz, align 16
br label %return
diff --git a/test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll b/test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll
new file mode 100644
index 000000000000..f7d5a51c11d4
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll
@@ -0,0 +1,29 @@
+; Check the miscellaneous logical vector operations added in P8
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+; Test x eqv y
+define <4 x i32> @test_veqv(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp = xor <4 x i32> %x, %y
+ %ret_val = xor <4 x i32> %tmp, < i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %ret_val
+; CHECK: veqv 2, 2, 3
+}
+
+; Test x vnand y
+define <4 x i32> @test_vnand(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp = and <4 x i32> %x, %y
+ %ret_val = xor <4 x i32> %tmp, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %ret_val
+; CHECK: vnand 2, 2, 3
+}
+
+; Test x vorc y and variants
+define <4 x i32> @test_vorc(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %tmp2 = or <4 x i32> %x, %tmp1
+; CHECK: vorc 3, 2, 3
+ %tmp3 = xor <4 x i32> %tmp2, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %tmp4 = or <4 x i32> %tmp3, %x
+; CHECK: vorc 2, 2, 3
+ ret <4 x i32> %tmp4
+}
diff --git a/test/CodeGen/PowerPC/vec_zero.ll b/test/CodeGen/PowerPC/vec_zero.ll
index f862b2cb4c4b..aec61fbd6bd6 100644
--- a/test/CodeGen/PowerPC/vec_zero.ll
+++ b/test/CodeGen/PowerPC/vec_zero.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vxor
define void @foo(<4 x float>* %P) {
- %T = load <4 x float>* %P ; <<4 x float>> [#uses=1]
+ %T = load <4 x float>, <4 x float>* %P ; <<4 x float>> [#uses=1]
%S = fadd <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1]
store <4 x float> %S, <4 x float>* %P
ret void
diff --git a/test/CodeGen/PowerPC/vector-identity-shuffle.ll b/test/CodeGen/PowerPC/vector-identity-shuffle.ll
index dfa2e35435a8..35979f68a886 100644
--- a/test/CodeGen/PowerPC/vector-identity-shuffle.ll
+++ b/test/CodeGen/PowerPC/vector-identity-shuffle.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep vperm
define void @test(<4 x float>* %tmp2.i) {
- %tmp2.i.upgrd.1 = load <4 x float>* %tmp2.i ; <<4 x float>> [#uses=4]
+ %tmp2.i.upgrd.1 = load <4 x float>, <4 x float>* %tmp2.i ; <<4 x float>> [#uses=4]
%xFloat0.48 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 0 ; <float> [#uses=1]
%inFloat0.49 = insertelement <4 x float> undef, float %xFloat0.48, i32 0 ; <<4 x float>> [#uses=1]
%xFloat1.50 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 1 ; <float> [#uses=1]
diff --git a/test/CodeGen/PowerPC/vector.ll b/test/CodeGen/PowerPC/vector.ll
index 859a85a14101..723ca54c02ba 100644
--- a/test/CodeGen/PowerPC/vector.ll
+++ b/test/CodeGen/PowerPC/vector.ll
@@ -12,56 +12,56 @@
;;; TEST HANDLING OF VARIOUS VECTOR SIZES
define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
- %p = load %f1* %P ; <%f1> [#uses=1]
- %q = load %f1* %Q ; <%f1> [#uses=1]
+ %p = load %f1, %f1* %P ; <%f1> [#uses=1]
+ %q = load %f1, %f1* %Q ; <%f1> [#uses=1]
%R = fadd %f1 %p, %q ; <%f1> [#uses=1]
store %f1 %R, %f1* %S
ret void
}
define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
- %p = load %f2* %P ; <%f2> [#uses=1]
- %q = load %f2* %Q ; <%f2> [#uses=1]
+ %p = load %f2, %f2* %P ; <%f2> [#uses=1]
+ %q = load %f2, %f2* %Q ; <%f2> [#uses=1]
%R = fadd %f2 %p, %q ; <%f2> [#uses=1]
store %f2 %R, %f2* %S
ret void
}
define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %q = load %f4* %Q ; <%f4> [#uses=1]
+ %p = load %f4, %f4* %P ; <%f4> [#uses=1]
+ %q = load %f4, %f4* %Q ; <%f4> [#uses=1]
%R = fadd %f4 %p, %q ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
}
define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
+ %p = load %f8, %f8* %P ; <%f8> [#uses=1]
+ %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
%R = fadd %f8 %p, %q ; <%f8> [#uses=1]
store %f8 %R, %f8* %S
ret void
}
define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
+ %p = load %f8, %f8* %P ; <%f8> [#uses=1]
+ %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
%R = fmul %f8 %p, %q ; <%f8> [#uses=1]
store %f8 %R, %f8* %S
ret void
}
define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
+ %p = load %f8, %f8* %P ; <%f8> [#uses=1]
+ %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
%R = fdiv %f8 %p, %q ; <%f8> [#uses=1]
store %f8 %R, %f8* %S
ret void
}
define void @test_rem(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
+ %p = load %f8, %f8* %P ; <%f8> [#uses=1]
+ %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
%R = frem %f8 %p, %q ; <%f8> [#uses=1]
store %f8 %R, %f8* %S
ret void
@@ -70,7 +70,7 @@ define void @test_rem(%f8* %P, %f8* %Q, %f8* %S) {
;;; TEST VECTOR CONSTRUCTS
define void @test_cst(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
+ %p = load %f4, %f4* %P ; <%f4> [#uses=1]
%R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float
2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
@@ -78,14 +78,14 @@ define void @test_cst(%f4* %P, %f4* %S) {
}
define void @test_zero(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
+ %p = load %f4, %f4* %P ; <%f4> [#uses=1]
%R = fadd %f4 %p, zeroinitializer ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
}
define void @test_undef(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
+ %p = load %f4, %f4* %P ; <%f4> [#uses=1]
%R = fadd %f4 %p, undef ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
@@ -111,19 +111,19 @@ define void @test_scalar_to_vector(float %F, %f4* %S) {
}
define float @test_extract_elt(%f8* %P) {
- %p = load %f8* %P ; <%f8> [#uses=1]
+ %p = load %f8, %f8* %P ; <%f8> [#uses=1]
%R = extractelement %f8 %p, i32 3 ; <float> [#uses=1]
ret float %R
}
define double @test_extract_elt2(%d8* %P) {
- %p = load %d8* %P ; <%d8> [#uses=1]
+ %p = load %d8, %d8* %P ; <%d8> [#uses=1]
%R = extractelement %d8 %p, i32 3 ; <double> [#uses=1]
ret double %R
}
define void @test_cast_1(%f4* %b, %i4* %a) {
- %tmp = load %f4* %b ; <%f4> [#uses=1]
+ %tmp = load %f4, %f4* %b ; <%f4> [#uses=1]
%tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float
3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1]
%tmp3 = bitcast %f4 %tmp2 to %i4 ; <%i4> [#uses=1]
@@ -133,7 +133,7 @@ define void @test_cast_1(%f4* %b, %i4* %a) {
}
define void @test_cast_2(%f8* %a, <8 x i32>* %b) {
- %T = load %f8* %a ; <%f8> [#uses=1]
+ %T = load %f8, %f8* %a ; <%f8> [#uses=1]
%T2 = bitcast %f8 %T to <8 x i32>
store <8 x i32> %T2, <8 x i32>* %b
ret void
@@ -147,7 +147,7 @@ define void @splat(%f4* %P, %f4* %Q, float %X) {
%tmp2 = insertelement %f4 %tmp, float %X, i32 1
%tmp4 = insertelement %f4 %tmp2, float %X, i32 2
%tmp6 = insertelement %f4 %tmp4, float %X, i32 3
- %q = load %f4* %Q ; <%f4> [#uses=1]
+ %q = load %f4, %f4* %Q ; <%f4> [#uses=1]
%R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
store %f4 %R, %f4* %P
ret void
@@ -158,7 +158,7 @@ define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) {
%tmp2 = insertelement %i4 %tmp, i32 %X, i32 1
%tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2
%tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3
- %q = load %i4* %Q ; <%i4> [#uses=1]
+ %q = load %i4, %i4* %Q ; <%i4> [#uses=1]
%R = add %i4 %q, %tmp6 ; <%i4> [#uses=1]
store %i4 %R, %i4* %P
ret void
diff --git a/test/CodeGen/PowerPC/vperm-lowering.ll b/test/CodeGen/PowerPC/vperm-lowering.ll
index d55d26c959b6..c78ffdd0c073 100644
--- a/test/CodeGen/PowerPC/vperm-lowering.ll
+++ b/test/CodeGen/PowerPC/vperm-lowering.ll
@@ -9,58 +9,23 @@ define <16 x i8> @foo() nounwind ssp {
}
; CHECK: .LCPI0_0:
-; CHECK: .byte 31
-; CHECK: .byte 26
-; CHECK: .byte 21
-; CHECK: .byte 16
-; CHECK: .byte 11
-; CHECK: .byte 6
-; CHECK: .byte 1
-; CHECK: .byte 28
-; CHECK: .byte 23
-; CHECK: .byte 18
-; CHECK: .byte 13
-; CHECK: .byte 8
-; CHECK: .byte 3
-; CHECK: .byte 30
-; CHECK: .byte 25
-; CHECK: .byte 20
-; CHECK: .LCPI0_1:
; CHECK: .byte 0
-; CHECK: .byte 1
-; CHECK: .byte 2
-; CHECK: .byte 3
-; CHECK: .byte 4
; CHECK: .byte 5
-; CHECK: .byte 6
-; CHECK: .byte 7
-; CHECK: .byte 8
-; CHECK: .byte 9
; CHECK: .byte 10
-; CHECK: .byte 11
-; CHECK: .byte 12
-; CHECK: .byte 13
-; CHECK: .byte 14
; CHECK: .byte 15
-; CHECK: .LCPI0_2:
-; CHECK: .byte 16
-; CHECK: .byte 17
-; CHECK: .byte 18
-; CHECK: .byte 19
; CHECK: .byte 20
-; CHECK: .byte 21
-; CHECK: .byte 22
-; CHECK: .byte 23
-; CHECK: .byte 24
; CHECK: .byte 25
-; CHECK: .byte 26
-; CHECK: .byte 27
-; CHECK: .byte 28
-; CHECK: .byte 29
; CHECK: .byte 30
-; CHECK: .byte 31
+; CHECK: .byte 3
+; CHECK: .byte 8
+; CHECK: .byte 13
+; CHECK: .byte 18
+; CHECK: .byte 23
+; CHECK: .byte 28
+; CHECK: .byte 1
+; CHECK: .byte 6
+; CHECK: .byte 11
; CHECK: foo:
-; CHECK: addis [[REG1:[0-9]+]], 2, .LCPI0_2@toc@ha
-; CHECK: addi [[REG2:[0-9]+]], [[REG1]], .LCPI0_2@toc@l
+; CHECK: addis [[REG1:[0-9]+]], 2, .LCPI0_0@toc@ha
+; CHECK: addi [[REG2:[0-9]+]], [[REG1]], .LCPI0_0@toc@l
; CHECK: lvx [[REG3:[0-9]+]], 0, [[REG2]]
-; CHECK: vperm {{[0-9]+}}, [[REG3]], {{[0-9]+}}, {{[0-9]+}}
diff --git a/test/CodeGen/PowerPC/vsx-div.ll b/test/CodeGen/PowerPC/vsx-div.ll
index 8a9578e5ed80..0e8388543a2f 100644
--- a/test/CodeGen/PowerPC/vsx-div.ll
+++ b/test/CodeGen/PowerPC/vsx-div.ll
@@ -7,7 +7,7 @@
define void @test1() {
entry:
- %0 = load <4 x float>* @vf, align 16
+ %0 = load <4 x float>, <4 x float>* @vf, align 16
%1 = tail call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %0)
store <4 x float> %1, <4 x float>* @vf_res, align 16
ret void
@@ -17,7 +17,7 @@ entry:
define void @test2() {
entry:
- %0 = load <2 x double>* @vd, align 16
+ %0 = load <2 x double>, <2 x double>* @vd, align 16
%1 = tail call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %0)
store <2 x double> %1, <2 x double>* @vd_res, align 16
ret void
diff --git a/test/CodeGen/PowerPC/vsx-elementary-arith.ll b/test/CodeGen/PowerPC/vsx-elementary-arith.ll
new file mode 100644
index 000000000000..d8f76bb989e7
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-elementary-arith.ll
@@ -0,0 +1,120 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 | FileCheck %s
+@a = global float 3.000000e+00, align 4
+@b = global float 4.000000e+00, align 4
+@c = global double 3.000000e+00, align 8
+@d = global double 4.000000e+00, align 8
+
+; Function Attrs: nounwind
+define float @emit_xsaddsp() {
+entry:
+ %0 = load float, float* @a, align 4
+ %1 = load float, float* @b, align 4
+ %add = fadd float %0, %1
+ ret float %add
+; CHECK-LABEL: @emit_xsaddsp
+; CHECK: xsaddsp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define float @emit_xssubsp() {
+entry:
+ %0 = load float, float* @a, align 4
+ %1 = load float, float* @b, align 4
+ %sub = fsub float %0, %1
+ ret float %sub
+; CHECK-LABEL: @emit_xssubsp
+; CHECK: xssubsp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define float @emit_xsdivsp() {
+entry:
+ %0 = load float, float* @a, align 4
+ %1 = load float, float* @b, align 4
+ %div = fdiv float %0, %1
+ ret float %div
+; CHECK-LABEL: @emit_xsdivsp
+; CHECK: xsdivsp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define float @emit_xsmulsp() {
+entry:
+ %0 = load float, float* @a, align 4
+ %1 = load float, float* @b, align 4
+ %mul = fmul float %0, %1
+ ret float %mul
+; CHECK-LABEL: @emit_xsmulsp
+; CHECK: xsmulsp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define float @emit_xssqrtsp() {
+entry:
+ %0 = load float, float* @b, align 4
+ %call = call float @sqrtf(float %0)
+ ret float %call
+; CHECK-LABEL: @emit_xssqrtsp
+; CHECK: xssqrtsp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+declare float @sqrtf(float)
+
+; Function Attrs: nounwind
+define double @emit_xsadddp() {
+entry:
+ %0 = load double, double* @c, align 8
+ %1 = load double, double* @d, align 8
+ %add = fadd double %0, %1
+ ret double %add
+; CHECK-LABEL: @emit_xsadddp
+; CHECK: xsadddp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define double @emit_xssubdp() {
+entry:
+ %0 = load double, double* @c, align 8
+ %1 = load double, double* @d, align 8
+ %sub = fsub double %0, %1
+ ret double %sub
+; CHECK-LABEL: @emit_xssubdp
+; CHECK: xssubdp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define double @emit_xsdivdp() {
+entry:
+ %0 = load double, double* @c, align 8
+ %1 = load double, double* @d, align 8
+ %div = fdiv double %0, %1
+ ret double %div
+; CHECK-LABEL: @emit_xsdivdp
+; CHECK: xsdivdp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define double @emit_xsmuldp() {
+entry:
+ %0 = load double, double* @c, align 8
+ %1 = load double, double* @d, align 8
+ %mul = fmul double %0, %1
+ ret double %mul
+; CHECK-LABEL: @emit_xsmuldp
+; CHECK: xsmuldp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define double @emit_xssqrtdp() {
+entry:
+ %0 = load double, double* @d, align 8
+ %call = call double @sqrt(double %0)
+ ret double %call
+; CHECK-LABEL: @emit_xssqrtdp
+; CHECK: xssqrtdp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+declare double @sqrt(double)
diff --git a/test/CodeGen/PowerPC/vsx-fma-m.ll b/test/CodeGen/PowerPC/vsx-fma-m.ll
index c492e169e10f..d85927396e3e 100644
--- a/test/CodeGen/PowerPC/vsx-fma-m.ll
+++ b/test/CodeGen/PowerPC/vsx-fma-m.ll
@@ -12,7 +12,7 @@ entry:
%0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
store double %0, double* %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
- %arrayidx1 = getelementptr inbounds double* %d, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
store double %1, double* %arrayidx1, align 8
ret void
@@ -39,10 +39,10 @@ entry:
%0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
store double %0, double* %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
- %arrayidx1 = getelementptr inbounds double* %d, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
store double %1, double* %arrayidx1, align 8
%2 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
- %arrayidx2 = getelementptr inbounds double* %d, i64 2
+ %arrayidx2 = getelementptr inbounds double, double* %d, i64 2
store double %2, double* %arrayidx2, align 8
ret void
@@ -77,12 +77,12 @@ entry:
store double %0, double* %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
%2 = tail call double @llvm.fma.f64(double %b, double %c, double %1)
- %arrayidx1 = getelementptr inbounds double* %d, i64 3
+ %arrayidx1 = getelementptr inbounds double, double* %d, i64 3
store double %2, double* %arrayidx1, align 8
%3 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
- %arrayidx2 = getelementptr inbounds double* %d, i64 2
+ %arrayidx2 = getelementptr inbounds double, double* %d, i64 2
store double %3, double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %d, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %d, i64 1
store double %1, double* %arrayidx3, align 8
ret void
@@ -98,9 +98,9 @@ entry:
; re-ordering the instructions.
; CHECK-DAG: xsmaddadp [[F1]], 2, 3
-; CHECK-DAG: xsmaddmdp 2, 3, 4
+; CHECK-DAG: xsmaddmdp 3, 2, 4
; CHECK-DAG: stxsdx [[F1]], 0, 8
-; CHECK-DAG: stxsdx 2, 8, [[C1]]
+; CHECK-DAG: stxsdx 3, 8, [[C1]]
; CHECK-DAG: stxsdx 1, 8, [[C2]]
; CHECK-DAG: stxsdx 4, 8, [[C3]]
; CHECK: blr
@@ -125,13 +125,13 @@ entry:
%0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
store double %0, double* %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
- %arrayidx1 = getelementptr inbounds double* %d, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
store double %1, double* %arrayidx1, align 8
%2 = tail call double @llvm.fma.f64(double %b, double %c, double %1)
- %arrayidx3 = getelementptr inbounds double* %d, i64 3
+ %arrayidx3 = getelementptr inbounds double, double* %d, i64 3
store double %2, double* %arrayidx3, align 8
%3 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
- %arrayidx4 = getelementptr inbounds double* %d, i64 2
+ %arrayidx4 = getelementptr inbounds double, double* %d, i64 2
store double %3, double* %arrayidx4, align 8
ret void
@@ -178,7 +178,7 @@ entry:
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
store <2 x double> %0, <2 x double>* %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
- %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 1
+ %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
store <2 x double> %1, <2 x double>* %arrayidx1, align 8
ret void
@@ -205,10 +205,10 @@ entry:
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
store <2 x double> %0, <2 x double>* %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
- %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 1
+ %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
store <2 x double> %1, <2 x double>* %arrayidx1, align 8
%2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
- %arrayidx2 = getelementptr inbounds <2 x double>* %d, i64 2
+ %arrayidx2 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
store <2 x double> %2, <2 x double>* %arrayidx2, align 8
ret void
@@ -243,12 +243,12 @@ entry:
store <2 x double> %0, <2 x double>* %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
%2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
- %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 3
+ %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 3
store <2 x double> %2, <2 x double>* %arrayidx1, align 8
%3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
- %arrayidx2 = getelementptr inbounds <2 x double>* %d, i64 2
+ %arrayidx2 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
store <2 x double> %3, <2 x double>* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds <2 x double>* %d, i64 1
+ %arrayidx3 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
store <2 x double> %1, <2 x double>* %arrayidx3, align 8
ret void
@@ -300,13 +300,13 @@ entry:
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
store <2 x double> %0, <2 x double>* %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
- %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 1
+ %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
store <2 x double> %1, <2 x double>* %arrayidx1, align 8
%2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
- %arrayidx3 = getelementptr inbounds <2 x double>* %d, i64 3
+ %arrayidx3 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 3
store <2 x double> %2, <2 x double>* %arrayidx3, align 8
%3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
- %arrayidx4 = getelementptr inbounds <2 x double>* %d, i64 2
+ %arrayidx4 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
store <2 x double> %3, <2 x double>* %arrayidx4, align 8
ret void
diff --git a/test/CodeGen/PowerPC/vsx-infl-copy1.ll b/test/CodeGen/PowerPC/vsx-infl-copy1.ll
new file mode 100644
index 000000000000..531e3ad2d87c
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-infl-copy1.ll
@@ -0,0 +1,133 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@ub = external global [1024 x i32], align 4
+@uc = external global [1024 x i32], align 4
+
+; Function Attrs: noinline nounwind
+define void @_Z8example9Pj() #0 {
+entry:
+ br label %vector.body
+
+; CHECK-LABEL: @_Z8example9Pj
+; CHECK: xxlor
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %43, %vector.body ]
+ %vec.phi20 = phi <4 x i32> [ zeroinitializer, %entry ], [ %44, %vector.body ]
+ %vec.phi21 = phi <4 x i32> [ zeroinitializer, %entry ], [ %45, %vector.body ]
+ %vec.phi23 = phi <4 x i32> [ zeroinitializer, %entry ], [ %46, %vector.body ]
+ %vec.phi24 = phi <4 x i32> [ zeroinitializer, %entry ], [ %47, %vector.body ]
+ %vec.phi25 = phi <4 x i32> [ zeroinitializer, %entry ], [ %48, %vector.body ]
+ %vec.phi26 = phi <4 x i32> [ zeroinitializer, %entry ], [ %49, %vector.body ]
+ %vec.phi27 = phi <4 x i32> [ zeroinitializer, %entry ], [ %50, %vector.body ]
+ %vec.phi28 = phi <4 x i32> [ zeroinitializer, %entry ], [ %51, %vector.body ]
+ %vec.phi29 = phi <4 x i32> [ zeroinitializer, %entry ], [ %52, %vector.body ]
+ %vec.phi30 = phi <4 x i32> [ zeroinitializer, %entry ], [ %53, %vector.body ]
+ %wide.load32 = load <4 x i32>, <4 x i32>* null, align 4
+ %.sum82 = add i64 %index, 24
+ %0 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum82
+ %1 = bitcast i32* %0 to <4 x i32>*
+ %wide.load36 = load <4 x i32>, <4 x i32>* %1, align 4
+ %wide.load37 = load <4 x i32>, <4 x i32>* undef, align 4
+ %.sum84 = add i64 %index, 32
+ %2 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum84
+ %3 = bitcast i32* %2 to <4 x i32>*
+ %wide.load38 = load <4 x i32>, <4 x i32>* %3, align 4
+ %.sum85 = add i64 %index, 36
+ %4 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum85
+ %5 = bitcast i32* %4 to <4 x i32>*
+ %wide.load39 = load <4 x i32>, <4 x i32>* %5, align 4
+ %6 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 undef
+ %7 = bitcast i32* %6 to <4 x i32>*
+ %wide.load40 = load <4 x i32>, <4 x i32>* %7, align 4
+ %.sum87 = add i64 %index, 44
+ %8 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum87
+ %9 = bitcast i32* %8 to <4 x i32>*
+ %wide.load41 = load <4 x i32>, <4 x i32>* %9, align 4
+ %10 = getelementptr inbounds [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %index
+ %11 = bitcast i32* %10 to <4 x i32>*
+ %wide.load42 = load <4 x i32>, <4 x i32>* %11, align 4
+ %.sum8889 = or i64 %index, 4
+ %12 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum8889
+ %13 = bitcast i32* %12 to <4 x i32>*
+ %wide.load43 = load <4 x i32>, <4 x i32>* %13, align 4
+ %.sum9091 = or i64 %index, 8
+ %14 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum9091
+ %15 = bitcast i32* %14 to <4 x i32>*
+ %wide.load44 = load <4 x i32>, <4 x i32>* %15, align 4
+ %.sum94 = add i64 %index, 16
+ %16 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum94
+ %17 = bitcast i32* %16 to <4 x i32>*
+ %wide.load46 = load <4 x i32>, <4 x i32>* %17, align 4
+ %.sum95 = add i64 %index, 20
+ %18 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum95
+ %19 = bitcast i32* %18 to <4 x i32>*
+ %wide.load47 = load <4 x i32>, <4 x i32>* %19, align 4
+ %20 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 undef
+ %21 = bitcast i32* %20 to <4 x i32>*
+ %wide.load48 = load <4 x i32>, <4 x i32>* %21, align 4
+ %.sum97 = add i64 %index, 28
+ %22 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum97
+ %23 = bitcast i32* %22 to <4 x i32>*
+ %wide.load49 = load <4 x i32>, <4 x i32>* %23, align 4
+ %.sum98 = add i64 %index, 32
+ %24 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum98
+ %25 = bitcast i32* %24 to <4 x i32>*
+ %wide.load50 = load <4 x i32>, <4 x i32>* %25, align 4
+ %.sum99 = add i64 %index, 36
+ %26 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum99
+ %27 = bitcast i32* %26 to <4 x i32>*
+ %wide.load51 = load <4 x i32>, <4 x i32>* %27, align 4
+ %.sum100 = add i64 %index, 40
+ %28 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum100
+ %29 = bitcast i32* %28 to <4 x i32>*
+ %wide.load52 = load <4 x i32>, <4 x i32>* %29, align 4
+ %.sum101 = add i64 %index, 44
+ %30 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum101
+ %31 = bitcast i32* %30 to <4 x i32>*
+ %wide.load53 = load <4 x i32>, <4 x i32>* %31, align 4
+ %32 = add <4 x i32> zeroinitializer, %vec.phi
+ %33 = add <4 x i32> zeroinitializer, %vec.phi20
+ %34 = add <4 x i32> %wide.load32, %vec.phi21
+ %35 = add <4 x i32> zeroinitializer, %vec.phi23
+ %36 = add <4 x i32> zeroinitializer, %vec.phi24
+ %37 = add <4 x i32> %wide.load36, %vec.phi25
+ %38 = add <4 x i32> %wide.load37, %vec.phi26
+ %39 = add <4 x i32> %wide.load38, %vec.phi27
+ %40 = add <4 x i32> %wide.load39, %vec.phi28
+ %41 = add <4 x i32> %wide.load40, %vec.phi29
+ %42 = add <4 x i32> %wide.load41, %vec.phi30
+ %43 = sub <4 x i32> %32, %wide.load42
+ %44 = sub <4 x i32> %33, %wide.load43
+ %45 = sub <4 x i32> %34, %wide.load44
+ %46 = sub <4 x i32> %35, %wide.load46
+ %47 = sub <4 x i32> %36, %wide.load47
+ %48 = sub <4 x i32> %37, %wide.load48
+ %49 = sub <4 x i32> %38, %wide.load49
+ %50 = sub <4 x i32> %39, %wide.load50
+ %51 = sub <4 x i32> %40, %wide.load51
+ %52 = sub <4 x i32> %41, %wide.load52
+ %53 = sub <4 x i32> %42, %wide.load53
+ %index.next = add i64 %index, 48
+ br i1 false, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+ %.lcssa112 = phi <4 x i32> [ %53, %vector.body ]
+ %.lcssa111 = phi <4 x i32> [ %52, %vector.body ]
+ %.lcssa110 = phi <4 x i32> [ %51, %vector.body ]
+ %.lcssa109 = phi <4 x i32> [ %50, %vector.body ]
+ %.lcssa108 = phi <4 x i32> [ %49, %vector.body ]
+ %.lcssa107 = phi <4 x i32> [ %48, %vector.body ]
+ %.lcssa106 = phi <4 x i32> [ %47, %vector.body ]
+ %.lcssa105 = phi <4 x i32> [ %46, %vector.body ]
+ %.lcssa103 = phi <4 x i32> [ %45, %vector.body ]
+ %.lcssa102 = phi <4 x i32> [ %44, %vector.body ]
+ %.lcssa = phi <4 x i32> [ %43, %vector.body ]
+ ret void
+}
+
+attributes #0 = { noinline nounwind }
+
diff --git a/test/CodeGen/PowerPC/vsx-infl-copy2.ll b/test/CodeGen/PowerPC/vsx-infl-copy2.ll
new file mode 100644
index 000000000000..32d6f1e68bd8
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-infl-copy2.ll
@@ -0,0 +1,114 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @_Z28test_goto_loop_unroll_factorILi22EiEvPKT0_iPKc(i32* nocapture readonly %first) #0 {
+entry:
+ br i1 false, label %loop2_start, label %if.end5
+
+; CHECK-LABEL: @_Z28test_goto_loop_unroll_factorILi22EiEvPKT0_iPKc
+
+loop2_start: ; preds = %loop2_start, %entry
+ br i1 undef, label %loop2_start, label %if.then.i31
+
+if.end5: ; preds = %entry
+ br i1 undef, label %loop_start.preheader, label %if.then.i31
+
+loop_start.preheader: ; preds = %if.end5
+ br i1 false, label %middle.block, label %vector.body
+
+vector.body: ; preds = %vector.body, %loop_start.preheader
+ %vec.phi61 = phi <4 x i32> [ %34, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi62 = phi <4 x i32> [ %35, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi63 = phi <4 x i32> [ %36, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi65 = phi <4 x i32> [ %37, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi67 = phi <4 x i32> [ %38, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi68 = phi <4 x i32> [ %39, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi69 = phi <4 x i32> [ %40, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi70 = phi <4 x i32> [ %41, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi71 = phi <4 x i32> [ %42, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %.sum = add i64 0, 4
+ %wide.load72 = load <4 x i32>, <4 x i32>* null, align 4
+ %.sum109 = add i64 0, 8
+ %0 = getelementptr i32, i32* %first, i64 %.sum109
+ %1 = bitcast i32* %0 to <4 x i32>*
+ %wide.load73 = load <4 x i32>, <4 x i32>* %1, align 4
+ %.sum110 = add i64 0, 12
+ %2 = getelementptr i32, i32* %first, i64 %.sum110
+ %3 = bitcast i32* %2 to <4 x i32>*
+ %wide.load74 = load <4 x i32>, <4 x i32>* %3, align 4
+ %.sum112 = add i64 0, 20
+ %4 = getelementptr i32, i32* %first, i64 %.sum112
+ %5 = bitcast i32* %4 to <4 x i32>*
+ %wide.load76 = load <4 x i32>, <4 x i32>* %5, align 4
+ %.sum114 = add i64 0, 28
+ %6 = getelementptr i32, i32* %first, i64 %.sum114
+ %7 = bitcast i32* %6 to <4 x i32>*
+ %wide.load78 = load <4 x i32>, <4 x i32>* %7, align 4
+ %.sum115 = add i64 0, 32
+ %8 = getelementptr i32, i32* %first, i64 %.sum115
+ %9 = bitcast i32* %8 to <4 x i32>*
+ %wide.load79 = load <4 x i32>, <4 x i32>* %9, align 4
+ %.sum116 = add i64 0, 36
+ %10 = getelementptr i32, i32* %first, i64 %.sum116
+ %11 = bitcast i32* %10 to <4 x i32>*
+ %wide.load80 = load <4 x i32>, <4 x i32>* %11, align 4
+ %.sum117 = add i64 0, 40
+ %12 = getelementptr i32, i32* %first, i64 %.sum117
+ %13 = bitcast i32* %12 to <4 x i32>*
+ %wide.load81 = load <4 x i32>, <4 x i32>* %13, align 4
+ %.sum118 = add i64 0, 44
+ %14 = getelementptr i32, i32* %first, i64 %.sum118
+ %15 = bitcast i32* %14 to <4 x i32>*
+ %wide.load82 = load <4 x i32>, <4 x i32>* %15, align 4
+ %16 = mul <4 x i32> %wide.load72, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %17 = mul <4 x i32> %wide.load73, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %18 = mul <4 x i32> %wide.load74, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %19 = mul <4 x i32> %wide.load76, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %20 = mul <4 x i32> %wide.load78, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %21 = mul <4 x i32> %wide.load79, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %22 = mul <4 x i32> %wide.load80, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %23 = mul <4 x i32> %wide.load81, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %24 = mul <4 x i32> %wide.load82, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %25 = add <4 x i32> %16, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %26 = add <4 x i32> %17, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %27 = add <4 x i32> %18, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %28 = add <4 x i32> %19, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %29 = add <4 x i32> %20, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %30 = add <4 x i32> %21, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %31 = add <4 x i32> %22, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %32 = add <4 x i32> %23, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %33 = add <4 x i32> %24, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %34 = add nsw <4 x i32> %25, %vec.phi61
+ %35 = add nsw <4 x i32> %26, %vec.phi62
+ %36 = add nsw <4 x i32> %27, %vec.phi63
+ %37 = add nsw <4 x i32> %28, %vec.phi65
+ %38 = add nsw <4 x i32> %29, %vec.phi67
+ %39 = add nsw <4 x i32> %30, %vec.phi68
+ %40 = add nsw <4 x i32> %31, %vec.phi69
+ %41 = add nsw <4 x i32> %32, %vec.phi70
+ %42 = add nsw <4 x i32> %33, %vec.phi71
+ br i1 false, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body, %loop_start.preheader
+ %rdx.vec.exit.phi85 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %34, %vector.body ]
+ %rdx.vec.exit.phi86 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %35, %vector.body ]
+ %rdx.vec.exit.phi87 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %36, %vector.body ]
+ %rdx.vec.exit.phi89 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %37, %vector.body ]
+ %rdx.vec.exit.phi91 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %38, %vector.body ]
+ %rdx.vec.exit.phi92 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %39, %vector.body ]
+ %rdx.vec.exit.phi93 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %40, %vector.body ]
+ %rdx.vec.exit.phi94 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %41, %vector.body ]
+ %rdx.vec.exit.phi95 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %42, %vector.body ]
+ br i1 false, label %if.then.i31, label %loop_start.prol
+
+loop_start.prol: ; preds = %loop_start.prol, %middle.block
+ br label %loop_start.prol
+
+if.then.i31: ; preds = %middle.block, %if.end5, %loop2_start
+ unreachable
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll b/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
index 7367672eab8b..d6940e46df37 100644
--- a/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
+++ b/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
@@ -1,7 +1,6 @@
; RUN: llc -mcpu=pwr8 -mattr=+vsx -O2 -mtriple=powerpc64le-unknown-linux-gnu < %s > %t
; RUN: grep lxvd2x < %t | count 18
; RUN: grep stxvd2x < %t | count 18
-; RUN: grep xxpermdi < %t | count 36
@vf = global <4 x float> <float -1.500000e+00, float 2.500000e+00, float -3.500000e+00, float 4.500000e+00>, align 16
@vd = global <2 x double> <double 3.500000e+00, double -7.500000e+00>, align 16
@@ -51,117 +50,117 @@ entry:
%__b.addr.i = alloca <4 x i32>*, align 8
store i32 0, i32* %__a.addr.i, align 4
store <4 x i32>* @vsi, <4 x i32>** %__b.addr.i, align 8
- %0 = load i32* %__a.addr.i, align 4
- %1 = load <4 x i32>** %__b.addr.i, align 8
+ %0 = load i32, i32* %__a.addr.i, align 4
+ %1 = load <4 x i32>*, <4 x i32>** %__b.addr.i, align 8
%2 = bitcast <4 x i32>* %1 to i8*
- %3 = getelementptr i8* %2, i32 %0
+ %3 = getelementptr i8, i8* %2, i32 %0
%4 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %3)
store <4 x i32> %4, <4 x i32>* @res_vsi, align 16
store i32 0, i32* %__a.addr.i31, align 4
store <4 x i32>* @vui, <4 x i32>** %__b.addr.i32, align 8
- %5 = load i32* %__a.addr.i31, align 4
- %6 = load <4 x i32>** %__b.addr.i32, align 8
+ %5 = load i32, i32* %__a.addr.i31, align 4
+ %6 = load <4 x i32>*, <4 x i32>** %__b.addr.i32, align 8
%7 = bitcast <4 x i32>* %6 to i8*
- %8 = getelementptr i8* %7, i32 %5
+ %8 = getelementptr i8, i8* %7, i32 %5
%9 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %8)
store <4 x i32> %9, <4 x i32>* @res_vui, align 16
store i32 0, i32* %__a.addr.i29, align 4
store <4 x float>* @vf, <4 x float>** %__b.addr.i30, align 8
- %10 = load i32* %__a.addr.i29, align 4
- %11 = load <4 x float>** %__b.addr.i30, align 8
+ %10 = load i32, i32* %__a.addr.i29, align 4
+ %11 = load <4 x float>*, <4 x float>** %__b.addr.i30, align 8
%12 = bitcast <4 x float>* %11 to i8*
- %13 = getelementptr i8* %12, i32 %10
+ %13 = getelementptr i8, i8* %12, i32 %10
%14 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %13)
%15 = bitcast <4 x i32> %14 to <4 x float>
store <4 x float> %15, <4 x float>* @res_vf, align 16
store i32 0, i32* %__a.addr.i27, align 4
store <2 x i64>* @vsll, <2 x i64>** %__b.addr.i28, align 8
- %16 = load i32* %__a.addr.i27, align 4
- %17 = load <2 x i64>** %__b.addr.i28, align 8
+ %16 = load i32, i32* %__a.addr.i27, align 4
+ %17 = load <2 x i64>*, <2 x i64>** %__b.addr.i28, align 8
%18 = bitcast <2 x i64>* %17 to i8*
- %19 = getelementptr i8* %18, i32 %16
+ %19 = getelementptr i8, i8* %18, i32 %16
%20 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %19)
%21 = bitcast <2 x double> %20 to <2 x i64>
store <2 x i64> %21, <2 x i64>* @res_vsll, align 16
store i32 0, i32* %__a.addr.i25, align 4
store <2 x i64>* @vull, <2 x i64>** %__b.addr.i26, align 8
- %22 = load i32* %__a.addr.i25, align 4
- %23 = load <2 x i64>** %__b.addr.i26, align 8
+ %22 = load i32, i32* %__a.addr.i25, align 4
+ %23 = load <2 x i64>*, <2 x i64>** %__b.addr.i26, align 8
%24 = bitcast <2 x i64>* %23 to i8*
- %25 = getelementptr i8* %24, i32 %22
+ %25 = getelementptr i8, i8* %24, i32 %22
%26 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %25)
%27 = bitcast <2 x double> %26 to <2 x i64>
store <2 x i64> %27, <2 x i64>* @res_vull, align 16
store i32 0, i32* %__a.addr.i23, align 4
store <2 x double>* @vd, <2 x double>** %__b.addr.i24, align 8
- %28 = load i32* %__a.addr.i23, align 4
- %29 = load <2 x double>** %__b.addr.i24, align 8
+ %28 = load i32, i32* %__a.addr.i23, align 4
+ %29 = load <2 x double>*, <2 x double>** %__b.addr.i24, align 8
%30 = bitcast <2 x double>* %29 to i8*
- %31 = getelementptr i8* %30, i32 %28
+ %31 = getelementptr i8, i8* %30, i32 %28
%32 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %31)
store <2 x double> %32, <2 x double>* @res_vd, align 16
- %33 = load <4 x i32>* @vsi, align 16
+ %33 = load <4 x i32>, <4 x i32>* @vsi, align 16
store <4 x i32> %33, <4 x i32>* %__a.addr.i20, align 16
store i32 0, i32* %__b.addr.i21, align 4
store <4 x i32>* @res_vsi, <4 x i32>** %__c.addr.i22, align 8
- %34 = load <4 x i32>* %__a.addr.i20, align 16
- %35 = load i32* %__b.addr.i21, align 4
- %36 = load <4 x i32>** %__c.addr.i22, align 8
+ %34 = load <4 x i32>, <4 x i32>* %__a.addr.i20, align 16
+ %35 = load i32, i32* %__b.addr.i21, align 4
+ %36 = load <4 x i32>*, <4 x i32>** %__c.addr.i22, align 8
%37 = bitcast <4 x i32>* %36 to i8*
- %38 = getelementptr i8* %37, i32 %35
+ %38 = getelementptr i8, i8* %37, i32 %35
call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %34, i8* %38)
- %39 = load <4 x i32>* @vui, align 16
+ %39 = load <4 x i32>, <4 x i32>* @vui, align 16
store <4 x i32> %39, <4 x i32>* %__a.addr.i17, align 16
store i32 0, i32* %__b.addr.i18, align 4
store <4 x i32>* @res_vui, <4 x i32>** %__c.addr.i19, align 8
- %40 = load <4 x i32>* %__a.addr.i17, align 16
- %41 = load i32* %__b.addr.i18, align 4
- %42 = load <4 x i32>** %__c.addr.i19, align 8
+ %40 = load <4 x i32>, <4 x i32>* %__a.addr.i17, align 16
+ %41 = load i32, i32* %__b.addr.i18, align 4
+ %42 = load <4 x i32>*, <4 x i32>** %__c.addr.i19, align 8
%43 = bitcast <4 x i32>* %42 to i8*
- %44 = getelementptr i8* %43, i32 %41
+ %44 = getelementptr i8, i8* %43, i32 %41
call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %40, i8* %44)
- %45 = load <4 x float>* @vf, align 16
+ %45 = load <4 x float>, <4 x float>* @vf, align 16
store <4 x float> %45, <4 x float>* %__a.addr.i14, align 16
store i32 0, i32* %__b.addr.i15, align 4
store <4 x float>* @res_vf, <4 x float>** %__c.addr.i16, align 8
- %46 = load <4 x float>* %__a.addr.i14, align 16
+ %46 = load <4 x float>, <4 x float>* %__a.addr.i14, align 16
%47 = bitcast <4 x float> %46 to <4 x i32>
- %48 = load i32* %__b.addr.i15, align 4
- %49 = load <4 x float>** %__c.addr.i16, align 8
+ %48 = load i32, i32* %__b.addr.i15, align 4
+ %49 = load <4 x float>*, <4 x float>** %__c.addr.i16, align 8
%50 = bitcast <4 x float>* %49 to i8*
- %51 = getelementptr i8* %50, i32 %48
+ %51 = getelementptr i8, i8* %50, i32 %48
call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %47, i8* %51) #1
- %52 = load <2 x i64>* @vsll, align 16
+ %52 = load <2 x i64>, <2 x i64>* @vsll, align 16
store <2 x i64> %52, <2 x i64>* %__a.addr.i11, align 16
store i32 0, i32* %__b.addr.i12, align 4
store <2 x i64>* @res_vsll, <2 x i64>** %__c.addr.i13, align 8
- %53 = load <2 x i64>* %__a.addr.i11, align 16
+ %53 = load <2 x i64>, <2 x i64>* %__a.addr.i11, align 16
%54 = bitcast <2 x i64> %53 to <2 x double>
- %55 = load i32* %__b.addr.i12, align 4
- %56 = load <2 x i64>** %__c.addr.i13, align 8
+ %55 = load i32, i32* %__b.addr.i12, align 4
+ %56 = load <2 x i64>*, <2 x i64>** %__c.addr.i13, align 8
%57 = bitcast <2 x i64>* %56 to i8*
- %58 = getelementptr i8* %57, i32 %55
+ %58 = getelementptr i8, i8* %57, i32 %55
call void @llvm.ppc.vsx.stxvd2x(<2 x double> %54, i8* %58)
- %59 = load <2 x i64>* @vull, align 16
+ %59 = load <2 x i64>, <2 x i64>* @vull, align 16
store <2 x i64> %59, <2 x i64>* %__a.addr.i8, align 16
store i32 0, i32* %__b.addr.i9, align 4
store <2 x i64>* @res_vull, <2 x i64>** %__c.addr.i10, align 8
- %60 = load <2 x i64>* %__a.addr.i8, align 16
+ %60 = load <2 x i64>, <2 x i64>* %__a.addr.i8, align 16
%61 = bitcast <2 x i64> %60 to <2 x double>
- %62 = load i32* %__b.addr.i9, align 4
- %63 = load <2 x i64>** %__c.addr.i10, align 8
+ %62 = load i32, i32* %__b.addr.i9, align 4
+ %63 = load <2 x i64>*, <2 x i64>** %__c.addr.i10, align 8
%64 = bitcast <2 x i64>* %63 to i8*
- %65 = getelementptr i8* %64, i32 %62
+ %65 = getelementptr i8, i8* %64, i32 %62
call void @llvm.ppc.vsx.stxvd2x(<2 x double> %61, i8* %65)
- %66 = load <2 x double>* @vd, align 16
+ %66 = load <2 x double>, <2 x double>* @vd, align 16
store <2 x double> %66, <2 x double>* %__a.addr.i6, align 16
store i32 0, i32* %__b.addr.i7, align 4
store <2 x double>* @res_vd, <2 x double>** %__c.addr.i, align 8
- %67 = load <2 x double>* %__a.addr.i6, align 16
- %68 = load i32* %__b.addr.i7, align 4
- %69 = load <2 x double>** %__c.addr.i, align 8
+ %67 = load <2 x double>, <2 x double>* %__a.addr.i6, align 16
+ %68 = load i32, i32* %__b.addr.i7, align 4
+ %69 = load <2 x double>*, <2 x double>** %__c.addr.i, align 8
%70 = bitcast <2 x double>* %69 to i8*
- %71 = getelementptr i8* %70, i32 %68
+ %71 = getelementptr i8, i8* %70, i32 %68
call void @llvm.ppc.vsx.stxvd2x(<2 x double> %67, i8* %71)
ret void
}
diff --git a/test/CodeGen/PowerPC/vsx-ldst.ll b/test/CodeGen/PowerPC/vsx-ldst.ll
index 688187d1fcb6..7f12b0480e13 100644
--- a/test/CodeGen/PowerPC/vsx-ldst.ll
+++ b/test/CodeGen/PowerPC/vsx-ldst.ll
@@ -12,7 +12,6 @@
; RUN: llc -mcpu=pwr8 -mattr=+vsx -O2 -mtriple=powerpc64le-unknown-linux-gnu < %s > %t
; RUN: grep lxvd2x < %t | count 6
; RUN: grep stxvd2x < %t | count 6
-; RUN: grep xxpermdi < %t | count 12
@vsi = global <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, align 16
@vui = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@@ -30,12 +29,12 @@
; Function Attrs: nounwind
define void @test1() {
entry:
- %0 = load <4 x i32>* @vsi, align 16
- %1 = load <4 x i32>* @vui, align 16
- %2 = load <4 x i32>* bitcast (<4 x float>* @vf to <4 x i32>*), align 16
- %3 = load <2 x double>* bitcast (<2 x i64>* @vsll to <2 x double>*), align 16
- %4 = load <2 x double>* bitcast (<2 x i64>* @vull to <2 x double>*), align 16
- %5 = load <2 x double>* @vd, align 16
+ %0 = load <4 x i32>, <4 x i32>* @vsi, align 16
+ %1 = load <4 x i32>, <4 x i32>* @vui, align 16
+ %2 = load <4 x i32>, <4 x i32>* bitcast (<4 x float>* @vf to <4 x i32>*), align 16
+ %3 = load <2 x double>, <2 x double>* bitcast (<2 x i64>* @vsll to <2 x double>*), align 16
+ %4 = load <2 x double>, <2 x double>* bitcast (<2 x i64>* @vull to <2 x double>*), align 16
+ %5 = load <2 x double>, <2 x double>* @vd, align 16
store <4 x i32> %0, <4 x i32>* @res_vsi, align 16
store <4 x i32> %1, <4 x i32>* @res_vui, align 16
store <4 x i32> %2, <4 x i32>* bitcast (<4 x float>* @res_vf to <4 x i32>*), align 16
diff --git a/test/CodeGen/PowerPC/vsx-minmax.ll b/test/CodeGen/PowerPC/vsx-minmax.ll
index 47f50abbc2a2..ad72cacae2a0 100644
--- a/test/CodeGen/PowerPC/vsx-minmax.ll
+++ b/test/CodeGen/PowerPC/vsx-minmax.ll
@@ -18,35 +18,35 @@ target triple = "powerpc64-unknown-linux-gnu"
define void @test1() #0 {
; CHECK-LABEL: @test1
entry:
- %0 = load volatile <4 x float>* @vf, align 16
- %1 = load volatile <4 x float>* @vf, align 16
+ %0 = load volatile <4 x float>, <4 x float>* @vf, align 16
+ %1 = load volatile <4 x float>, <4 x float>* @vf, align 16
%2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
; CHECK: xvmaxsp
store <4 x float> %2, <4 x float>* @vf1, align 16
- %3 = load <2 x double>* @vd, align 16
+ %3 = load <2 x double>, <2 x double>* @vd, align 16
%4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
; CHECK: xvmaxdp
store <2 x double> %4, <2 x double>* @vd1, align 16
- %5 = load volatile <4 x float>* @vf, align 16
- %6 = load volatile <4 x float>* @vf, align 16
+ %5 = load volatile <4 x float>, <4 x float>* @vf, align 16
+ %6 = load volatile <4 x float>, <4 x float>* @vf, align 16
%7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
; CHECK: xvmaxsp
store <4 x float> %7, <4 x float>* @vf2, align 16
- %8 = load volatile <4 x float>* @vf, align 16
- %9 = load volatile <4 x float>* @vf, align 16
+ %8 = load volatile <4 x float>, <4 x float>* @vf, align 16
+ %9 = load volatile <4 x float>, <4 x float>* @vf, align 16
%10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
; CHECK: xvminsp
store <4 x float> %10, <4 x float>* @vf3, align 16
- %11 = load <2 x double>* @vd, align 16
+ %11 = load <2 x double>, <2 x double>* @vd, align 16
%12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
; CHECK: xvmindp
store <2 x double> %12, <2 x double>* @vd2, align 16
- %13 = load volatile <4 x float>* @vf, align 16
- %14 = load volatile <4 x float>* @vf, align 16
+ %13 = load volatile <4 x float>, <4 x float>* @vf, align 16
+ %14 = load volatile <4 x float>, <4 x float>* @vf, align 16
%15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
; CHECK: xvminsp
store <4 x float> %15, <4 x float>* @vf4, align 16
- %16 = load double* @d, align 8
+ %16 = load double, double* @d, align 8
%17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
; CHECK: xsmaxdp
store double %17, double* @d1, align 8
diff --git a/test/CodeGen/PowerPC/vsx-p8.ll b/test/CodeGen/PowerPC/vsx-p8.ll
index d5a19059c60d..878714baab72 100644
--- a/test/CodeGen/PowerPC/vsx-p8.ll
+++ b/test/CodeGen/PowerPC/vsx-p8.ll
@@ -8,7 +8,7 @@ target triple = "powerpc64-unknown-linux-gnu"
; Unaligned loads/stores on P8 and later should use VSX where possible.
define <2 x double> @test28u(<2 x double>* %a) {
- %v = load <2 x double>* %a, align 8
+ %v = load <2 x double>, <2 x double>* %a, align 8
ret <2 x double> %v
; CHECK-LABEL: @test28u
@@ -26,7 +26,7 @@ define void @test29u(<2 x double>* %a, <2 x double> %b) {
}
define <4 x float> @test32u(<4 x float>* %a) {
- %v = load <4 x float>* %a, align 8
+ %v = load <4 x float>, <4 x float>* %a, align 8
ret <4 x float> %v
; CHECK-REG-LABEL: @test32u
diff --git a/test/CodeGen/PowerPC/vsx-recip-est.ll b/test/CodeGen/PowerPC/vsx-recip-est.ll
new file mode 100644
index 000000000000..f589c6c103e8
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-recip-est.ll
@@ -0,0 +1,62 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -enable-unsafe-fp-math | FileCheck %s
+@a = global float 3.000000e+00, align 4
+@b = global float 4.000000e+00, align 4
+@c = global double 3.000000e+00, align 8
+@d = global double 4.000000e+00, align 8
+
+; Function Attrs: nounwind
+define float @emit_xsresp() {
+entry:
+ %0 = load float, float* @a, align 4
+ %1 = load float, float* @b, align 4
+ %div = fdiv fast float %0, %1
+ ret float %div
+; CHECK-LABEL: @emit_xsresp
+; CHECK: xsresp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define float @emit_xsrsqrtesp(float %f) {
+entry:
+ %f.addr = alloca float, align 4
+ store float %f, float* %f.addr, align 4
+ %0 = load float, float* %f.addr, align 4
+ %1 = load float, float* @b, align 4
+ %2 = call float @llvm.sqrt.f32(float %1)
+ %div = fdiv fast float %0, %2
+ ret float %div
+; CHECK-LABEL: @emit_xsrsqrtesp
+; CHECK: xsrsqrtesp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind readnone
+declare float @llvm.sqrt.f32(float)
+
+; Function Attrs: nounwind
+define double @emit_xsredp() {
+entry:
+ %0 = load double, double* @c, align 8
+ %1 = load double, double* @d, align 8
+ %div = fdiv fast double %0, %1
+ ret double %div
+; CHECK-LABEL: @emit_xsredp
+; CHECK: xsredp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define double @emit_xsrsqrtedp(double %f) {
+entry:
+ %f.addr = alloca double, align 8
+ store double %f, double* %f.addr, align 8
+ %0 = load double, double* %f.addr, align 8
+ %1 = load double, double* @d, align 8
+ %2 = call double @llvm.sqrt.f64(double %1)
+ %div = fdiv fast double %0, %2
+ ret double %div
+; CHECK-LABEL: @emit_xsrsqrtedp
+; CHECK: xsrsqrtedp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind readnone
+declare double @llvm.sqrt.f64(double) #1
diff --git a/test/CodeGen/PowerPC/vsx-spill-norwstore.ll b/test/CodeGen/PowerPC/vsx-spill-norwstore.ll
new file mode 100644
index 000000000000..77b6cb29b24b
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-spill-norwstore.ll
@@ -0,0 +1,63 @@
+; RUN: llc -mcpu=pwr7 -verify-machineinstrs < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@.str1 = external unnamed_addr constant [5 x i8], align 1
+@.str10 = external unnamed_addr constant [9 x i8], align 1
+
+; Function Attrs: nounwind
+define void @main() #0 {
+; CHECK-LABEL: @main
+; Make sure that the stxvd2x passes -verify-machineinstrs
+; CHECK: stxvd2x
+
+entry:
+ %0 = tail call <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1>) #0
+ %1 = tail call <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1>) #0
+ br i1 false, label %if.then.i68.i, label %check.exit69.i
+
+if.then.i68.i: ; preds = %entry
+ unreachable
+
+check.exit69.i: ; preds = %entry
+ br i1 undef, label %if.then.i63.i, label %check.exit64.i
+
+if.then.i63.i: ; preds = %check.exit69.i
+ tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str10, i64 0, i64 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str1, i64 0, i64 0)) #0
+ br label %check.exit64.i
+
+check.exit64.i: ; preds = %if.then.i63.i, %check.exit69.i
+ %2 = tail call i32 @llvm.ppc.altivec.vcmpequh.p(i32 2, <8 x i16> %0, <8 x i16> <i16 0, i16 -1, i16 -1, i16 0, i16 0, i16 0, i16 -1, i16 0>) #0
+ %tobool.i55.i = icmp eq i32 %2, 0
+ br i1 %tobool.i55.i, label %if.then.i58.i, label %check.exit59.i
+
+if.then.i58.i: ; preds = %check.exit64.i
+ unreachable
+
+check.exit59.i: ; preds = %check.exit64.i
+ %3 = tail call i32 @llvm.ppc.altivec.vcmpequh.p(i32 2, <8 x i16> %1, <8 x i16> <i16 -1, i16 0, i16 0, i16 -1, i16 -1, i16 -1, i16 0, i16 -1>) #0
+ %tobool.i50.i = icmp eq i32 %3, 0
+ br i1 %tobool.i50.i, label %if.then.i53.i, label %check.exit54.i
+
+if.then.i53.i: ; preds = %check.exit59.i
+ unreachable
+
+check.exit54.i: ; preds = %check.exit59.i
+ unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8>) #1
+
+; Function Attrs: nounwind readnone
+declare <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8>) #1
+
+; Function Attrs: nounwind
+declare void @printf(i8* nocapture readonly, ...) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ppc.altivec.vcmpequh.p(i32, <8 x i16>, <8 x i16>) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+
diff --git a/test/CodeGen/PowerPC/vsx.ll b/test/CodeGen/PowerPC/vsx.ll
index f91ffdb960bb..b185fed4cd5b 100644
--- a/test/CodeGen/PowerPC/vsx.ll
+++ b/test/CodeGen/PowerPC/vsx.ll
@@ -501,7 +501,7 @@ define <2 x i64> @test27(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x double> @test28(<2 x double>* %a) {
- %v = load <2 x double>* %a, align 16
+ %v = load <2 x double>, <2 x double>* %a, align 16
ret <2 x double> %v
; CHECK-LABEL: @test28
@@ -519,7 +519,7 @@ define void @test29(<2 x double>* %a, <2 x double> %b) {
}
define <2 x double> @test28u(<2 x double>* %a) {
- %v = load <2 x double>* %a, align 8
+ %v = load <2 x double>, <2 x double>* %a, align 8
ret <2 x double> %v
; CHECK-LABEL: @test28u
@@ -537,7 +537,7 @@ define void @test29u(<2 x double>* %a, <2 x double> %b) {
}
define <2 x i64> @test30(<2 x i64>* %a) {
- %v = load <2 x i64>* %a, align 16
+ %v = load <2 x i64>, <2 x i64>* %a, align 16
ret <2 x i64> %v
; CHECK-REG-LABEL: @test30
@@ -562,7 +562,7 @@ define void @test31(<2 x i64>* %a, <2 x i64> %b) {
}
define <4 x float> @test32(<4 x float>* %a) {
- %v = load <4 x float>* %a, align 16
+ %v = load <4 x float>, <4 x float>* %a, align 16
ret <4 x float> %v
; CHECK-REG-LABEL: @test32
@@ -590,7 +590,7 @@ define void @test33(<4 x float>* %a, <4 x float> %b) {
}
define <4 x float> @test32u(<4 x float>* %a) {
- %v = load <4 x float>* %a, align 8
+ %v = load <4 x float>, <4 x float>* %a, align 8
ret <4 x float> %v
; CHECK-LABEL: @test32u
@@ -616,7 +616,7 @@ define void @test33u(<4 x float>* %a, <4 x float> %b) {
}
define <4 x i32> @test34(<4 x i32>* %a) {
- %v = load <4 x i32>* %a, align 16
+ %v = load <4 x i32>, <4 x i32>* %a, align 16
ret <4 x i32> %v
; CHECK-REG-LABEL: @test34
@@ -718,7 +718,7 @@ define <2 x i64> @test47(<2 x float> %a) {
}
define <2 x double> @test50(double* %a) {
- %v = load double* %a, align 8
+ %v = load double, double* %a, align 8
%w = insertelement <2 x double> undef, double %v, i32 0
%x = insertelement <2 x double> %w, double %v, i32 1
ret <2 x double> %x
@@ -733,7 +733,7 @@ define <2 x double> @test51(<2 x double> %a, <2 x double> %b) {
ret <2 x double> %v
; CHECK-LABEL: @test51
-; CHECK: xxpermdi 34, 34, 34, 0
+; CHECK: xxspltd 34, 34, 0
; CHECK: blr
}
@@ -742,7 +742,7 @@ define <2 x double> @test52(<2 x double> %a, <2 x double> %b) {
ret <2 x double> %v
; CHECK-LABEL: @test52
-; CHECK: xxpermdi 34, 34, 35, 0
+; CHECK: xxmrghd 34, 34, 35
; CHECK: blr
}
@@ -751,7 +751,7 @@ define <2 x double> @test53(<2 x double> %a, <2 x double> %b) {
ret <2 x double> %v
; CHECK-LABEL: @test53
-; CHECK: xxpermdi 34, 35, 34, 0
+; CHECK: xxmrghd 34, 35, 34
; CHECK: blr
}
@@ -769,7 +769,7 @@ define <2 x double> @test55(<2 x double> %a, <2 x double> %b) {
ret <2 x double> %v
; CHECK-LABEL: @test55
-; CHECK: xxpermdi 34, 34, 35, 3
+; CHECK: xxmrgld 34, 34, 35
; CHECK: blr
}
@@ -778,7 +778,7 @@ define <2 x i64> @test56(<2 x i64> %a, <2 x i64> %b) {
ret <2 x i64> %v
; CHECK-LABEL: @test56
-; CHECK: xxpermdi 34, 34, 35, 3
+; CHECK: xxmrgld 34, 34, 35
; CHECK: blr
}
@@ -843,11 +843,11 @@ define double @test64(<2 x double> %a) {
ret double %v
; CHECK-REG-LABEL: @test64
-; CHECK-REG: xxpermdi 1, 34, 34, 2
+; CHECK-REG: xxswapd 1, 34
; CHECK-REG: blr
; CHECK-FISL-LABEL: @test64
-; CHECK-FISL: xxpermdi 34, 34, 34, 2
+; CHECK-FISL: xxswapd 34, 34
; CHECK-FISL: xxlor 0, 34, 34
; CHECK-FISL: fmr 1, 0
; CHECK-FISL: blr
diff --git a/test/CodeGen/PowerPC/vsx_insert_extract_le.ll b/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
index 0a9df3779116..84bbdd75b0f7 100644
--- a/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
+++ b/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
@@ -1,35 +1,35 @@
; RUN: llc -mcpu=pwr8 -mattr=+vsx -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
define <2 x double> @testi0(<2 x double>* %p1, double* %p2) {
- %v = load <2 x double>* %p1
- %s = load double* %p2
+ %v = load <2 x double>, <2 x double>* %p1
+ %s = load double, double* %p2
%r = insertelement <2 x double> %v, double %s, i32 0
ret <2 x double> %r
; CHECK-LABEL: testi0
; CHECK: lxvd2x 0, 0, 3
; CHECK: lxsdx 34, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 34, 34, 0
+; CHECK: xxswapd 0, 0
+; CHECK: xxspltd 1, 34, 0
; CHECK: xxpermdi 34, 0, 1, 1
}
define <2 x double> @testi1(<2 x double>* %p1, double* %p2) {
- %v = load <2 x double>* %p1
- %s = load double* %p2
+ %v = load <2 x double>, <2 x double>* %p1
+ %s = load double, double* %p2
%r = insertelement <2 x double> %v, double %s, i32 1
ret <2 x double> %r
; CHECK-LABEL: testi1
; CHECK: lxvd2x 0, 0, 3
; CHECK: lxsdx 34, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 34, 34, 0
-; CHECK: xxpermdi 34, 1, 0, 3
+; CHECK: xxswapd 0, 0
+; CHECK: xxspltd 1, 34, 0
+; CHECK: xxmrgld 34, 1, 0
}
define double @teste0(<2 x double>* %p1) {
- %v = load <2 x double>* %p1
+ %v = load <2 x double>, <2 x double>* %p1
%r = extractelement <2 x double> %v, i32 0
ret double %r
@@ -37,16 +37,16 @@ define double @teste0(<2 x double>* %p1) {
; CHECK-LABEL: teste0
; CHECK: lxvd2x 0, 0, 3
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 0, 0, 2
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 1, 0
}
define double @teste1(<2 x double>* %p1) {
- %v = load <2 x double>* %p1
+ %v = load <2 x double>, <2 x double>* %p1
%r = extractelement <2 x double> %v, i32 1
ret double %r
; CHECK-LABEL: teste1
; CHECK: lxvd2x 0, 0, 3
-; CHECK: xxpermdi 1, 0, 0, 2
+; CHECK: xxswapd 1, 0
}
diff --git a/test/CodeGen/PowerPC/vsx_scalar_ld_st.ll b/test/CodeGen/PowerPC/vsx_scalar_ld_st.ll
new file mode 100644
index 000000000000..102970885963
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx_scalar_ld_st.ll
@@ -0,0 +1,139 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-direct-move | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-direct-move | FileCheck %s
+
+@d = common global double 0.000000e+00, align 8
+@f = common global float 0.000000e+00, align 4
+@i = common global i32 0, align 4
+@ui = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @dblToInt() #0 {
+entry:
+ %ii = alloca i32, align 4
+ %0 = load double, double* @d, align 8
+ %conv = fptosi double %0 to i32
+ store volatile i32 %conv, i32* %ii, align 4
+ ret void
+; CHECK-LABEL: @dblToInt
+; CHECK: xscvdpsxws [[REGCONV1:[0-9]+]],
+; CHECK: stxsiwx [[REGCONV1]],
+}
+
+; Function Attrs: nounwind
+define void @fltToInt() #0 {
+entry:
+ %ii = alloca i32, align 4
+ %0 = load float, float* @f, align 4
+ %conv = fptosi float %0 to i32
+ store volatile i32 %conv, i32* %ii, align 4
+ ret void
+; CHECK-LABEL: @fltToInt
+; CHECK: xscvdpsxws [[REGCONV2:[0-9]+]],
+; CHECK: stxsiwx [[REGCONV2]],
+}
+
+; Function Attrs: nounwind
+define void @intToDbl() #0 {
+entry:
+ %dd = alloca double, align 8
+ %0 = load i32, i32* @i, align 4
+ %conv = sitofp i32 %0 to double
+ store volatile double %conv, double* %dd, align 8
+ ret void
+; CHECK-LABEL: @intToDbl
+; CHECK: lxsiwax [[REGLD1:[0-9]+]],
+; CHECK: xscvsxddp {{[0-9]+}}, [[REGLD1]]
+}
+
+; Function Attrs: nounwind
+define void @intToFlt() #0 {
+entry:
+ %ff = alloca float, align 4
+ %0 = load i32, i32* @i, align 4
+ %conv = sitofp i32 %0 to float
+ store volatile float %conv, float* %ff, align 4
+ ret void
+; CHECK-LABEL: @intToFlt
+; CHECK: lxsiwax [[REGLD2:[0-9]+]],
+; FIXME: the below will change when the VSX form is implemented
+; CHECK: fcfids {{[0-9]+}}, [[REGLD2]]
+}
+
+; Function Attrs: nounwind
+define void @dblToUInt() #0 {
+entry:
+ %uiui = alloca i32, align 4
+ %0 = load double, double* @d, align 8
+ %conv = fptoui double %0 to i32
+ store volatile i32 %conv, i32* %uiui, align 4
+ ret void
+; CHECK-LABEL: @dblToUInt
+; CHECK: xscvdpuxws [[REGCONV3:[0-9]+]],
+; CHECK: stxsiwx [[REGCONV3]],
+}
+
+; Function Attrs: nounwind
+define void @fltToUInt() #0 {
+entry:
+ %uiui = alloca i32, align 4
+ %0 = load float, float* @f, align 4
+ %conv = fptoui float %0 to i32
+ store volatile i32 %conv, i32* %uiui, align 4
+ ret void
+; CHECK-LABEL: @fltToUInt
+; CHECK: xscvdpuxws [[REGCONV4:[0-9]+]],
+; CHECK: stxsiwx [[REGCONV4]],
+}
+
+; Function Attrs: nounwind
+define void @uIntToDbl() #0 {
+entry:
+ %dd = alloca double, align 8
+ %0 = load i32, i32* @ui, align 4
+ %conv = uitofp i32 %0 to double
+ store volatile double %conv, double* %dd, align 8
+ ret void
+; CHECK-LABEL: @uIntToDbl
+; CHECK: lxsiwzx [[REGLD3:[0-9]+]],
+; CHECK: xscvuxddp {{[0-9]+}}, [[REGLD3]]
+}
+
+; Function Attrs: nounwind
+define void @uIntToFlt() #0 {
+entry:
+ %ff = alloca float, align 4
+ %0 = load i32, i32* @ui, align 4
+ %conv = uitofp i32 %0 to float
+ store volatile float %conv, float* %ff, align 4
+ ret void
+; CHECK-LABEL: @uIntToFlt
+; CHECK: lxsiwzx [[REGLD4:[0-9]+]],
+; FIXME: the below will change when the VSX form is implemented
+; CHECK: fcfidus {{[0-9]+}}, [[REGLD4]]
+}
+
+; Function Attrs: nounwind
+define void @dblToFloat() #0 {
+entry:
+ %ff = alloca float, align 4
+ %0 = load double, double* @d, align 8
+ %conv = fptrunc double %0 to float
+ store volatile float %conv, float* %ff, align 4
+ ret void
+; CHECK-LABEL: @dblToFloat
+; CHECK: lxsdx [[REGLD5:[0-9]+]],
+; CHECK: stxsspx [[REGLD5]],
+}
+
+; Function Attrs: nounwind
+define void @floatToDbl() #0 {
+entry:
+ %dd = alloca double, align 8
+ %0 = load float, float* @f, align 4
+ %conv = fpext float %0 to double
+ store volatile double %conv, double* %dd, align 8
+ ret void
+; CHECK-LABEL: @floatToDbl
+; CHECK: lxsspx [[REGLD5:[0-9]+]],
+; CHECK: stxsdx [[REGLD5]],
+}
diff --git a/test/CodeGen/PowerPC/vsx_shuffle_le.ll b/test/CodeGen/PowerPC/vsx_shuffle_le.ll
index 588cfdad7853..dcfa0e788867 100644
--- a/test/CodeGen/PowerPC/vsx_shuffle_le.ll
+++ b/test/CodeGen/PowerPC/vsx_shuffle_le.ll
@@ -1,207 +1,207 @@
; RUN: llc -mcpu=pwr8 -mattr=+vsx -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
define <2 x double> @test00(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 0>
ret <2 x double> %v3
; CHECK-LABEL: test00
; CHECK: lxvd2x 0, 0, 3
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 34, 0, 0, 3
+; CHECK: xxswapd 0, 0
+; CHECK: xxspltd 34, 0, 1
}
define <2 x double> @test01(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 1>
ret <2 x double> %v3
; CHECK-LABEL: test01
; CHECK: lxvd2x 0, 0, 3
-; CHECK: xxpermdi 34, 0, 0, 2
+; CHECK: xxswapd 34, 0
}
define <2 x double> @test02(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 2>
ret <2 x double> %v3
; CHECK-LABEL: @test02
; CHECK: lxvd2x 0, 0, 3
; CHECK: lxvd2x 1, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 1, 1, 2
-; CHECK: xxpermdi 34, 1, 0, 3
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 1, 1
+; CHECK: xxmrgld 34, 1, 0
}
define <2 x double> @test03(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 3>
ret <2 x double> %v3
; CHECK-LABEL: @test03
; CHECK: lxvd2x 0, 0, 3
; CHECK: lxvd2x 1, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 1, 1
; CHECK: xxpermdi 34, 1, 0, 1
}
define <2 x double> @test10(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 0>
ret <2 x double> %v3
; CHECK-LABEL: @test10
; CHECK: lxvd2x 0, 0, 3
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 34, 0, 0, 2
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 34, 0
}
define <2 x double> @test11(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 1>
ret <2 x double> %v3
; CHECK-LABEL: @test11
; CHECK: lxvd2x 0, 0, 3
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 34, 0, 0, 0
+; CHECK: xxswapd 0, 0
+; CHECK: xxspltd 34, 0, 0
}
define <2 x double> @test12(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 2>
ret <2 x double> %v3
; CHECK-LABEL: @test12
; CHECK: lxvd2x 0, 0, 3
; CHECK: lxvd2x 1, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 1, 1
; CHECK: xxpermdi 34, 1, 0, 2
}
define <2 x double> @test13(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 3>
ret <2 x double> %v3
; CHECK-LABEL: @test13
; CHECK: lxvd2x 0, 0, 3
; CHECK: lxvd2x 1, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 1, 1, 2
-; CHECK: xxpermdi 34, 1, 0, 0
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 1, 1
+; CHECK: xxmrghd 34, 1, 0
}
define <2 x double> @test20(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 0>
ret <2 x double> %v3
; CHECK-LABEL: @test20
; CHECK: lxvd2x 0, 0, 3
; CHECK: lxvd2x 1, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 1, 1, 2
-; CHECK: xxpermdi 34, 0, 1, 3
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 1, 1
+; CHECK: xxmrgld 34, 0, 1
}
define <2 x double> @test21(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 1>
ret <2 x double> %v3
; CHECK-LABEL: @test21
; CHECK: lxvd2x 0, 0, 3
; CHECK: lxvd2x 1, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 1, 1
; CHECK: xxpermdi 34, 0, 1, 1
}
define <2 x double> @test22(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 2>
ret <2 x double> %v3
; CHECK-LABEL: @test22
; CHECK: lxvd2x 0, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 34, 0, 0, 3
+; CHECK: xxswapd 0, 0
+; CHECK: xxspltd 34, 0, 1
}
define <2 x double> @test23(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 3>
ret <2 x double> %v3
; CHECK-LABEL: @test23
; CHECK: lxvd2x 0, 0, 4
-; CHECK: xxpermdi 34, 0, 0, 2
+; CHECK: xxswapd 34, 0
}
define <2 x double> @test30(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 0>
ret <2 x double> %v3
; CHECK-LABEL: @test30
; CHECK: lxvd2x 0, 0, 3
; CHECK: lxvd2x 1, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 1, 1
; CHECK: xxpermdi 34, 0, 1, 2
}
define <2 x double> @test31(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 1>
ret <2 x double> %v3
; CHECK-LABEL: @test31
; CHECK: lxvd2x 0, 0, 3
; CHECK: lxvd2x 1, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 1, 1, 1, 2
-; CHECK: xxpermdi 34, 0, 1, 0
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 1, 1
+; CHECK: xxmrghd 34, 0, 1
}
define <2 x double> @test32(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 2>
ret <2 x double> %v3
; CHECK-LABEL: @test32
; CHECK: lxvd2x 0, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 34, 0, 0, 2
+; CHECK: xxswapd 0, 0
+; CHECK: xxswapd 34, 0
}
define <2 x double> @test33(<2 x double>* %p1, <2 x double>* %p2) {
- %v1 = load <2 x double>* %p1
- %v2 = load <2 x double>* %p2
+ %v1 = load <2 x double>, <2 x double>* %p1
+ %v2 = load <2 x double>, <2 x double>* %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 3>
ret <2 x double> %v3
; CHECK-LABEL: @test33
; CHECK: lxvd2x 0, 0, 4
-; CHECK: xxpermdi 0, 0, 0, 2
-; CHECK: xxpermdi 34, 0, 0, 0
+; CHECK: xxswapd 0, 0
+; CHECK: xxspltd 34, 0, 0
}
diff --git a/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll b/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
index e038b3f2fb25..0b87613bb4d8 100644
--- a/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
+++ b/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
@@ -11,7 +11,7 @@
; CHECK-D89: .weak_definition _v1
define i32 @f1() {
- %x = load i32 * @v1
+ %x = load i32, i32* @v1
ret i32 %x
}
@@ -45,6 +45,6 @@ define i32* @f3() {
; CHECK-D89: .weak_definition _v4
define i32 @f4() {
- %x = load i32 * @v4
+ %x = load i32, i32* @v4
ret i32 %x
}
diff --git a/test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll b/test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll
new file mode 100644
index 000000000000..4d929c627f1c
--- /dev/null
+++ b/test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll
@@ -0,0 +1,52 @@
+; Check the miscellaneous logical vector operations added in P8
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+; Test x eqv y
+define <4 x i32> @test_xxleqv(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp = xor <4 x i32> %x, %y
+ %ret_val = xor <4 x i32> %tmp, < i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %ret_val
+; CHECK: xxleqv 34, 34, 35
+}
+
+; Test x xxlnand y
+define <4 x i32> @test_xxlnand(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp = and <4 x i32> %x, %y
+ %ret_val = xor <4 x i32> %tmp, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %ret_val
+; CHECK: xxlnand 34, 34, 35
+}
+
+; Test x xxlorc y
+define <4 x i32> @test_xxlorc(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %ret_val = or <4 x i32> %x, %tmp
+ ret <4 x i32> %ret_val
+; CHECK: xxlorc 34, 34, 35
+}
+
+; Test x eqv y
+define <8 x i16> @test_xxleqvv8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+ %tmp = xor <8 x i16> %x, %y
+ %ret_val = xor <8 x i16> %tmp, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <8 x i16> %ret_val
+; CHECK: xxleqv 34, 34, 35
+}
+
+; Test x xxlnand y
+define <8 x i16> @test_xxlnandv8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+ %tmp = and <8 x i16> %x, %y
+ %ret_val = xor <8 x i16> %tmp, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <8 x i16> %ret_val
+; CHECK: xxlnand 34, 34, 35
+}
+
+; Test x xxlorc y
+define <8 x i16> @test_xxlorcv8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+ %tmp = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %ret_val = or <8 x i16> %x, %tmp
+ ret <8 x i16> %ret_val
+; CHECK: xxlorc 34, 34, 35
+}
+
diff --git a/test/CodeGen/PowerPC/zero-not-run.ll b/test/CodeGen/PowerPC/zero-not-run.ll
index 9df0d6e004ef..b3b7634f41a8 100644
--- a/test/CodeGen/PowerPC/zero-not-run.ll
+++ b/test/CodeGen/PowerPC/zero-not-run.ll
@@ -8,7 +8,7 @@ entry:
br i1 undef, label %for.body, label %for.end731
for.body: ; preds = %entry
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
%or31 = or i32 %0, 319143828
store i32 %or31, i32* undef, align 4
%cmp32 = icmp eq i32 319143828, %or31
diff --git a/test/CodeGen/PowerPC/zext-free.ll b/test/CodeGen/PowerPC/zext-free.ll
index 080dbaa58da1..ffbbb5445019 100644
--- a/test/CodeGen/PowerPC/zext-free.ll
+++ b/test/CodeGen/PowerPC/zext-free.ll
@@ -5,22 +5,22 @@ target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: noreturn nounwind
define signext i32 @_Z1fRPc(i8** nocapture dereferenceable(8) %p) #0 {
entry:
- %.pre = load i8** %p, align 8
+ %.pre = load i8*, i8** %p, align 8
br label %loop
loop: ; preds = %loop.backedge, %entry
%0 = phi i8* [ %.pre, %entry ], [ %.be, %loop.backedge ]
- %1 = load i8* %0, align 1
+ %1 = load i8, i8* %0, align 1
%tobool = icmp eq i8 %1, 0
- %incdec.ptr = getelementptr inbounds i8* %0, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %0, i64 1
store i8* %incdec.ptr, i8** %p, align 8
- %2 = load i8* %incdec.ptr, align 1
+ %2 = load i8, i8* %incdec.ptr, align 1
%tobool2 = icmp ne i8 %2, 0
%or.cond = and i1 %tobool, %tobool2
br i1 %or.cond, label %if.then3, label %loop.backedge
if.then3: ; preds = %loop
- %incdec.ptr4 = getelementptr inbounds i8* %0, i64 2
+ %incdec.ptr4 = getelementptr inbounds i8, i8* %0, i64 2
store i8* %incdec.ptr4, i8** %p, align 8
br label %loop.backedge