Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/ARM/2009-10-16-Scope.ll | 4
-rw-r--r--  test/CodeGen/ARM/2010-11-30-reloc-movt.ll | 16
-rw-r--r--  test/CodeGen/ARM/2010-12-15-elf-lcomm.ll | 21
-rw-r--r--  test/CodeGen/ARM/2011-06-09-TailCallByVal.ll | 1
-rw-r--r--  test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll | 124
-rw-r--r--  test/CodeGen/ARM/2011-08-12-vmovqqqq-pseudo.ll | 12
-rw-r--r--  test/CodeGen/ARM/2011-08-25-ldmia_ret.ll | 100
-rw-r--r--  test/CodeGen/ARM/2011-08-29-SchedCycle.ll | 45
-rw-r--r--  test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll | 34
-rw-r--r--  test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll | 23
-rw-r--r--  test/CodeGen/ARM/2011-09-19-cpsr.ll | 54
-rw-r--r--  test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll | 30
-rw-r--r--  test/CodeGen/ARM/atomic-64bit.ll | 128
-rw-r--r--  test/CodeGen/ARM/atomic-cmp.ll | 8
-rw-r--r--  test/CodeGen/ARM/atomic-load-store.ll | 56
-rw-r--r--  test/CodeGen/ARM/atomic-op.ll | 48
-rw-r--r--  test/CodeGen/ARM/avoid-cpsr-rmw.ll | 6
-rw-r--r--  test/CodeGen/ARM/call-tc.ll | 4
-rw-r--r--  test/CodeGen/ARM/carry.ll | 10
-rw-r--r--  test/CodeGen/ARM/crash-greedy-v6.ll | 32
-rw-r--r--  test/CodeGen/ARM/crash.ll | 44
-rw-r--r--  test/CodeGen/ARM/debug-info-arg.ll | 65
-rw-r--r--  test/CodeGen/ARM/debug-info-blocks.ll | 2
-rw-r--r--  test/CodeGen/ARM/debug-info-sreg2.ll | 6
-rw-r--r--  test/CodeGen/ARM/divmod.ll | 58
-rw-r--r--  test/CodeGen/ARM/elf-lcomm-align.ll | 14
-rw-r--r--  test/CodeGen/ARM/fabss.ll | 4
-rw-r--r--  test/CodeGen/ARM/fast-isel.ll | 4
-rw-r--r--  test/CodeGen/ARM/fp_convert.ll | 6
-rw-r--r--  test/CodeGen/ARM/fpmem.ll | 18
-rw-r--r--  test/CodeGen/ARM/hidden-vis-2.ll | 2
-rw-r--r--  test/CodeGen/ARM/hidden-vis-3.ll | 2
-rw-r--r--  test/CodeGen/ARM/iabs.ll | 8
-rw-r--r--  test/CodeGen/ARM/ifcvt4.ll | 10
-rw-r--r--  test/CodeGen/ARM/indirectbr.ll | 1
-rw-r--r--  test/CodeGen/ARM/inlineasm3.ll | 12
-rw-r--r--  test/CodeGen/ARM/inlineasm4.ll | 17
-rw-r--r--  test/CodeGen/ARM/lsr-on-unrolled-loops.ll | 495
-rw-r--r--  test/CodeGen/ARM/lsr-unfolded-offset.ll | 3
-rw-r--r--  test/CodeGen/ARM/mulhi.ll | 44
-rw-r--r--  test/CodeGen/ARM/select.ll | 12
-rw-r--r--  test/CodeGen/ARM/shifter_operand.ll | 9
-rw-r--r--  test/CodeGen/ARM/str_pre-2.ll | 4
-rw-r--r--  test/CodeGen/ARM/subreg-remat.ll | 52
-rw-r--r--  test/CodeGen/ARM/sxt_rot.ll | 41
-rw-r--r--  test/CodeGen/ARM/tail-opts.ll | 2
-rw-r--r--  test/CodeGen/ARM/thumb2-it-block.ll | 20
-rw-r--r--  test/CodeGen/ARM/va_arg.ll | 1
-rw-r--r--  test/CodeGen/ARM/vext.ll | 17
-rw-r--r--  test/CodeGen/ARM/widen-vmovs.ll | 35
-rw-r--r--  test/CodeGen/Alpha/2006-04-04-zextload.ll | 4
-rw-r--r--  test/CodeGen/Alpha/mb.ll | 4
-rw-r--r--  test/CodeGen/Alpha/wmb.ll | 8
-rw-r--r--  test/CodeGen/CBackend/X86/dg.exp | 2
-rw-r--r--  test/CodeGen/CellSPU/jumptable.ll | 12
-rw-r--r--  test/CodeGen/CellSPU/or_ops.ll | 13
-rw-r--r--  test/CodeGen/Generic/2004-02-08-UnwindSupport.ll | 17
-rw-r--r--  test/CodeGen/Generic/2007-02-25-invoke.ll | 6
-rw-r--r--  test/CodeGen/Generic/2007-04-30-LandingPadBranchFolding.ll | 6
-rw-r--r--  test/CodeGen/Generic/2007-12-17-InvokeAsm.ll | 12
-rw-r--r--  test/CodeGen/Generic/2007-12-31-UnusedSelector.ll | 5
-rw-r--r--  test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll | 4
-rw-r--r--  test/CodeGen/Generic/2009-11-16-BadKillsCrash.ll | 8
-rw-r--r--  test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll | 3
-rw-r--r--  test/CodeGen/Generic/exception-handling.ll | 29
-rw-r--r--  test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll | 3
-rw-r--r--  test/CodeGen/Generic/promote-integers.ll | 15
-rw-r--r--  test/CodeGen/Mips/2008-07-05-ByVal.ll | 18
-rw-r--r--  test/CodeGen/Mips/2008-07-06-fadd64.ll | 6
-rw-r--r--  test/CodeGen/Mips/2008-07-07-FPExtend.ll | 6
-rw-r--r--  test/CodeGen/Mips/2008-07-07-IntDoubleConvertions.ll | 17
-rw-r--r--  test/CodeGen/Mips/2008-07-15-InternalConstant.ll | 19
-rw-r--r--  test/CodeGen/Mips/2008-07-15-SmallSection.ll | 23
-rw-r--r--  test/CodeGen/Mips/2008-07-16-SignExtInReg.ll | 8
-rw-r--r--  test/CodeGen/Mips/2008-08-03-fabs64.ll | 8
-rw-r--r--  test/CodeGen/Mips/2008-08-07-FPRound.ll | 6
-rw-r--r--  test/CodeGen/Mips/2008-08-08-bswap.ll | 5
-rw-r--r--  test/CodeGen/Mips/2010-07-20-Select.ll | 22
-rw-r--r--  test/CodeGen/Mips/2010-11-09-CountLeading.ll | 2
-rw-r--r--  test/CodeGen/Mips/2010-11-09-Mul.ll | 2
-rw-r--r--  test/CodeGen/Mips/alloca.ll | 2
-rw-r--r--  test/CodeGen/Mips/atomic.ll | 125
-rw-r--r--  test/CodeGen/Mips/brdelayslot.ll | 15
-rwxr-xr-x  test/CodeGen/Mips/cmov.ll | 4
-rw-r--r--  test/CodeGen/Mips/constantfp0.ll | 11
-rw-r--r--  test/CodeGen/Mips/cprestore.ll | 20
-rw-r--r--  test/CodeGen/Mips/double2int.ll | 2
-rw-r--r--  test/CodeGen/Mips/eh.ll | 15
-rw-r--r--  test/CodeGen/Mips/extins.ll | 21
-rw-r--r--  test/CodeGen/Mips/fcopysign.ll | 4
-rw-r--r--  test/CodeGen/Mips/fpcmp.ll | 15
-rw-r--r--  test/CodeGen/Mips/frame-address.ll | 2
-rw-r--r--  test/CodeGen/Mips/i64arg.ll | 2
-rw-r--r--  test/CodeGen/Mips/inlineasmmemop.ll | 2
-rw-r--r--  test/CodeGen/Mips/internalfunc.ll | 2
-rw-r--r--  test/CodeGen/Mips/largeimmprinting.ll | 6
-rw-r--r--  test/CodeGen/Mips/madd-msub.ll | 14
-rw-r--r--  test/CodeGen/Mips/mips64fpldst.ll | 58
-rw-r--r--  test/CodeGen/Mips/mips64instrs.ll | 143
-rw-r--r--  test/CodeGen/Mips/mips64intldst.ll | 157
-rw-r--r--  test/CodeGen/Mips/mips64shift.ll | 104
-rw-r--r--  test/CodeGen/Mips/mipslopat.ll | 19
-rw-r--r--  test/CodeGen/Mips/o32_cc.ll | 2
-rw-r--r--  test/CodeGen/Mips/o32_cc_byval.ll | 2
-rw-r--r--  test/CodeGen/Mips/o32_cc_vararg.ll | 2
-rw-r--r--  test/CodeGen/Mips/rotate.ll | 2
-rw-r--r--  test/CodeGen/Mips/select.ll | 96
-rw-r--r--  test/CodeGen/Mips/tls.ll | 4
-rw-r--r--  test/CodeGen/Mips/unalignedload.ll | 41
-rw-r--r--  test/CodeGen/PTX/20110926-sitofp.ll | 24
-rw-r--r--  test/CodeGen/PTX/add.ll | 40
-rw-r--r--  test/CodeGen/PTX/aggregates.ll | 1
-rw-r--r--  test/CodeGen/PTX/bitwise.ll | 6
-rw-r--r--  test/CodeGen/PTX/bra.ll | 8
-rw-r--r--  test/CodeGen/PTX/cvt.ll | 186
-rw-r--r--  test/CodeGen/PTX/fdiv-sm10.ll | 8
-rw-r--r--  test/CodeGen/PTX/fdiv-sm13.ll | 8
-rw-r--r--  test/CodeGen/PTX/fneg.ll | 8
-rw-r--r--  test/CodeGen/PTX/intrinsic.ll | 134
-rw-r--r--  test/CodeGen/PTX/ld.ll | 255
-rw-r--r--  test/CodeGen/PTX/llvm-intrinsic.ll | 24
-rw-r--r--  test/CodeGen/PTX/mad.ll | 8
-rw-r--r--  test/CodeGen/PTX/mov.ll | 24
-rw-r--r--  test/CodeGen/PTX/mul.ll | 16
-rw-r--r--  test/CodeGen/PTX/parameter-order.ll | 4
-rw-r--r--  test/CodeGen/PTX/selp.ll | 8
-rw-r--r--  test/CodeGen/PTX/setp.ll | 136
-rw-r--r--  test/CodeGen/PTX/shl.ll | 6
-rw-r--r--  test/CodeGen/PTX/shr.ll | 12
-rw-r--r--  test/CodeGen/PTX/simple-call.ll | 27
-rw-r--r--  test/CodeGen/PTX/st.ll | 235
-rw-r--r--  test/CodeGen/PTX/stack-object.ll | 19
-rw-r--r--  test/CodeGen/PTX/sub.ll | 40
-rw-r--r--  test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll | 17
-rw-r--r--  test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll | 7
-rw-r--r--  test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/Atomics-32.ll | 1402
-rw-r--r--  test/CodeGen/PowerPC/Atomics-64.ll | 1433
-rw-r--r--  test/CodeGen/PowerPC/atomic-1.ll | 25
-rw-r--r--  test/CodeGen/PowerPC/atomic-2.ll | 25
-rw-r--r--  test/CodeGen/PowerPC/cr1eq.ll | 18
-rw-r--r--  test/CodeGen/PowerPC/trampoline.ll | 6
-rw-r--r--  test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll | 6
-rw-r--r--  test/CodeGen/Thumb/barrier.ll | 14
-rw-r--r--  test/CodeGen/Thumb/iabs.ll | 11
-rw-r--r--  test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll | 4
-rw-r--r--  test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll | 18
-rw-r--r--  test/CodeGen/Thumb2/machine-licm.ll | 2
-rw-r--r--  test/CodeGen/Thumb2/thumb2-barrier.ll | 31
-rw-r--r--  test/CodeGen/Thumb2/thumb2-bcc.ll | 23
-rw-r--r--  test/CodeGen/Thumb2/thumb2-branch.ll | 18
-rw-r--r--  test/CodeGen/Thumb2/thumb2-ifcvt1.ll | 8
-rw-r--r--  test/CodeGen/Thumb2/thumb2-ldm.ll | 2
-rw-r--r--  test/CodeGen/Thumb2/thumb2-mls.ll | 2
-rw-r--r--  test/CodeGen/Thumb2/thumb2-mul.ll | 2
-rw-r--r--  test/CodeGen/Thumb2/thumb2-sxt-uxt.ll | 29
-rw-r--r--  test/CodeGen/X86/2006-05-11-InstrSched.ll | 2
-rw-r--r--  test/CodeGen/X86/2006-07-19-ATTAsm.ll | 49
-rw-r--r--  test/CodeGen/X86/2007-05-07-InvokeSRet.ll | 4
-rw-r--r--  test/CodeGen/X86/2008-01-08-SchedulerCrash.ll | 10
-rw-r--r--  test/CodeGen/X86/2008-04-17-CoalescerBug.ll | 6
-rw-r--r--  test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll | 6
-rw-r--r--  test/CodeGen/X86/2008-08-19-SubAndFetch.ll | 4
-rw-r--r--  test/CodeGen/X86/2008-09-18-inline-asm-2.ll | 2
-rw-r--r--  test/CodeGen/X86/2008-10-02-Atomics32-2.ll | 969
-rw-r--r--  test/CodeGen/X86/2009-03-13-PHIElimBug.ll | 4
-rw-r--r--  test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll | 4
-rw-r--r--  test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll | 2
-rw-r--r--  test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll | 3
-rw-r--r--  test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll | 4
-rw-r--r--  test/CodeGen/X86/2009-10-16-Scope.ll | 4
-rw-r--r--  test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll | 6
-rw-r--r--  test/CodeGen/X86/2009-11-25-ImpDefBug.ll | 4
-rw-r--r--  test/CodeGen/X86/2010-01-08-Atomic64Bug.ll | 8
-rw-r--r--  test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll | 4
-rw-r--r--  test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll | 2
-rw-r--r--  test/CodeGen/X86/2010-10-08-cmpxchg8b.ll | 4
-rw-r--r--  test/CodeGen/X86/2011-08-23-PerformSubCombine128.ll | 18
-rw-r--r--  test/CodeGen/X86/2011-08-23-Trampoline.ll | 16
-rw-r--r--  test/CodeGen/X86/2011-08-29-BlockConstant.ll | 34
-rw-r--r--  test/CodeGen/X86/2011-08-29-InitOrder.ll | 28
-rw-r--r--  test/CodeGen/X86/2011-09-14-valcoalesce.ll | 174
-rw-r--r--  test/CodeGen/X86/2011-09-18-sse2cmp.ll | 12
-rw-r--r--  test/CodeGen/X86/2011-09-21-setcc-bug.ll | 27
-rw-r--r--  test/CodeGen/X86/2011-10-11-SpillDead.ll | 19
-rw-r--r--  test/CodeGen/X86/2011-10-11-srl.ll | 11
-rw-r--r--  test/CodeGen/X86/2011-10-12-MachineCSE.ll | 116
-rw-r--r--  test/CodeGen/X86/Atomics-32.ll | 818
-rw-r--r--  test/CodeGen/X86/Atomics-64.ll | 1919
-rw-r--r--  test/CodeGen/X86/MachineSink-DbgValue.ll | 49
-rw-r--r--  test/CodeGen/X86/MachineSink-eflags.ll | 74
-rw-r--r--  test/CodeGen/X86/SIMD/dg.exp | 5
-rw-r--r--  test/CodeGen/X86/SIMD/notvunpcklpd.ll | 20
-rw-r--r--  test/CodeGen/X86/SIMD/notvunpcklps.ll | 20
-rw-r--r--  test/CodeGen/X86/SIMD/vunpcklpd.ll | 20
-rw-r--r--  test/CodeGen/X86/SIMD/vunpcklps.ll | 20
-rw-r--r--  test/CodeGen/X86/alignment-2.ll | 28
-rw-r--r--  test/CodeGen/X86/alignment.ll | 2
-rw-r--r--  test/CodeGen/X86/asm-label2.ll | 4
-rw-r--r--  test/CodeGen/X86/atomic-load-store-wide.ll | 19
-rw-r--r--  test/CodeGen/X86/atomic-load-store.ll | 23
-rw-r--r--  test/CodeGen/X86/atomic-or.ll | 12
-rw-r--r--  test/CodeGen/X86/atomic_add.ll | 116
-rw-r--r--  test/CodeGen/X86/atomic_op.ll | 62
-rw-r--r--  test/CodeGen/X86/avx-256-arith.s | 0
-rw-r--r--  test/CodeGen/X86/avx-256.ll | 15
-rw-r--r--  test/CodeGen/X86/avx-arith.ll (renamed from test/CodeGen/X86/avx-256-arith.ll) | 145
-rw-r--r--  test/CodeGen/X86/avx-basic.ll | 107
-rw-r--r--  test/CodeGen/X86/avx-bitcast.ll | 10
-rw-r--r--  test/CodeGen/X86/avx-blend.ll | 104
-rw-r--r--  test/CodeGen/X86/avx-cast.ll | 47
-rw-r--r--  test/CodeGen/X86/avx-cmp.ll | 150
-rw-r--r--  test/CodeGen/X86/avx-cvt.ll (renamed from test/CodeGen/X86/avx-128.ll) | 61
-rw-r--r--  test/CodeGen/X86/avx-load-store.ll | 85
-rw-r--r--  test/CodeGen/X86/avx-logic.ll (renamed from test/CodeGen/X86/avx-256-logic.ll) | 18
-rw-r--r--  test/CodeGen/X86/avx-minmax.ll | 65
-rw-r--r--  test/CodeGen/X86/avx-movdup.ll | 34
-rw-r--r--  test/CodeGen/X86/avx-select.ll | 22
-rw-r--r--  test/CodeGen/X86/avx-shift.ll | 75
-rw-r--r--  test/CodeGen/X86/avx-shuffle.ll | 10
-rw-r--r--  test/CodeGen/X86/avx-splat.ll | 103
-rw-r--r--  test/CodeGen/X86/avx-unpack.ll | 89
-rw-r--r--  test/CodeGen/X86/avx-vbroadcast.ll | 94
-rw-r--r--  test/CodeGen/X86/avx-vextractf128.ll | 18
-rw-r--r--  test/CodeGen/X86/avx-vinsertf128.ll | 58
-rw-r--r--  test/CodeGen/X86/avx-vmovddup.ll | 14
-rw-r--r--  test/CodeGen/X86/avx-vperm2f128.ll | 62
-rw-r--r--  test/CodeGen/X86/avx-vpermil.ll | 45
-rw-r--r--  test/CodeGen/X86/avx-vshufp.ll | 29
-rw-r--r--  test/CodeGen/X86/avx-vzeroupper.ll | 26
-rw-r--r--  test/CodeGen/X86/barrier-sse.ll | 18
-rw-r--r--  test/CodeGen/X86/barrier.ll | 5
-rw-r--r--  test/CodeGen/X86/bmi.ll | 53
-rw-r--r--  test/CodeGen/X86/bswap.ll | 2
-rw-r--r--  test/CodeGen/X86/change-compare-stride-0.ll | 5
-rw-r--r--  test/CodeGen/X86/change-compare-stride-1.ll | 5
-rw-r--r--  test/CodeGen/X86/cmov.ll | 4
-rw-r--r--  test/CodeGen/X86/cmpxchg16b.ll | 13
-rw-r--r--  test/CodeGen/X86/coalescer-dce.ll | 80
-rw-r--r--  test/CodeGen/X86/coalescer-remat.ll | 12
-rw-r--r--  test/CodeGen/X86/code_placement_eh.ll | 10
-rw-r--r--  test/CodeGen/X86/crash-nosse.ll | 27
-rw-r--r--  test/CodeGen/X86/crash.ll | 75
-rw-r--r--  test/CodeGen/X86/dbg-at-specficiation.ll | 20
-rw-r--r--  test/CodeGen/X86/dbg-inline.ll | 140
-rw-r--r--  test/CodeGen/X86/dbg-large-unsigned-const.ll | 61
-rw-r--r--  test/CodeGen/X86/dbg-value-isel.ll | 4
-rw-r--r--  test/CodeGen/X86/extractelement-load.ll | 20
-rw-r--r--  test/CodeGen/X86/fast-isel-atomic.ll | 6
-rw-r--r--  test/CodeGen/X86/fast-isel-cmp-branch.ll | 4
-rw-r--r--  test/CodeGen/X86/fast-isel-gep.ll | 30
-rw-r--r--  test/CodeGen/X86/fast-isel-tls.ll | 18
-rw-r--r--  test/CodeGen/X86/fast-isel-x86-64.ll | 25
-rw-r--r--  test/CodeGen/X86/fp-stack-O0-crash.ll | 21
-rw-r--r--  test/CodeGen/X86/global-sections.ll | 2
-rw-r--r--  test/CodeGen/X86/haddsub.ll | 194
-rw-r--r--  test/CodeGen/X86/hidden-vis.ll | 19
-rw-r--r--  test/CodeGen/X86/inline-asm-fpstack.ll | 11
-rw-r--r--  test/CodeGen/X86/iv-users-in-other-loops.ll | 6
-rw-r--r--  test/CodeGen/X86/lfence.ll | 6
-rw-r--r--  test/CodeGen/X86/licm-dominance.ll | 36
-rw-r--r--  test/CodeGen/X86/licm-nested.ll | 2
-rw-r--r--  test/CodeGen/X86/lock-inst-encoding.ll | 43
-rw-r--r--  test/CodeGen/X86/loop-strength-reduce3.ll | 4
-rw-r--r--  test/CodeGen/X86/lzcnt.ll | 38
-rw-r--r--  test/CodeGen/X86/membarrier.ll | 9
-rw-r--r--  test/CodeGen/X86/mfence.ll | 16
-rw-r--r--  test/CodeGen/X86/movbe.ll | 36
-rw-r--r--  test/CodeGen/X86/movgs.ll | 2
-rw-r--r--  test/CodeGen/X86/movmsk.ll (renamed from test/CodeGen/X86/2011-05-31-movmsk.ll) | 31
-rw-r--r--  test/CodeGen/X86/nofence.ll | 27
-rw-r--r--  test/CodeGen/X86/norex-subreg.ll | 80
-rw-r--r--  test/CodeGen/X86/opt-shuff-tstore.ll | 39
-rw-r--r--  test/CodeGen/X86/or-address.ll | 8
-rw-r--r--  test/CodeGen/X86/palignr.ll | 31
-rw-r--r--  test/CodeGen/X86/personality.ll | 10
-rw-r--r--  test/CodeGen/X86/pr10420.ll | 21
-rw-r--r--  test/CodeGen/X86/pr3495.ll | 8
-rw-r--r--  test/CodeGen/X86/pr3522.ll | 4
-rw-r--r--  test/CodeGen/X86/ptr-rotate.ll | 11
-rw-r--r--  test/CodeGen/X86/scev-interchange.ll | 44
-rw-r--r--  test/CodeGen/X86/segmented-stacks.ll | 87
-rw-r--r--  test/CodeGen/X86/sfence.ll | 6
-rw-r--r--  test/CodeGen/X86/sink-hoist.ll | 10
-rw-r--r--  test/CodeGen/X86/split-eh-lpad-edges.ll | 4
-rw-r--r--  test/CodeGen/X86/split-vector-bitcast.ll | 12
-rw-r--r--  test/CodeGen/X86/sse-minmax.ll | 38
-rw-r--r--  test/CodeGen/X86/sse2-blend.ll | 55
-rw-r--r--  test/CodeGen/X86/sse41-blend.ll | 82
-rw-r--r--  test/CodeGen/X86/sub.ll | 11
-rw-r--r--  test/CodeGen/X86/tail-call-got.ll | 24
-rw-r--r--  test/CodeGen/X86/tlv-1.ll | 17
-rw-r--r--  test/CodeGen/X86/trunc-ext-ld-st.ll | 82
-rw-r--r--  test/CodeGen/X86/twoaddr-sink-terminator.ll | 43
-rw-r--r--  test/CodeGen/X86/uint64-to-float.ll | 33
-rw-r--r--  test/CodeGen/X86/uint_to_fp-2.ll | 31
-rw-r--r--  test/CodeGen/X86/v2f32.ll | 2
-rw-r--r--  test/CodeGen/X86/vec_compare-sse4.ll | 35
-rw-r--r--  test/CodeGen/X86/vec_set-C.ll | 6
-rw-r--r--  test/CodeGen/X86/vec_shuffle-37.ll | 24
-rw-r--r--  test/CodeGen/X86/vec_shuffle-38.ll | 59
-rw-r--r--  test/CodeGen/X86/widen_shuffle-1.ll | 8
-rw-r--r--  test/CodeGen/XCore/2011-08-01-DynamicAllocBug.ll | 20
-rw-r--r--  test/CodeGen/XCore/2011-08-01-VarargsBug.ll | 17
-rw-r--r--  test/CodeGen/XCore/licm-ldwcp.ll | 18
-rw-r--r--  test/CodeGen/XCore/misc-intrinsics.ll | 48
-rw-r--r--  test/CodeGen/XCore/resources.ll | 41
-rw-r--r--  test/CodeGen/XCore/trampoline.ll | 6
308 files changed, 9575 insertions, 6184 deletions
diff --git a/test/CodeGen/ARM/2009-10-16-Scope.ll b/test/CodeGen/ARM/2009-10-16-Scope.ll
index ce440e9..a2e7ff7 100644
--- a/test/CodeGen/ARM/2009-10-16-Scope.ll
+++ b/test/CodeGen/ARM/2009-10-16-Scope.ll
@@ -23,10 +23,10 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
declare i32 @foo(i32) ssp
!0 = metadata !{i32 5, i32 2, metadata !1, null}
-!1 = metadata !{i32 458763, metadata !2}; [DW_TAG_lexical_block ]
+!1 = metadata !{i32 458763, metadata !2, i32 1, i32 1}; [DW_TAG_lexical_block ]
!2 = metadata !{i32 458798, i32 0, metadata !3, metadata !"bar", metadata !"bar", metadata !"bar", metadata !3, i32 4, null, i1 false, i1 true}; [DW_TAG_subprogram ]
!3 = metadata !{i32 458769, i32 0, i32 12, metadata !"genmodes.i", metadata !"/Users/yash/Downloads", metadata !"clang 1.1", i1 true, i1 false, metadata !"", i32 0}; [DW_TAG_compile_unit ]
!4 = metadata !{i32 459008, metadata !5, metadata !"count_", metadata !3, i32 5, metadata !6}; [ DW_TAG_auto_variable ]
-!5 = metadata !{i32 458763, metadata !1}; [DW_TAG_lexical_block ]
+!5 = metadata !{i32 458763, metadata !1, i32 1, i32 1}; [DW_TAG_lexical_block ]
!6 = metadata !{i32 458788, metadata !3, metadata !"int", metadata !3, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}; [DW_TAG_base_type ]
!7 = metadata !{i32 6, i32 1, metadata !2, null}
diff --git a/test/CodeGen/ARM/2010-11-30-reloc-movt.ll b/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
index 930cd8d..8b164c5 100644
--- a/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
+++ b/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
@@ -21,20 +21,20 @@ entry:
; OBJ-NEXT: 'sh_entsize'
; OBJ-NEXT: '_section_data', '00482de9 000000e3 000040e3 feffffeb 0088bde8'
-; OBJ: Relocation 0x00000000
+; OBJ: Relocation 0
; OBJ-NEXT: 'r_offset', 0x00000004
-; OBJ-NEXT: 'r_sym', 0x00000007
-; OBJ-NEXT: 'r_type', 0x0000002b
+; OBJ-NEXT: 'r_sym', 0x000007
+; OBJ-NEXT: 'r_type', 0x2b
-; OBJ: Relocation 0x00000001
+; OBJ: Relocation 1
; OBJ-NEXT: 'r_offset', 0x00000008
; OBJ-NEXT: 'r_sym'
-; OBJ-NEXT: 'r_type', 0x0000002c
+; OBJ-NEXT: 'r_type', 0x2c
-; OBJ: # Relocation 0x00000002
+; OBJ: # Relocation 2
; OBJ-NEXT: 'r_offset', 0x0000000c
-; OBJ-NEXT: 'r_sym', 0x00000008
-; OBJ-NEXT: 'r_type', 0x0000001c
+; OBJ-NEXT: 'r_sym', 0x000008
+; OBJ-NEXT: 'r_type', 0x1c
}
diff --git a/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll b/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll
index 69d4a14..5cfbb4f 100644
--- a/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll
+++ b/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll
@@ -5,7 +5,7 @@
@dummy = internal global i32 666
-@array00 = internal global [20 x i32] zeroinitializer
+@array00 = internal global [80 x i8] zeroinitializer, align 1
@sum = internal global i32 55
@STRIDE = internal global i32 8
@@ -15,21 +15,22 @@
-; OBJ: Section 0x00000004
+; OBJ: Section 4
; OBJ-NEXT: '.bss'
; OBJ: 'array00'
; OBJ-NEXT: 'st_value', 0x00000000
; OBJ-NEXT: 'st_size', 0x00000050
-; OBJ-NEXT: 'st_bind', 0x00000000
-; OBJ-NEXT: 'st_type', 0x00000001
-; OBJ-NEXT: 'st_other', 0x00000000
-; OBJ-NEXT: 'st_shndx', 0x00000004
+; OBJ-NEXT: 'st_bind', 0x0
+; OBJ-NEXT: 'st_type', 0x1
+; OBJ-NEXT: 'st_other', 0x00
+; OBJ-NEXT: 'st_shndx', 0x0004
define i32 @main(i32 %argc) nounwind {
%1 = load i32* @sum, align 4
- %2 = getelementptr [20 x i32]* @array00, i32 0, i32 %argc
- %3 = load i32* %2, align 4
- %4 = add i32 %1, %3
- ret i32 %4;
+ %2 = getelementptr [80 x i8]* @array00, i32 0, i32 %argc
+ %3 = load i8* %2
+ %4 = zext i8 %3 to i32
+ %5 = add i32 %1, %4
+ ret i32 %5
}
diff --git a/test/CodeGen/ARM/2011-06-09-TailCallByVal.ll b/test/CodeGen/ARM/2011-06-09-TailCallByVal.ll
index 4db3acf..7f0f795 100644
--- a/test/CodeGen/ARM/2011-06-09-TailCallByVal.ll
+++ b/test/CodeGen/ARM/2011-06-09-TailCallByVal.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -relocation-model=pic -mcpu=cortex-a8 -arm-tail-calls=1 | FileCheck %s
+
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin10"
diff --git a/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll b/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
new file mode 100644
index 0000000..f681c34
--- /dev/null
+++ b/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
@@ -0,0 +1,124 @@
+; RUN: llc < %s | FileCheck %s
+
+; Check debug info output for merged global.
+; DW_AT_location
+; DW_OP_addr
+; DW_OP_plus
+; .long __MergedGlobals
+; DW_OP_constu
+; offset
+
+;CHECK: .ascii "x2" @ DW_AT_name
+;CHECK-NEXT: .byte 0
+;CHECK-NEXT: @ DW_AT_type
+;CHECK-NEXT: @ DW_AT_decl_file
+;CHECK-NEXT: @ DW_AT_decl_line
+;CHECK-NEXT: @ DW_AT_location
+;CHECK-NEXT: .byte 3
+;CHECK-NEXT: .long __MergedGlobals
+;CHECK-NEXT: .byte 16
+; 4 is byte offset of x2 in __MergedGlobals
+;CHECK-NEXT: .byte 4
+;CHECK-NEXT: .byte 34
+
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32"
+target triple = "thumbv7-apple-macosx10.7.0"
+
+@x1 = internal unnamed_addr global i32 1, align 4
+@x2 = internal unnamed_addr global i32 2, align 4
+@x3 = internal unnamed_addr global i32 3, align 4
+@x4 = internal unnamed_addr global i32 4, align 4
+@x5 = global i32 0, align 4
+
+define i32 @get1(i32 %a) nounwind optsize ssp {
+ tail call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !10), !dbg !30
+ %1 = load i32* @x1, align 4, !dbg !31
+ tail call void @llvm.dbg.value(metadata !{i32 %1}, i64 0, metadata !11), !dbg !31
+ store i32 %a, i32* @x1, align 4, !dbg !31
+ ret i32 %1, !dbg !31
+}
+
+define i32 @get2(i32 %a) nounwind optsize ssp {
+ tail call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !13), !dbg !32
+ %1 = load i32* @x2, align 4, !dbg !33
+ tail call void @llvm.dbg.value(metadata !{i32 %1}, i64 0, metadata !14), !dbg !33
+ store i32 %a, i32* @x2, align 4, !dbg !33
+ ret i32 %1, !dbg !33
+}
+
+define i32 @get3(i32 %a) nounwind optsize ssp {
+ tail call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !16), !dbg !34
+ %1 = load i32* @x3, align 4, !dbg !35
+ tail call void @llvm.dbg.value(metadata !{i32 %1}, i64 0, metadata !17), !dbg !35
+ store i32 %a, i32* @x3, align 4, !dbg !35
+ ret i32 %1, !dbg !35
+}
+
+define i32 @get4(i32 %a) nounwind optsize ssp {
+ tail call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !19), !dbg !36
+ %1 = load i32* @x4, align 4, !dbg !37
+ tail call void @llvm.dbg.value(metadata !{i32 %1}, i64 0, metadata !20), !dbg !37
+ store i32 %a, i32* @x4, align 4, !dbg !37
+ ret i32 %1, !dbg !37
+}
+
+define i32 @get5(i32 %a) nounwind optsize ssp {
+ tail call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !27), !dbg !38
+ %1 = load i32* @x5, align 4, !dbg !39
+ tail call void @llvm.dbg.value(metadata !{i32 %1}, i64 0, metadata !28), !dbg !39
+ store i32 %a, i32* @x5, align 4, !dbg !39
+ ret i32 %1, !dbg !39
+}
+
+declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
+
+!llvm.dbg.cu = !{!0}
+!llvm.dbg.sp = !{!1, !6, !7, !8, !9}
+!llvm.dbg.lv.get1 = !{!10, !11}
+!llvm.dbg.lv.get2 = !{!13, !14}
+!llvm.dbg.lv.get3 = !{!16, !17}
+!llvm.dbg.lv.get4 = !{!19, !20}
+!llvm.dbg.gv = !{!22, !23, !24, !25, !26}
+!llvm.dbg.lv.get5 = !{!27, !28}
+
+!0 = metadata !{i32 589841, i32 0, i32 12, metadata !"ss3.c", metadata !"/private/tmp", metadata !"clang", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{i32 589870, i32 0, metadata !2, metadata !"get1", metadata !"get1", metadata !"", metadata !2, i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get1, null, null} ; [ DW_TAG_subprogram ]
+!2 = metadata !{i32 589865, metadata !"ss3.c", metadata !"/private/tmp", metadata !0} ; [ DW_TAG_file_type ]
+!3 = metadata !{i32 589845, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!4 = metadata !{metadata !5}
+!5 = metadata !{i32 589860, metadata !0, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!6 = metadata !{i32 589870, i32 0, metadata !2, metadata !"get2", metadata !"get2", metadata !"", metadata !2, i32 8, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get2, null, null} ; [ DW_TAG_subprogram ]
+!7 = metadata !{i32 589870, i32 0, metadata !2, metadata !"get3", metadata !"get3", metadata !"", metadata !2, i32 11, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get3, null, null} ; [ DW_TAG_subprogram ]
+!8 = metadata !{i32 589870, i32 0, metadata !2, metadata !"get4", metadata !"get4", metadata !"", metadata !2, i32 14, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get4, null, null} ; [ DW_TAG_subprogram ]
+!9 = metadata !{i32 589870, i32 0, metadata !2, metadata !"get5", metadata !"get5", metadata !"", metadata !2, i32 17, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get5, null, null} ; [ DW_TAG_subprogram ]
+!10 = metadata !{i32 590081, metadata !1, metadata !"a", metadata !2, i32 16777221, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!11 = metadata !{i32 590080, metadata !12, metadata !"b", metadata !2, i32 5, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!12 = metadata !{i32 589835, metadata !1, i32 5, i32 19, metadata !2, i32 0} ; [ DW_TAG_lexical_block ]
+!13 = metadata !{i32 590081, metadata !6, metadata !"a", metadata !2, i32 16777224, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!14 = metadata !{i32 590080, metadata !15, metadata !"b", metadata !2, i32 8, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!15 = metadata !{i32 589835, metadata !6, i32 8, i32 17, metadata !2, i32 1} ; [ DW_TAG_lexical_block ]
+!16 = metadata !{i32 590081, metadata !7, metadata !"a", metadata !2, i32 16777227, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!17 = metadata !{i32 590080, metadata !18, metadata !"b", metadata !2, i32 11, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!18 = metadata !{i32 589835, metadata !7, i32 11, i32 19, metadata !2, i32 2} ; [ DW_TAG_lexical_block ]
+!19 = metadata !{i32 590081, metadata !8, metadata !"a", metadata !2, i32 16777230, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!20 = metadata !{i32 590080, metadata !21, metadata !"b", metadata !2, i32 14, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!21 = metadata !{i32 589835, metadata !8, i32 14, i32 19, metadata !2, i32 3} ; [ DW_TAG_lexical_block ]
+!22 = metadata !{i32 589876, i32 0, metadata !0, metadata !"x5", metadata !"x5", metadata !"", metadata !2, i32 16, metadata !5, i32 0, i32 1, i32* @x5} ; [ DW_TAG_variable ]
+!23 = metadata !{i32 589876, i32 0, metadata !0, metadata !"x4", metadata !"x4", metadata !"", metadata !2, i32 13, metadata !5, i32 1, i32 1, i32* @x4} ; [ DW_TAG_variable ]
+!24 = metadata !{i32 589876, i32 0, metadata !0, metadata !"x3", metadata !"x3", metadata !"", metadata !2, i32 10, metadata !5, i32 1, i32 1, i32* @x3} ; [ DW_TAG_variable ]
+!25 = metadata !{i32 589876, i32 0, metadata !0, metadata !"x2", metadata !"x2", metadata !"", metadata !2, i32 7, metadata !5, i32 1, i32 1, i32* @x2} ; [ DW_TAG_variable ]
+!26 = metadata !{i32 589876, i32 0, metadata !0, metadata !"x1", metadata !"x1", metadata !"", metadata !2, i32 4, metadata !5, i32 1, i32 1, i32* @x1} ; [ DW_TAG_variable ]
+!27 = metadata !{i32 590081, metadata !9, metadata !"a", metadata !2, i32 16777233, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!28 = metadata !{i32 590080, metadata !29, metadata !"b", metadata !2, i32 17, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!29 = metadata !{i32 589835, metadata !9, i32 17, i32 19, metadata !2, i32 4} ; [ DW_TAG_lexical_block ]
+!30 = metadata !{i32 5, i32 16, metadata !1, null}
+!31 = metadata !{i32 5, i32 32, metadata !12, null}
+!32 = metadata !{i32 8, i32 14, metadata !6, null}
+!33 = metadata !{i32 8, i32 29, metadata !15, null}
+!34 = metadata !{i32 11, i32 16, metadata !7, null}
+!35 = metadata !{i32 11, i32 32, metadata !18, null}
+!36 = metadata !{i32 14, i32 16, metadata !8, null}
+!37 = metadata !{i32 14, i32 32, metadata !21, null}
+!38 = metadata !{i32 17, i32 16, metadata !9, null}
+!39 = metadata !{i32 17, i32 32, metadata !29, null}
diff --git a/test/CodeGen/ARM/2011-08-12-vmovqqqq-pseudo.ll b/test/CodeGen/ARM/2011-08-12-vmovqqqq-pseudo.ll
new file mode 100644
index 0000000..3cbc4cd
--- /dev/null
+++ b/test/CodeGen/ARM/2011-08-12-vmovqqqq-pseudo.ll
@@ -0,0 +1,12 @@
+; RUN: llc %s -mtriple=thumbv7-apple-darwin -verify-machineinstrs -mcpu=cortex-a9 -O0 -o -
+; Make sure that the VMOVQQQQ pseudo instruction is handled properly
+; by codegen.
+
+define void @test_vmovqqqq_pseudo() nounwind ssp {
+entry:
+ %vld3_lane = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8* undef, <8 x i16> undef, <8 x i16> undef, <8 x i16> zeroinitializer, i32 7, i32 2)
+ store { <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, { <8 x i16>, <8 x i16>, <8 x i16> }* undef
+ ret void
+}
+
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32) nounwind readonly
diff --git a/test/CodeGen/ARM/2011-08-25-ldmia_ret.ll b/test/CodeGen/ARM/2011-08-25-ldmia_ret.ll
new file mode 100644
index 0000000..17264ee
--- /dev/null
+++ b/test/CodeGen/ARM/2011-08-25-ldmia_ret.ll
@@ -0,0 +1,100 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a9 | FileCheck %s
+; Test that ldmia_ret preserves implicit operands for return values.
+;
+; This CFG is reduced from a benchmark miscompile. With current
+; if-conversion heuristics, one of the return paths is if-converted
+; into sw.bb18 resulting in an ldmia_ret in the middle of the
+; block. The postra scheduler needs to know that the return implicitly
+; uses the return register, otherwise its antidep breaker scavenges
+; the register in order to hoist the constant load required to test
+; the switch.
+
+declare i32 @getint()
+declare i1 @getbool()
+declare void @foo(i32)
+declare i32 @bar(i32)
+
+define i32 @test(i32 %in1, i32 %in2) nounwind {
+entry:
+ %call = tail call zeroext i1 @getbool() nounwind
+ br i1 %call, label %sw.bb18, label %sw.bb2
+
+sw.bb2: ; preds = %entry
+ %cmp = tail call zeroext i1 @getbool() nounwind
+ br i1 %cmp, label %sw.epilog58, label %land.lhs.true
+
+land.lhs.true: ; preds = %sw.bb2
+ %cmp13 = tail call zeroext i1 @getbool() nounwind
+ br i1 %cmp13, label %if.then, label %sw.epilog58
+
+if.then: ; preds = %land.lhs.true
+ tail call void @foo(i32 %in1) nounwind
+ br label %sw.epilog58
+
+; load the return value
+; CHECK: movs [[RRET:r.]], #2
+; hoist the switch constant without clobbering RRET
+; CHECK: movw
+; CHECK-NOT: [[RRET]]
+; CHECK: , #63707
+; CHECK-NOT: [[RRET]]
+; CHECK: tst
+; If-convert the return
+; CHECK: it ne
+; Fold the CSR+return into a pop
+; CHECK: popne {r4, r5, r7, pc}
+sw.bb18:
+ %call20 = tail call i32 @bar(i32 %in2) nounwind
+ switch i32 %call20, label %sw.default56 [
+ i32 168, label %sw.bb21
+ i32 165, label %sw.bb21
+ i32 261, label %sw.epilog58
+ i32 188, label %sw.epilog58
+ i32 187, label %sw.epilog58
+ i32 186, label %sw.epilog58
+ i32 185, label %sw.epilog58
+ i32 184, label %sw.epilog58
+ i32 175, label %sw.epilog58
+ i32 174, label %sw.epilog58
+ i32 173, label %sw.epilog58
+ i32 172, label %sw.epilog58
+ i32 171, label %sw.epilog58
+ i32 167, label %sw.epilog58
+ i32 166, label %sw.epilog58
+ i32 164, label %sw.epilog58
+ i32 163, label %sw.epilog58
+ i32 161, label %sw.epilog58
+ i32 160, label %sw.epilog58
+ i32 -1, label %sw.bb33
+ ]
+
+sw.bb21: ; preds = %sw.bb18, %sw.bb18
+ tail call void @foo(i32 %in2) nounwind
+ %call28 = tail call i32 @getint() nounwind
+ %tobool = icmp eq i32 %call28, 0
+ br i1 %tobool, label %if.then29, label %sw.epilog58
+
+if.then29: ; preds = %sw.bb21
+ tail call void @foo(i32 %in2) nounwind
+ br label %sw.epilog58
+
+sw.bb33: ; preds = %sw.bb18
+ %cmp42 = tail call zeroext i1 @getbool() nounwind
+ br i1 %cmp42, label %sw.default56, label %land.lhs.true44
+
+land.lhs.true44: ; preds = %sw.bb33
+ %call50 = tail call i32 @getint() nounwind
+ %cmp51 = icmp slt i32 %call50, 0
+ br i1 %cmp51, label %if.then53, label %sw.default56
+
+if.then53: ; preds = %land.lhs.true44
+ tail call void @foo(i32 %in2) nounwind
+ br label %sw.default56
+
+sw.default56: ; preds = %sw.bb33, %land.lhs.true44, %if.then53, %sw.bb18
+ br label %sw.epilog58
+
+sw.epilog58:
+ %retval.0 = phi i32 [ 4, %sw.default56 ], [ 2, %sw.bb21 ], [ 2, %if.then29 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb18 ], [ 2, %sw.bb2 ], [ 2, %land.lhs.true ], [ 2, %if.then ]
+ ret i32 %retval.0
+}
diff --git a/test/CodeGen/ARM/2011-08-29-SchedCycle.ll b/test/CodeGen/ARM/2011-08-29-SchedCycle.ll
new file mode 100644
index 0000000..be188ef
--- /dev/null
+++ b/test/CodeGen/ARM/2011-08-29-SchedCycle.ll
@@ -0,0 +1,45 @@
+; RUN: llc %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -o -
+
+; When an i64 sub is expanded to subc + sube:
+; libcall #1
+; \
+; \ subc
+; \ / \
+; \ / \
+; \ / libcall #2
+; sube
+;
+; If the libcalls are not serialized (i.e. both have chains which are dag
+; entry), legalizer can serialize them in arbitrary orders. If it's
+; unlucky, it can force libcall #2 before libcall #1 in the above case.
+;
+; subc
+; |
+; libcall #2
+; |
+; libcall #1
+; |
+; sube
+;
+; However since subc and sube are "glued" together, this ends up being a
+; cycle when the scheduler combines subc and sube as a single scheduling
+; unit.
+;
+; The right solution is to fix LegalizeType so that it chains the libcalls together.
+; However, LegalizeType is not processing nodes in order. The fix now is to
+; fix subc / sube (and addc / adde) to use physical register dependency instead.
+; rdar://10019576
+
+define void @t() nounwind {
+entry:
+ %tmp = load i64* undef, align 4
+ %tmp5 = udiv i64 %tmp, 30
+ %tmp13 = and i64 %tmp5, 64739244643450880
+ %tmp16 = sub i64 0, %tmp13
+ %tmp19 = and i64 %tmp16, 63
+ %tmp20 = urem i64 %tmp19, 3
+ %tmp22 = and i64 %tmp16, -272346829004752
+ store i64 %tmp22, i64* undef, align 4
+ store i64 %tmp20, i64* undef, align 4
+ ret void
+}
diff --git a/test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll b/test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll
new file mode 100644
index 0000000..6647ed8
--- /dev/null
+++ b/test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll
@@ -0,0 +1,34 @@
+; RUN: llc -O3 -mtriple=armv6-apple-darwin -relocation-model=pic < %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:64-n32"
+
+define void @compdecomp() nounwind {
+entry:
+ %heap = alloca [256 x i32], align 4
+ br i1 undef, label %bb25.lr.ph, label %bb17
+
+bb17: ; preds = %bb17, %entry
+ br label %bb17
+
+bb25.lr.ph: ; preds = %entry
+ %0 = sdiv i32 undef, 2
+ br label %bb5.i
+
+bb.i: ; preds = %bb5.i
+ %1 = shl nsw i32 %k_addr.0.i, 1
+ %.sum8.i = add i32 %1, -1
+ %2 = getelementptr inbounds [256 x i32]* %heap, i32 0, i32 %.sum8.i
+ %3 = load i32* %2, align 4
+ br i1 false, label %bb5.i, label %bb4.i
+
+bb4.i: ; preds = %bb.i
+ %.sum10.i = add i32 %k_addr.0.i, -1
+ %4 = getelementptr inbounds [256 x i32]* %heap, i32 0, i32 %.sum10.i
+ store i32 %3, i32* %4, align 4
+ br label %bb5.i
+
+bb5.i: ; preds = %bb5.i, %bb4.i, %bb.i, %bb25.lr.ph
+ %k_addr.0.i = phi i32 [ %1, %bb4.i ], [ undef, %bb25.lr.ph ], [ undef, %bb5.i ], [ undef, %bb.i ]
+ %5 = icmp slt i32 %0, %k_addr.0.i
+ br i1 %5, label %bb5.i, label %bb.i
+}
diff --git a/test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll b/test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll
new file mode 100644
index 0000000..8fe9102
--- /dev/null
+++ b/test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll
@@ -0,0 +1,23 @@
+; RUN: llc -mtriple=armv7-- < %s -mattr=-neon
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32"
+target triple = "armv7-none-linux-gnueabi"
+
+@x1 = common global <3 x i16> zeroinitializer
+@y1 = common global <3 x i16> zeroinitializer
+@z1 = common global <3 x i16> zeroinitializer
+@x2 = common global <4 x i16> zeroinitializer
+@y2 = common global <4 x i16> zeroinitializer
+@z2 = common global <4 x i16> zeroinitializer
+
+define void @f() {
+ %1 = load <3 x i16>* @x1
+ %2 = load <3 x i16>* @y1
+ %3 = sdiv <3 x i16> %1, %2
+ store <3 x i16> %3, <3 x i16>* @z1
+ %4 = load <4 x i16>* @x2
+ %5 = load <4 x i16>* @y2
+ %6 = sdiv <4 x i16> %4, %5
+ store <4 x i16> %6, <4 x i16>* @z2
+ ret void
+}
diff --git a/test/CodeGen/ARM/2011-09-19-cpsr.ll b/test/CodeGen/ARM/2011-09-19-cpsr.ll
new file mode 100644
index 0000000..749a6d2
--- /dev/null
+++ b/test/CodeGen/ARM/2011-09-19-cpsr.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=thumb -mcpu=cortex-a8 < %s
+; rdar://problem/10137436: sqlite3 miscompile
+;
+; CHECK: subs
+; CHECK: cmp
+; CHECK: it
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32"
+target triple = "thumbv7-apple-ios4.0.0"
+
+declare i8* @__memset_chk(i8*, i32, i32, i32) nounwind
+
+define hidden fastcc i32 @sqlite3VdbeExec(i32* %p) nounwind {
+entry:
+ br label %sqlite3VarintLen.exit7424
+
+sqlite3VarintLen.exit7424: ; preds = %do.body.i7423
+ br label %do.body.i
+
+do.body.i: ; preds = %do.body.i, %sqlite3VarintLen.exit7424
+ br i1 undef, label %do.body.i, label %sqlite3VarintLen.exit
+
+sqlite3VarintLen.exit: ; preds = %do.body.i
+ %sub2322 = add i64 undef, undef
+ br i1 undef, label %too_big, label %if.end2327
+
+if.end2327: ; preds = %sqlite3VarintLen.exit
+ br i1 undef, label %if.end2341, label %no_mem
+
+if.end2341: ; preds = %if.end2327
+ br label %for.body2355
+
+for.body2355: ; preds = %for.body2355, %if.end2341
+ %add2366 = add nsw i32 undef, undef
+ br i1 undef, label %for.body2377, label %for.body2355
+
+for.body2377: ; preds = %for.body2355
+ %conv23836154 = zext i32 %add2366 to i64
+ %sub2384 = sub i64 %sub2322, %conv23836154
+ %conv2385 = trunc i64 %sub2384 to i32
+ %len.0.i = select i1 undef, i32 %conv2385, i32 undef
+ %sub.i7384 = sub nsw i32 %len.0.i, 0
+ %call.i.i7385 = call i8* @__memset_chk(i8* undef, i32 0, i32 %sub.i7384, i32 undef) nounwind
+ unreachable
+
+too_big: ; preds = %sqlite3VarintLen.exit
+ unreachable
+
+no_mem: ; preds = %if.end2327, %for.body, %entry.no_mem_crit_edge
+ unreachable
+
+sqlite3ErrStr.exit: ; preds = %if.then82
+ unreachable
+}
diff --git a/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll b/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll
new file mode 100644
index 0000000..c6f4a93
--- /dev/null
+++ b/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll
@@ -0,0 +1,30 @@
+; RUN: llc -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 < %s
+
+; rdar://10196296
+; ARM target specific dag combine created a cycle in DAG.
+
+define void @t() nounwind ssp {
+ %1 = load i64* undef, align 4
+ %2 = shl i32 5, 0
+ %3 = zext i32 %2 to i64
+ %4 = and i64 %1, %3
+ %5 = lshr i64 %4, undef
+ switch i64 %5, label %8 [
+ i64 0, label %9
+ i64 1, label %6
+ i64 4, label %9
+ i64 5, label %7
+ ]
+
+; <label>:6 ; preds = %0
+ unreachable
+
+; <label>:7 ; preds = %0
+ unreachable
+
+; <label>:8 ; preds = %0
+ unreachable
+
+; <label>:9 ; preds = %0, %0
+ ret void
+}
diff --git a/test/CodeGen/ARM/atomic-64bit.ll b/test/CodeGen/ARM/atomic-64bit.ll
new file mode 100644
index 0000000..e9609ac
--- /dev/null
+++ b/test/CodeGen/ARM/atomic-64bit.ll
@@ -0,0 +1,128 @@
+; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s
+
+define i64 @test1(i64* %ptr, i64 %val) {
+; CHECK: test1
+; CHECK: dmb ish
+; CHECK: ldrexd r2, r3
+; CHECK: adds r0, r2
+; CHECK: adc r1, r3
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+ %r = atomicrmw add i64* %ptr, i64 %val seq_cst
+ ret i64 %r
+}
+
+define i64 @test2(i64* %ptr, i64 %val) {
+; CHECK: test2
+; CHECK: dmb ish
+; CHECK: ldrexd r2, r3
+; CHECK: subs r0, r2
+; CHECK: sbc r1, r3
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+ %r = atomicrmw sub i64* %ptr, i64 %val seq_cst
+ ret i64 %r
+}
+
+define i64 @test3(i64* %ptr, i64 %val) {
+; CHECK: test3
+; CHECK: dmb ish
+; CHECK: ldrexd r2, r3
+; CHECK: and r0, r2
+; CHECK: and r1, r3
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+ %r = atomicrmw and i64* %ptr, i64 %val seq_cst
+ ret i64 %r
+}
+
+define i64 @test4(i64* %ptr, i64 %val) {
+; CHECK: test4
+; CHECK: dmb ish
+; CHECK: ldrexd r2, r3
+; CHECK: orr r0, r2
+; CHECK: orr r1, r3
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+ %r = atomicrmw or i64* %ptr, i64 %val seq_cst
+ ret i64 %r
+}
+
+define i64 @test5(i64* %ptr, i64 %val) {
+; CHECK: test5
+; CHECK: dmb ish
+; CHECK: ldrexd r2, r3
+; CHECK: eor r0, r2
+; CHECK: eor r1, r3
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+ %r = atomicrmw xor i64* %ptr, i64 %val seq_cst
+ ret i64 %r
+}
+
+define i64 @test6(i64* %ptr, i64 %val) {
+; CHECK: test6
+; CHECK: dmb ish
+; CHECK: ldrexd r2, r3
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+ %r = atomicrmw xchg i64* %ptr, i64 %val seq_cst
+ ret i64 %r
+}
+
+define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
+; CHECK: test7
+; CHECK: dmb ish
+; CHECK: ldrexd r2, r3
+; CHECK: cmp r2
+; CHECK: cmpeq r3
+; CHECK: bne
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+ %r = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst
+ ret i64 %r
+}
+
+; Compiles down to cmpxchg
+; FIXME: Should compile to a single ldrexd
+define i64 @test8(i64* %ptr) {
+; CHECK: test8
+; CHECK: ldrexd r2, r3
+; CHECK: cmp r2
+; CHECK: cmpeq r3
+; CHECK: bne
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+ %r = load atomic i64* %ptr seq_cst, align 8
+ ret i64 %r
+}
+
+; Compiles down to atomicrmw xchg; there really isn't any more efficient
+; way to write it.
+define void @test9(i64* %ptr, i64 %val) {
+; CHECK: test9
+; CHECK: dmb ish
+; CHECK: ldrexd r2, r3
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+ store atomic i64 %val, i64* %ptr seq_cst, align 8
+ ret void
+}
diff --git a/test/CodeGen/ARM/atomic-cmp.ll b/test/CodeGen/ARM/atomic-cmp.ll
index f31aa7b..82726da 100644
--- a/test/CodeGen/ARM/atomic-cmp.ll
+++ b/test/CodeGen/ARM/atomic-cmp.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s -check-prefix=ARM
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s -check-prefix=T2
+; RUN: llc < %s -mtriple=armv7-apple-darwin -verify-machineinstrs | FileCheck %s -check-prefix=ARM
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -verify-machineinstrs | FileCheck %s -check-prefix=T2
; rdar://8964854
define i8 @t(i8* %a, i8 %b, i8 %c) nounwind {
@@ -10,8 +10,6 @@ define i8 @t(i8* %a, i8 %b, i8 %c) nounwind {
; T2: t:
; T2: ldrexb
; T2: strexb
- %tmp0 = tail call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %a, i8 %b, i8 %c)
+ %tmp0 = cmpxchg i8* %a, i8 %b, i8 %c monotonic
ret i8 %tmp0
}
-
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* nocapture, i8, i8) nounwind
diff --git a/test/CodeGen/ARM/atomic-load-store.ll b/test/CodeGen/ARM/atomic-load-store.ll
new file mode 100644
index 0000000..12a8fe4
--- /dev/null
+++ b/test/CodeGen/ARM/atomic-load-store.ll
@@ -0,0 +1,56 @@
+; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s -check-prefix=ARM
+; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=ARM
+; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s -check-prefix=THUMBTWO
+; RUN: llc < %s -mtriple=thumbv6-apple-ios | FileCheck %s -check-prefix=THUMBONE
+
+define void @test1(i32* %ptr, i32 %val1) {
+; ARM: test1
+; ARM: dmb ish
+; ARM-NEXT: str
+; ARM-NEXT: dmb ish
+; THUMBONE: test1
+; THUMBONE: __sync_lock_test_and_set_4
+; THUMBTWO: test1
+; THUMBTWO: dmb ish
+; THUMBTWO-NEXT: str
+; THUMBTWO-NEXT: dmb ish
+ store atomic i32 %val1, i32* %ptr seq_cst, align 4
+ ret void
+}
+
+define i32 @test2(i32* %ptr) {
+; ARM: test2
+; ARM: ldr
+; ARM-NEXT: dmb ish
+; THUMBONE: test2
+; THUMBONE: __sync_val_compare_and_swap_4
+; THUMBTWO: test2
+; THUMBTWO: ldr
+; THUMBTWO-NEXT: dmb ish
+ %val = load atomic i32* %ptr seq_cst, align 4
+ ret i32 %val
+}
+
+define void @test3(i8* %ptr1, i8* %ptr2) {
+; ARM: test3
+; ARM: ldrb
+; ARM: strb
+; THUMBTWO: test3
+; THUMBTWO: ldrb
+; THUMBTWO: strb
+; THUMBONE: test3
+; THUMBONE: ldrb
+; THUMBONE: strb
+ %val = load atomic i8* %ptr1 unordered, align 1
+ store atomic i8 %val, i8* %ptr2 unordered, align 1
+ ret void
+}
+
+define void @test4(i8* %ptr1, i8* %ptr2) {
+; THUMBONE: test4
+; THUMBONE: ___sync_val_compare_and_swap_1
+; THUMBONE: ___sync_lock_test_and_set_1
+ %val = load atomic i8* %ptr1 seq_cst, align 1
+ store atomic i8 %val, i8* %ptr2 seq_cst, align 1
+ ret void
+}
diff --git a/test/CodeGen/ARM/atomic-op.ll b/test/CodeGen/ARM/atomic-op.ll
index 03940e3..02ce5a1 100644
--- a/test/CodeGen/ARM/atomic-op.ll
+++ b/test/CodeGen/ARM/atomic-op.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=armv7-apple-darwin10 | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-apple-darwin10 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -verify-machineinstrs | FileCheck %s
define void @func(i32 %argc, i8** %argv) nounwind {
entry:
@@ -24,80 +24,58 @@ entry:
; CHECK: ldrex
; CHECK: add
; CHECK: strex
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val1, i32 %tmp ) ; <i32>:0 [#uses=1]
+ %0 = atomicrmw add i32* %val1, i32 %tmp monotonic
store i32 %0, i32* %old
; CHECK: ldrex
; CHECK: sub
; CHECK: strex
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 30 ) ; <i32>:1 [#uses=1]
+ %1 = atomicrmw sub i32* %val2, i32 30 monotonic
store i32 %1, i32* %old
; CHECK: ldrex
; CHECK: add
; CHECK: strex
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:2 [#uses=1]
+ %2 = atomicrmw add i32* %val2, i32 1 monotonic
store i32 %2, i32* %old
; CHECK: ldrex
; CHECK: sub
; CHECK: strex
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:3 [#uses=1]
+ %3 = atomicrmw sub i32* %val2, i32 1 monotonic
store i32 %3, i32* %old
; CHECK: ldrex
; CHECK: and
; CHECK: strex
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %andt, i32 4080 ) ; <i32>:4 [#uses=1]
+ %4 = atomicrmw and i32* %andt, i32 4080 monotonic
store i32 %4, i32* %old
; CHECK: ldrex
; CHECK: or
; CHECK: strex
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ort, i32 4080 ) ; <i32>:5 [#uses=1]
+ %5 = atomicrmw or i32* %ort, i32 4080 monotonic
store i32 %5, i32* %old
; CHECK: ldrex
; CHECK: eor
; CHECK: strex
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %xort, i32 4080 ) ; <i32>:6 [#uses=1]
+ %6 = atomicrmw xor i32* %xort, i32 4080 monotonic
store i32 %6, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
- call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 16 ) ; <i32>:7 [#uses=1]
+ %7 = atomicrmw min i32* %val2, i32 16 monotonic
store i32 %7, i32* %old
%neg = sub i32 0, 1 ; <i32> [#uses=1]
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
- call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 %neg ) ; <i32>:8 [#uses=1]
+ %8 = atomicrmw min i32* %val2, i32 %neg monotonic
store i32 %8, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
- call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:9 [#uses=1]
+ %9 = atomicrmw max i32* %val2, i32 1 monotonic
store i32 %9, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
- call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 0 ) ; <i32>:10 [#uses=1]
+ %10 = atomicrmw max i32* %val2, i32 0 monotonic
store i32 %10, i32* %old
ret void
}
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.min.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.max.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.umax.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.umin.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
diff --git a/test/CodeGen/ARM/avoid-cpsr-rmw.ll b/test/CodeGen/ARM/avoid-cpsr-rmw.ll
index d0c4f3a..92aff70 100644
--- a/test/CodeGen/ARM/avoid-cpsr-rmw.ll
+++ b/test/CodeGen/ARM/avoid-cpsr-rmw.ll
@@ -6,9 +6,9 @@
define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind readnone {
entry:
; CHECK: t:
-; CHECK: muls r2, r3, r2
-; CHECK-NEXT: mul r0, r0, r1
-; CHECK-NEXT: muls r0, r2, r0
+; CHECK: muls [[REG:(r[0-9]+)]], r2, r3
+; CHECK-NEXT: mul [[REG2:(r[0-9]+)]], r0, r1
+; CHECK-NEXT: muls r0, [[REG2]], [[REG]]
%0 = mul nsw i32 %a, %b
%1 = mul nsw i32 %c, %d
%2 = mul nsw i32 %0, %1
diff --git a/test/CodeGen/ARM/call-tc.ll b/test/CodeGen/ARM/call-tc.ll
index e01750b..f78d998 100644
--- a/test/CodeGen/ARM/call-tc.ll
+++ b/test/CodeGen/ARM/call-tc.ll
@@ -1,6 +1,10 @@
; RUN: llc < %s -mtriple=armv6-apple-darwin -mattr=+vfp2 -arm-tail-calls | FileCheck %s -check-prefix=CHECKV6
; RUN: llc < %s -mtriple=armv6-linux-gnueabi -relocation-model=pic -mattr=+vfp2 -arm-tail-calls | FileCheck %s -check-prefix=CHECKELF
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-tail-calls | FileCheck %s -check-prefix=CHECKT2D
+; RUN: llc < %s -mtriple=thumbv7-apple-ios5.0 | FileCheck %s -check-prefix=CHECKT2D
+
+; Enable tailcall optimization for iOS 5.0
+; rdar://9120031
@t = weak global i32 ()* null ; <i32 ()**> [#uses=1]
diff --git a/test/CodeGen/ARM/carry.ll b/test/CodeGen/ARM/carry.ll
index 06b459e..f84774d 100644
--- a/test/CodeGen/ARM/carry.ll
+++ b/test/CodeGen/ARM/carry.ll
@@ -35,3 +35,13 @@ entry:
%dw = add i64 %ch, %bw
ret i64 %dw
}
+
+; rdar://10073745
+define i64 @f4(i64 %x) nounwind readnone {
+entry:
+; CHECK: f4:
+; CHECK: rsbs r
+; CHECK: rsc r
+ %0 = sub nsw i64 0, %x
+ ret i64 %0
+}
diff --git a/test/CodeGen/ARM/crash-greedy-v6.ll b/test/CodeGen/ARM/crash-greedy-v6.ll
new file mode 100644
index 0000000..fd42254
--- /dev/null
+++ b/test/CodeGen/ARM/crash-greedy-v6.ll
@@ -0,0 +1,32 @@
+; RUN: llc -disable-fp-elim -relocation-model=pic < %s
+target triple = "armv6-apple-ios"
+
+; Reduced from 177.mesa. This test causes a live range split before an LDR_POST instruction.
+; That requires leaveIntvBefore to be very accurate about the redefined value number.
+define internal void @sample_nearest_3d(i8* nocapture %tObj, i32 %n, float* nocapture %s, float* nocapture %t, float* nocapture %u, float* nocapture %lambda, i8* nocapture %red, i8* nocapture %green, i8* nocapture %blue, i8* nocapture %alpha) nounwind ssp {
+entry:
+ br i1 undef, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %i.031 = phi i32 [ 0, %for.body.lr.ph ], [ %0, %for.body ]
+ %arrayidx11 = getelementptr float* %t, i32 %i.031
+ %arrayidx15 = getelementptr float* %u, i32 %i.031
+ %arrayidx19 = getelementptr i8* %red, i32 %i.031
+ %arrayidx22 = getelementptr i8* %green, i32 %i.031
+ %arrayidx25 = getelementptr i8* %blue, i32 %i.031
+ %arrayidx28 = getelementptr i8* %alpha, i32 %i.031
+ %tmp12 = load float* %arrayidx11, align 4
+ tail call fastcc void @sample_3d_nearest(i8* %tObj, i8* undef, float undef, float %tmp12, float undef, i8* %arrayidx19, i8* %arrayidx22, i8* %arrayidx25, i8* %arrayidx28)
+ %0 = add i32 %i.031, 1
+ %exitcond = icmp eq i32 %0, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare fastcc void @sample_3d_nearest(i8* nocapture, i8* nocapture, float, float, float, i8* nocapture, i8* nocapture, i8* nocapture, i8* nocapture) nounwind ssp
+
diff --git a/test/CodeGen/ARM/crash.ll b/test/CodeGen/ARM/crash.ll
index 4b6876d..0f6f33e 100644
--- a/test/CodeGen/ARM/crash.ll
+++ b/test/CodeGen/ARM/crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -verify-arm-pseudo-expand
; <rdar://problem/8529919>
%struct.foo = type { i32, i32 }
@@ -27,3 +27,45 @@ bb3:
exit:
ret void
}
+
+; PR10520 - REG_SEQUENCE with implicit-def operands.
+define arm_aapcs_vfpcc void @foo() nounwind align 2 {
+bb:
+ %tmp = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp8 = bitcast <1 x i64> %tmp to <2 x float>
+ %tmp9 = shufflevector <2 x float> %tmp8, <2 x float> %tmp8, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp10 = fmul <4 x float> undef, %tmp9
+ %tmp11 = fadd <4 x float> %tmp10, undef
+ %tmp12 = fadd <4 x float> undef, %tmp11
+ %tmp13 = bitcast <4 x float> %tmp12 to i128
+ %tmp14 = bitcast i128 %tmp13 to <4 x float>
+ %tmp15 = bitcast <4 x float> %tmp14 to i128
+ %tmp16 = bitcast i128 %tmp15 to <4 x float>
+ %tmp17 = bitcast <4 x float> %tmp16 to i128
+ %tmp18 = bitcast i128 %tmp17 to <4 x float>
+ %tmp19 = bitcast <4 x float> %tmp18 to i128
+ %tmp20 = bitcast i128 %tmp19 to <4 x float>
+ store <4 x float> %tmp20, <4 x float>* undef, align 16
+ ret void
+}
+
+; PR10520, second bug. NEONMoveFixPass needs to preserve implicit operands.
+define arm_aapcs_vfpcc void @pr10520_2() nounwind align 2 {
+bb:
+ %tmp76 = shufflevector <2 x i64> zeroinitializer, <2 x i64> zeroinitializer, <1 x i32> <i32 1>
+ %tmp77 = bitcast <1 x i64> %tmp76 to <2 x float>
+ %tmp78 = shufflevector <2 x float> %tmp77, <2 x float> %tmp77, <4 x i32> zeroinitializer
+ %tmp81 = fmul <4 x float> undef, %tmp78
+ %tmp82 = fadd <4 x float> %tmp81, undef
+ %tmp85 = fadd <4 x float> %tmp82, undef
+ %tmp86 = bitcast <4 x float> %tmp85 to i128
+ %tmp136 = bitcast i128 %tmp86 to <4 x float>
+ %tmp137 = bitcast <4 x float> %tmp136 to i128
+ %tmp138 = bitcast i128 %tmp137 to <4 x float>
+ %tmp139 = bitcast <4 x float> %tmp138 to i128
+ %tmp152 = bitcast i128 %tmp139 to <4 x float>
+ %tmp153 = bitcast <4 x float> %tmp152 to i128
+ %tmp154 = bitcast i128 %tmp153 to <4 x float>
+ store <4 x float> %tmp154, <4 x float>* undef, align 16
+ ret void
+}
diff --git a/test/CodeGen/ARM/debug-info-arg.ll b/test/CodeGen/ARM/debug-info-arg.ll
new file mode 100644
index 0000000..b0270f9
--- /dev/null
+++ b/test/CodeGen/ARM/debug-info-arg.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s | FileCheck %s
+; Test to check argument y's debug info uses FI
+; Radar 10048772
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32"
+target triple = "thumbv7-apple-macosx10.7.0"
+
+%struct.tag_s = type { i32, i32, i32 }
+
+define void @foo(%struct.tag_s* nocapture %this, %struct.tag_s* %c, i64 %x, i64 %y, %struct.tag_s* nocapture %ptr1, %struct.tag_s* nocapture %ptr2) nounwind ssp {
+ tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %this}, i64 0, metadata !5), !dbg !20
+ tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %c}, i64 0, metadata !13), !dbg !21
+ tail call void @llvm.dbg.value(metadata !{i64 %x}, i64 0, metadata !14), !dbg !22
+ tail call void @llvm.dbg.value(metadata !{i64 %y}, i64 0, metadata !17), !dbg !23
+;CHECK: @DEBUG_VALUE: foo:y <- R7+4294967295
+ tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %ptr1}, i64 0, metadata !18), !dbg !24
+ tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %ptr2}, i64 0, metadata !19), !dbg !25
+ %1 = icmp eq %struct.tag_s* %c, null, !dbg !26
+ br i1 %1, label %3, label %2, !dbg !26
+
+; <label>:2 ; preds = %0
+ tail call void @foobar(i64 %x, i64 %y) nounwind, !dbg !28
+ br label %3, !dbg !28
+
+; <label>:3 ; preds = %0, %2
+ ret void, !dbg !29
+}
+
+declare void @foobar(i64, i64)
+
+declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
+
+!llvm.dbg.cu = !{!0}
+!llvm.dbg.sp = !{!1}
+!llvm.dbg.lv.foo = !{!5, !13, !14, !17, !18, !19}
+
+!0 = metadata !{i32 589841, i32 0, i32 12, metadata !"one.c", metadata !"/Volumes/Athwagate/R10048772", metadata !"Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{i32 589870, i32 0, metadata !2, metadata !"foo", metadata !"foo", metadata !"", metadata !2, i32 11, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, void (%struct.tag_s*, %struct.tag_s*, i64, i64, %struct.tag_s*, %struct.tag_s*)* @foo, null, null} ; [ DW_TAG_subprogram ]
+!2 = metadata !{i32 589865, metadata !"one.c", metadata !"/Volumes/Athwagate/R10048772", metadata !0} ; [ DW_TAG_file_type ]
+!3 = metadata !{i32 589845, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!4 = metadata !{null}
+!5 = metadata !{i32 590081, metadata !1, metadata !"this", metadata !2, i32 16777227, metadata !6, i32 0} ; [ DW_TAG_arg_variable ]
+!6 = metadata !{i32 589839, metadata !0, metadata !"", null, i32 0, i64 32, i64 32, i64 0, i32 0, metadata !7} ; [ DW_TAG_pointer_type ]
+!7 = metadata !{i32 589843, metadata !0, metadata !"tag_s", metadata !2, i32 5, i64 96, i64 32, i32 0, i32 0, i32 0, metadata !8, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!8 = metadata !{metadata !9, metadata !11, metadata !12}
+!9 = metadata !{i32 589837, metadata !7, metadata !"x", metadata !2, i32 6, i64 32, i64 32, i64 0, i32 0, metadata !10} ; [ DW_TAG_member ]
+!10 = metadata !{i32 589860, metadata !0, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!11 = metadata !{i32 589837, metadata !7, metadata !"y", metadata !2, i32 7, i64 32, i64 32, i64 32, i32 0, metadata !10} ; [ DW_TAG_member ]
+!12 = metadata !{i32 589837, metadata !7, metadata !"z", metadata !2, i32 8, i64 32, i64 32, i64 64, i32 0, metadata !10} ; [ DW_TAG_member ]
+!13 = metadata !{i32 590081, metadata !1, metadata !"c", metadata !2, i32 33554443, metadata !6, i32 0} ; [ DW_TAG_arg_variable ]
+!14 = metadata !{i32 590081, metadata !1, metadata !"x", metadata !2, i32 50331659, metadata !15, i32 0} ; [ DW_TAG_arg_variable ]
+!15 = metadata !{i32 589846, metadata !0, metadata !"UInt64", metadata !2, i32 1, i64 0, i64 0, i64 0, i32 0, metadata !16} ; [ DW_TAG_typedef ]
+!16 = metadata !{i32 589860, metadata !0, metadata !"long long unsigned int", null, i32 0, i64 64, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
+!17 = metadata !{i32 590081, metadata !1, metadata !"y", metadata !2, i32 67108875, metadata !15, i32 0} ; [ DW_TAG_arg_variable ]
+!18 = metadata !{i32 590081, metadata !1, metadata !"ptr1", metadata !2, i32 83886091, metadata !6, i32 0} ; [ DW_TAG_arg_variable ]
+!19 = metadata !{i32 590081, metadata !1, metadata !"ptr2", metadata !2, i32 100663307, metadata !6, i32 0} ; [ DW_TAG_arg_variable ]
+!20 = metadata !{i32 11, i32 24, metadata !1, null}
+!21 = metadata !{i32 11, i32 44, metadata !1, null}
+!22 = metadata !{i32 11, i32 54, metadata !1, null}
+!23 = metadata !{i32 11, i32 64, metadata !1, null}
+!24 = metadata !{i32 11, i32 81, metadata !1, null}
+!25 = metadata !{i32 11, i32 101, metadata !1, null}
+!26 = metadata !{i32 12, i32 3, metadata !27, null}
+!27 = metadata !{i32 589835, metadata !1, i32 11, i32 107, metadata !2, i32 0} ; [ DW_TAG_lexical_block ]
+!28 = metadata !{i32 13, i32 5, metadata !27, null}
+!29 = metadata !{i32 14, i32 1, metadata !27, null}
diff --git a/test/CodeGen/ARM/debug-info-blocks.ll b/test/CodeGen/ARM/debug-info-blocks.ll
index 519c40e..2c59316 100644
--- a/test/CodeGen/ARM/debug-info-blocks.ll
+++ b/test/CodeGen/ARM/debug-info-blocks.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 < %s | FileCheck %s
-; CHECK: @DEBUG_VALUE: mydata <- [sp+#8]+#0
+; CHECK: @DEBUG_VALUE: mydata <- [sp+#4]+#0
; Radar 9331779
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-macosx10.7.0"
diff --git a/test/CodeGen/ARM/debug-info-sreg2.ll b/test/CodeGen/ARM/debug-info-sreg2.ll
index 16aeab3..ee777ce 100644
--- a/test/CodeGen/ARM/debug-info-sreg2.ll
+++ b/test/CodeGen/ARM/debug-info-sreg2.ll
@@ -5,9 +5,9 @@ target triple = "thumbv7-apple-macosx10.6.7"
;CHECK: Ldebug_loc0:
;CHECK-NEXT: .long Ltmp1
-;CHECK-NEXT: .long Ltmp3
-;CHECK-NEXT: Lset9 = Ltmp10-Ltmp9 @ Loc expr size
-;CHECK-NEXT: .short Lset9
+;CHECK-NEXT: .long Ltmp2
+;CHECK-NEXT: Lset8 = Ltmp10-Ltmp9 @ Loc expr size
+;CHECK-NEXT: .short Lset8
;CHECK-NEXT: Ltmp9:
;CHECK-NEXT: .byte 144 @ DW_OP_regx for S register
diff --git a/test/CodeGen/ARM/divmod.ll b/test/CodeGen/ARM/divmod.ll
new file mode 100644
index 0000000..49c4103
--- /dev/null
+++ b/test/CodeGen/ARM/divmod.ll
@@ -0,0 +1,58 @@
+; RUN: llc < %s -mtriple=arm-apple-ios5.0 | FileCheck %s
+
+define void @foo(i32 %x, i32 %y, i32* nocapture %P) nounwind ssp {
+entry:
+; CHECK: foo:
+; CHECK: bl ___divmodsi4
+; CHECK-NOT: bl ___divmodsi4
+ %div = sdiv i32 %x, %y
+ store i32 %div, i32* %P, align 4
+ %rem = srem i32 %x, %y
+ %arrayidx6 = getelementptr inbounds i32* %P, i32 1
+ store i32 %rem, i32* %arrayidx6, align 4
+ ret void
+}
+
+define void @bar(i32 %x, i32 %y, i32* nocapture %P) nounwind ssp {
+entry:
+; CHECK: bar:
+; CHECK: bl ___udivmodsi4
+; CHECK-NOT: bl ___udivmodsi4
+ %div = udiv i32 %x, %y
+ store i32 %div, i32* %P, align 4
+ %rem = urem i32 %x, %y
+ %arrayidx6 = getelementptr inbounds i32* %P, i32 1
+ store i32 %rem, i32* %arrayidx6, align 4
+ ret void
+}
+
+; rdar://9280991
+@flags = external unnamed_addr global i32
+@tabsize = external unnamed_addr global i32
+
+define void @do_indent(i32 %cols) nounwind {
+entry:
+; CHECK: do_indent:
+ %0 = load i32* @flags, align 4
+ %1 = and i32 %0, 67108864
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %bb1, label %bb
+
+bb:
+; CHECK: bl ___divmodsi4
+ %3 = load i32* @tabsize, align 4
+ %4 = srem i32 %cols, %3
+ %5 = sdiv i32 %cols, %3
+ %6 = tail call i32 @llvm.objectsize.i32(i8* null, i1 false)
+ %7 = tail call i8* @__memset_chk(i8* null, i32 9, i32 %5, i32 %6) nounwind
+ br label %bb1
+
+bb1:
+ %line_indent_len.0 = phi i32 [ %4, %bb ], [ 0, %entry ]
+ %8 = getelementptr inbounds i8* null, i32 %line_indent_len.0
+ store i8 0, i8* %8, align 1
+ ret void
+}
+
+declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readnone
+declare i8* @__memset_chk(i8*, i32, i32, i32) nounwind
diff --git a/test/CodeGen/ARM/elf-lcomm-align.ll b/test/CodeGen/ARM/elf-lcomm-align.ll
new file mode 100644
index 0000000..4679299
--- /dev/null
+++ b/test/CodeGen/ARM/elf-lcomm-align.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -mtriple=arm-linux-gnueabi -O0 | FileCheck %s
+; Run with -O0 to avoid ARM global merging.
+
+@c = internal global i8 0, align 1
+@x = internal global i32 0, align 4
+
+; CHECK: .lcomm c,1
+; .lcomm doesn't support alignment.
+; CHECK: .local x
+; CHECK-NEXT: .comm x,4,4
+
+define i32 @foo() nounwind {
+ ret i32 sub (i32 ptrtoint (i8* @c to i32), i32 ptrtoint (i32* @x to i32))
+}
diff --git a/test/CodeGen/ARM/fabss.ll b/test/CodeGen/ARM/fabss.ll
index 51efe51..45c322d 100644
--- a/test/CodeGen/ARM/fabss.ll
+++ b/test/CodeGen/ARM/fabss.ll
@@ -22,6 +22,8 @@ declare float @fabsf(float)
; NFP0: vabs.f32 s1, s1
; CORTEXA8: test:
-; CORTEXA8: vabs.f32 d1, d1
+; CORTEXA8: vadd.f32 [[D1:d[0-9]+]]
+; CORTEXA8: vabs.f32 {{d[0-9]+}}, [[D1]]
+
; CORTEXA9: test:
; CORTEXA9: vabs.f32 s{{.}}, s{{.}}
diff --git a/test/CodeGen/ARM/fast-isel.ll b/test/CodeGen/ARM/fast-isel.ll
index eb0c5c8..465e85f 100644
--- a/test/CodeGen/ARM/fast-isel.ll
+++ b/test/CodeGen/ARM/fast-isel.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=THUMB
; Very basic fast-isel functionality.
define i32 @add(i32 %a, i32 %b) nounwind {
diff --git a/test/CodeGen/ARM/fp_convert.ll b/test/CodeGen/ARM/fp_convert.ll
index 86c06f1..7002cec 100644
--- a/test/CodeGen/ARM/fp_convert.ll
+++ b/test/CodeGen/ARM/fp_convert.ll
@@ -7,7 +7,8 @@ define i32 @test1(float %a, float %b) {
; VFP2: test1:
; VFP2: vcvt.s32.f32 s{{.}}, s{{.}}
; NEON: test1:
-; NEON: vcvt.s32.f32 d0, d0
+; NEON: vadd.f32 [[D0:d[0-9]+]]
+; NEON: vcvt.s32.f32 d0, [[D0]]
entry:
%0 = fadd float %a, %b
%1 = fptosi float %0 to i32
@@ -18,7 +19,8 @@ define i32 @test2(float %a, float %b) {
; VFP2: test2:
; VFP2: vcvt.u32.f32 s{{.}}, s{{.}}
; NEON: test2:
-; NEON: vcvt.u32.f32 d0, d0
+; NEON: vadd.f32 [[D0:d[0-9]+]]
+; NEON: vcvt.u32.f32 d0, [[D0]]
entry:
%0 = fadd float %a, %b
%1 = fptoui float %0 to i32
diff --git a/test/CodeGen/ARM/fpmem.ll b/test/CodeGen/ARM/fpmem.ll
index c3cff18..3833933 100644
--- a/test/CodeGen/ARM/fpmem.ll
+++ b/test/CodeGen/ARM/fpmem.ll
@@ -14,6 +14,24 @@ define float @f2(float* %v, float %u) {
ret float %tmp1
}
+define float @f2offset(float* %v, float %u) {
+; CHECK: f2offset:
+; CHECK: vldr.32{{.*}}, #4]
+ %addr = getelementptr float* %v, i32 1
+ %tmp = load float* %addr
+ %tmp1 = fadd float %tmp, %u
+ ret float %tmp1
+}
+
+define float @f2noffset(float* %v, float %u) {
+; CHECK: f2noffset:
+; CHECK: vldr.32{{.*}}, #-4]
+ %addr = getelementptr float* %v, i32 -1
+ %tmp = load float* %addr
+ %tmp1 = fadd float %tmp, %u
+ ret float %tmp1
+}
+
define void @f3(float %a, float %b, float* %v) {
; CHECK: f3:
; CHECK: vstr.32{{.*}}[
diff --git a/test/CodeGen/ARM/hidden-vis-2.ll b/test/CodeGen/ARM/hidden-vis-2.ll
index 90f5308..8bb2c6e 100644
--- a/test/CodeGen/ARM/hidden-vis-2.ll
+++ b/test/CodeGen/ARM/hidden-vis-2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
+; RUN: llc < %s -relocation-model=dynamic-no-pic -mtriple=arm-apple-darwin | FileCheck %s
@x = weak hidden global i32 0 ; <i32*> [#uses=1]
diff --git a/test/CodeGen/ARM/hidden-vis-3.ll b/test/CodeGen/ARM/hidden-vis-3.ll
index fc8b2fe..3bc3312 100644
--- a/test/CodeGen/ARM/hidden-vis-3.ll
+++ b/test/CodeGen/ARM/hidden-vis-3.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin9 | FileCheck %s
+; RUN: llc < %s -relocation-model=dynamic-no-pic -mtriple=arm-apple-darwin9 | FileCheck %s
@x = external hidden global i32 ; <i32*> [#uses=1]
@y = extern_weak hidden global i32 ; <i32*> [#uses=1]
diff --git a/test/CodeGen/ARM/iabs.ll b/test/CodeGen/ARM/iabs.ll
index c01c041..89e309d 100644
--- a/test/CodeGen/ARM/iabs.ll
+++ b/test/CodeGen/ARM/iabs.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=arm -mattr=+v4t | FileCheck %s
;; Integer absolute value, should produce something as good as: ARM:
-;; add r3, r0, r0, asr #31
-;; eor r0, r3, r0, asr #31
+;; movs r0, r0
+;; rsbmi r0, r0, #0
;; bx lr
define i32 @test(i32 %a) {
@@ -10,7 +10,7 @@ define i32 @test(i32 %a) {
%b = icmp sgt i32 %a, -1
%abs = select i1 %b, i32 %a, i32 %tmp1neg
ret i32 %abs
-; CHECK: add r1, r0, r0, asr #31
-; CHECK: eor r0, r1, r0, asr #31
+; CHECK: movs r0, r0
+; CHECK: rsbmi r0, r0, #0
; CHECK: bx lr
}
diff --git a/test/CodeGen/ARM/ifcvt4.ll b/test/CodeGen/ARM/ifcvt4.ll
index f28c61b..d247f14 100644
--- a/test/CodeGen/ARM/ifcvt4.ll
+++ b/test/CodeGen/ARM/ifcvt4.ll
@@ -1,8 +1,10 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -march=arm | grep subgt | count 1
-; RUN: llc < %s -march=arm | grep suble | count 1
-; FIXME: Check for # of unconditional branch after adding branch folding post ifcvt.
+; RUN: llc < %s -march=arm | FileCheck %s
+; Do not if-convert when branches go to different loops.
+; CHECK: t:
+; CHECK-NOT: subgt
+; CHECK-NOT: suble
+; Don't use
define i32 @t(i32 %a, i32 %b) {
entry:
%tmp1434 = icmp eq i32 %a, %b ; <i1> [#uses=1]
diff --git a/test/CodeGen/ARM/indirectbr.ll b/test/CodeGen/ARM/indirectbr.ll
index 25a0f93..341c33f 100644
--- a/test/CodeGen/ARM/indirectbr.ll
+++ b/test/CodeGen/ARM/indirectbr.ll
@@ -22,7 +22,6 @@ bb2: ; preds = %entry, %bb3
%gotovar.4.0 = phi i8* [ %gotovar.4.0.pre, %bb3 ], [ %0, %entry ] ; <i8*> [#uses=1]
; ARM: bx
; THUMB: mov pc,
-; THUMB2: mov pc,
indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
bb3: ; preds = %entry
diff --git a/test/CodeGen/ARM/inlineasm3.ll b/test/CodeGen/ARM/inlineasm3.ll
index 853585d..cb5243c 100644
--- a/test/CodeGen/ARM/inlineasm3.ll
+++ b/test/CodeGen/ARM/inlineasm3.ll
@@ -98,3 +98,15 @@ entry:
%0 = tail call i32 asm "movw $0, $1", "=r,j"(i32 27182) nounwind
ret i32 %0
}
+
+; Radar 9866494
+
+define void @t10(i8* %f, i32 %g) nounwind {
+entry:
+; CHECK: t10
+; CHECK: str r1, [r0]
+ %f.addr = alloca i8*, align 4
+ store i8* %f, i8** %f.addr, align 4
+ call void asm "str $1, $0", "=*Q,r"(i8** %f.addr, i32 %g) nounwind
+ ret void
+}
diff --git a/test/CodeGen/ARM/inlineasm4.ll b/test/CodeGen/ARM/inlineasm4.ll
new file mode 100644
index 0000000..9ed4b99
--- /dev/null
+++ b/test/CodeGen/ARM/inlineasm4.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -march=arm | FileCheck %s
+
+define double @f(double %x) {
+entry:
+ %0 = tail call double asm "mov ${0:R}, #4\0A", "=&r"()
+ ret double %0
+; CHECK: f:
+; CHECK: mov r1, #4
+}
+
+define double @g(double %x) {
+entry:
+ %0 = tail call double asm "mov ${0:Q}, #4\0A", "=&r"()
+ ret double %0
+; CHECK: g:
+; CHECK: mov r0, #4
+}
diff --git a/test/CodeGen/ARM/lsr-on-unrolled-loops.ll b/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
index c1318ec..4737901 100644
--- a/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
+++ b/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumbv7-apple-darwin10 -mcpu=cortex-a8 < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv7-apple-darwin10 -mcpu=cortex-a8 -enable-lsr-nested < %s | FileCheck %s
; LSR should recognize that this is an unrolled loop which can use
; constant offset addressing, so that each of the following stores
@@ -8,6 +8,9 @@
; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #64]
; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #96]
+; We can also save a register in the outer loop, but that requires
+; performing LSR on the outer loop.
+
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
%0 = type { %1*, %3*, %6*, i8*, i32, i32, %8*, i32, i32, i32, i32, i32, i32, i32, double, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8**, i32, i32, i32, i32, i32, [64 x i32]*, [4 x %9*], [4 x %10*], [4 x %10*], i32, %11*, i32, i32, [16 x i8], [16 x i8], [16 x i8], i32, i32, i8, i8, i8, i16, i16, i32, i8, i32, %12*, i32, i32, i32, i32, i8*, i32, [4 x %11*], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, %13*, %14*, %15*, %16*, %17*, %18*, %19*, %20*, %21*, %22*, %23* }
@@ -37,107 +40,107 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
define void @test(%0* nocapture %a0, %11* nocapture %a1, i16* nocapture %a2, i8** nocapture %a3, i32 %a4) nounwind {
bb:
- %t = alloca [64 x float], align 4
+ %t = alloca [64 x float], align 4
%t5 = getelementptr inbounds %0* %a0, i32 0, i32 65
- %t6 = load i8** %t5, align 4
+ %t6 = load i8** %t5, align 4
%t7 = getelementptr inbounds %11* %a1, i32 0, i32 20
- %t8 = load i8** %t7, align 4
+ %t8 = load i8** %t7, align 4
br label %bb9
-bb9:
+bb9:
%t10 = phi i32 [ 0, %bb ], [ %t157, %bb156 ]
- %t11 = add i32 %t10, 8
+ %t11 = add i32 %t10, 8
%t12 = getelementptr [64 x float]* %t, i32 0, i32 %t11
- %t13 = add i32 %t10, 16
+ %t13 = add i32 %t10, 16
%t14 = getelementptr [64 x float]* %t, i32 0, i32 %t13
- %t15 = add i32 %t10, 24
+ %t15 = add i32 %t10, 24
%t16 = getelementptr [64 x float]* %t, i32 0, i32 %t15
- %t17 = add i32 %t10, 32
+ %t17 = add i32 %t10, 32
%t18 = getelementptr [64 x float]* %t, i32 0, i32 %t17
- %t19 = add i32 %t10, 40
+ %t19 = add i32 %t10, 40
%t20 = getelementptr [64 x float]* %t, i32 0, i32 %t19
- %t21 = add i32 %t10, 48
+ %t21 = add i32 %t10, 48
%t22 = getelementptr [64 x float]* %t, i32 0, i32 %t21
- %t23 = add i32 %t10, 56
+ %t23 = add i32 %t10, 56
%t24 = getelementptr [64 x float]* %t, i32 0, i32 %t23
%t25 = getelementptr [64 x float]* %t, i32 0, i32 %t10
- %t26 = shl i32 %t10, 5
- %t27 = or i32 %t26, 8
- %t28 = getelementptr i8* %t8, i32 %t27
- %t29 = bitcast i8* %t28 to float*
- %t30 = or i32 %t26, 16
- %t31 = getelementptr i8* %t8, i32 %t30
- %t32 = bitcast i8* %t31 to float*
- %t33 = or i32 %t26, 24
- %t34 = getelementptr i8* %t8, i32 %t33
- %t35 = bitcast i8* %t34 to float*
- %t36 = or i32 %t26, 4
- %t37 = getelementptr i8* %t8, i32 %t36
- %t38 = bitcast i8* %t37 to float*
- %t39 = or i32 %t26, 12
- %t40 = getelementptr i8* %t8, i32 %t39
- %t41 = bitcast i8* %t40 to float*
- %t42 = or i32 %t26, 20
- %t43 = getelementptr i8* %t8, i32 %t42
- %t44 = bitcast i8* %t43 to float*
- %t45 = or i32 %t26, 28
- %t46 = getelementptr i8* %t8, i32 %t45
- %t47 = bitcast i8* %t46 to float*
- %t48 = getelementptr i8* %t8, i32 %t26
- %t49 = bitcast i8* %t48 to float*
- %t50 = shl i32 %t10, 3
- %t51 = or i32 %t50, 1
- %t52 = getelementptr i16* %a2, i32 %t51
- %t53 = or i32 %t50, 2
- %t54 = getelementptr i16* %a2, i32 %t53
- %t55 = or i32 %t50, 3
- %t56 = getelementptr i16* %a2, i32 %t55
- %t57 = or i32 %t50, 4
- %t58 = getelementptr i16* %a2, i32 %t57
- %t59 = or i32 %t50, 5
- %t60 = getelementptr i16* %a2, i32 %t59
- %t61 = or i32 %t50, 6
- %t62 = getelementptr i16* %a2, i32 %t61
- %t63 = or i32 %t50, 7
- %t64 = getelementptr i16* %a2, i32 %t63
- %t65 = getelementptr i16* %a2, i32 %t50
- %t66 = load i16* %t52, align 2
- %t67 = icmp eq i16 %t66, 0
- %t68 = load i16* %t54, align 2
- %t69 = icmp eq i16 %t68, 0
- %t70 = and i1 %t67, %t69
+ %t26 = shl i32 %t10, 5
+ %t27 = or i32 %t26, 8
+ %t28 = getelementptr i8* %t8, i32 %t27
+ %t29 = bitcast i8* %t28 to float*
+ %t30 = or i32 %t26, 16
+ %t31 = getelementptr i8* %t8, i32 %t30
+ %t32 = bitcast i8* %t31 to float*
+ %t33 = or i32 %t26, 24
+ %t34 = getelementptr i8* %t8, i32 %t33
+ %t35 = bitcast i8* %t34 to float*
+ %t36 = or i32 %t26, 4
+ %t37 = getelementptr i8* %t8, i32 %t36
+ %t38 = bitcast i8* %t37 to float*
+ %t39 = or i32 %t26, 12
+ %t40 = getelementptr i8* %t8, i32 %t39
+ %t41 = bitcast i8* %t40 to float*
+ %t42 = or i32 %t26, 20
+ %t43 = getelementptr i8* %t8, i32 %t42
+ %t44 = bitcast i8* %t43 to float*
+ %t45 = or i32 %t26, 28
+ %t46 = getelementptr i8* %t8, i32 %t45
+ %t47 = bitcast i8* %t46 to float*
+ %t48 = getelementptr i8* %t8, i32 %t26
+ %t49 = bitcast i8* %t48 to float*
+ %t50 = shl i32 %t10, 3
+ %t51 = or i32 %t50, 1
+ %t52 = getelementptr i16* %a2, i32 %t51
+ %t53 = or i32 %t50, 2
+ %t54 = getelementptr i16* %a2, i32 %t53
+ %t55 = or i32 %t50, 3
+ %t56 = getelementptr i16* %a2, i32 %t55
+ %t57 = or i32 %t50, 4
+ %t58 = getelementptr i16* %a2, i32 %t57
+ %t59 = or i32 %t50, 5
+ %t60 = getelementptr i16* %a2, i32 %t59
+ %t61 = or i32 %t50, 6
+ %t62 = getelementptr i16* %a2, i32 %t61
+ %t63 = or i32 %t50, 7
+ %t64 = getelementptr i16* %a2, i32 %t63
+ %t65 = getelementptr i16* %a2, i32 %t50
+ %t66 = load i16* %t52, align 2
+ %t67 = icmp eq i16 %t66, 0
+ %t68 = load i16* %t54, align 2
+ %t69 = icmp eq i16 %t68, 0
+ %t70 = and i1 %t67, %t69
br i1 %t70, label %bb71, label %bb91
-bb71:
- %t72 = load i16* %t56, align 2
- %t73 = icmp eq i16 %t72, 0
+bb71:
+ %t72 = load i16* %t56, align 2
+ %t73 = icmp eq i16 %t72, 0
br i1 %t73, label %bb74, label %bb91
-bb74:
- %t75 = load i16* %t58, align 2
- %t76 = icmp eq i16 %t75, 0
+bb74:
+ %t75 = load i16* %t58, align 2
+ %t76 = icmp eq i16 %t75, 0
br i1 %t76, label %bb77, label %bb91
-bb77:
- %t78 = load i16* %t60, align 2
- %t79 = icmp eq i16 %t78, 0
+bb77:
+ %t78 = load i16* %t60, align 2
+ %t79 = icmp eq i16 %t78, 0
br i1 %t79, label %bb80, label %bb91
-bb80:
- %t81 = load i16* %t62, align 2
- %t82 = icmp eq i16 %t81, 0
+bb80:
+ %t81 = load i16* %t62, align 2
+ %t82 = icmp eq i16 %t81, 0
br i1 %t82, label %bb83, label %bb91
-bb83:
- %t84 = load i16* %t64, align 2
- %t85 = icmp eq i16 %t84, 0
+bb83:
+ %t84 = load i16* %t64, align 2
+ %t85 = icmp eq i16 %t84, 0
br i1 %t85, label %bb86, label %bb91
-bb86:
- %t87 = load i16* %t65, align 2
- %t88 = sitofp i16 %t87 to float
- %t89 = load float* %t49, align 4
- %t90 = fmul float %t88, %t89
+bb86:
+ %t87 = load i16* %t65, align 2
+ %t88 = sitofp i16 %t87 to float
+ %t89 = load float* %t49, align 4
+ %t90 = fmul float %t88, %t89
store float %t90, float* %t25, align 4
store float %t90, float* %t12, align 4
store float %t90, float* %t14, align 4
@@ -148,235 +151,235 @@ bb86:
store float %t90, float* %t24, align 4
br label %bb156
-bb91:
- %t92 = load i16* %t65, align 2
- %t93 = sitofp i16 %t92 to float
- %t94 = load float* %t49, align 4
- %t95 = fmul float %t93, %t94
- %t96 = sitofp i16 %t68 to float
- %t97 = load float* %t29, align 4
- %t98 = fmul float %t96, %t97
- %t99 = load i16* %t58, align 2
- %t100 = sitofp i16 %t99 to float
- %t101 = load float* %t32, align 4
- %t102 = fmul float %t100, %t101
- %t103 = load i16* %t62, align 2
- %t104 = sitofp i16 %t103 to float
- %t105 = load float* %t35, align 4
- %t106 = fmul float %t104, %t105
- %t107 = fadd float %t95, %t102
- %t108 = fsub float %t95, %t102
- %t109 = fadd float %t98, %t106
- %t110 = fsub float %t98, %t106
+bb91:
+ %t92 = load i16* %t65, align 2
+ %t93 = sitofp i16 %t92 to float
+ %t94 = load float* %t49, align 4
+ %t95 = fmul float %t93, %t94
+ %t96 = sitofp i16 %t68 to float
+ %t97 = load float* %t29, align 4
+ %t98 = fmul float %t96, %t97
+ %t99 = load i16* %t58, align 2
+ %t100 = sitofp i16 %t99 to float
+ %t101 = load float* %t32, align 4
+ %t102 = fmul float %t100, %t101
+ %t103 = load i16* %t62, align 2
+ %t104 = sitofp i16 %t103 to float
+ %t105 = load float* %t35, align 4
+ %t106 = fmul float %t104, %t105
+ %t107 = fadd float %t95, %t102
+ %t108 = fsub float %t95, %t102
+ %t109 = fadd float %t98, %t106
+ %t110 = fsub float %t98, %t106
%t111 = fmul float %t110, 0x3FF6A09E60000000
- %t112 = fsub float %t111, %t109
- %t113 = fadd float %t107, %t109
- %t114 = fsub float %t107, %t109
- %t115 = fadd float %t108, %t112
- %t116 = fsub float %t108, %t112
- %t117 = sitofp i16 %t66 to float
- %t118 = load float* %t38, align 4
- %t119 = fmul float %t117, %t118
- %t120 = load i16* %t56, align 2
- %t121 = sitofp i16 %t120 to float
- %t122 = load float* %t41, align 4
- %t123 = fmul float %t121, %t122
- %t124 = load i16* %t60, align 2
- %t125 = sitofp i16 %t124 to float
- %t126 = load float* %t44, align 4
- %t127 = fmul float %t125, %t126
- %t128 = load i16* %t64, align 2
- %t129 = sitofp i16 %t128 to float
- %t130 = load float* %t47, align 4
- %t131 = fmul float %t129, %t130
- %t132 = fadd float %t127, %t123
- %t133 = fsub float %t127, %t123
- %t134 = fadd float %t119, %t131
- %t135 = fsub float %t119, %t131
- %t136 = fadd float %t134, %t132
- %t137 = fsub float %t134, %t132
+ %t112 = fsub float %t111, %t109
+ %t113 = fadd float %t107, %t109
+ %t114 = fsub float %t107, %t109
+ %t115 = fadd float %t108, %t112
+ %t116 = fsub float %t108, %t112
+ %t117 = sitofp i16 %t66 to float
+ %t118 = load float* %t38, align 4
+ %t119 = fmul float %t117, %t118
+ %t120 = load i16* %t56, align 2
+ %t121 = sitofp i16 %t120 to float
+ %t122 = load float* %t41, align 4
+ %t123 = fmul float %t121, %t122
+ %t124 = load i16* %t60, align 2
+ %t125 = sitofp i16 %t124 to float
+ %t126 = load float* %t44, align 4
+ %t127 = fmul float %t125, %t126
+ %t128 = load i16* %t64, align 2
+ %t129 = sitofp i16 %t128 to float
+ %t130 = load float* %t47, align 4
+ %t131 = fmul float %t129, %t130
+ %t132 = fadd float %t127, %t123
+ %t133 = fsub float %t127, %t123
+ %t134 = fadd float %t119, %t131
+ %t135 = fsub float %t119, %t131
+ %t136 = fadd float %t134, %t132
+ %t137 = fsub float %t134, %t132
%t138 = fmul float %t137, 0x3FF6A09E60000000
- %t139 = fadd float %t133, %t135
+ %t139 = fadd float %t133, %t135
%t140 = fmul float %t139, 0x3FFD906BC0000000
%t141 = fmul float %t135, 0x3FF1517A80000000
- %t142 = fsub float %t141, %t140
+ %t142 = fsub float %t141, %t140
%t143 = fmul float %t133, 0xC004E7AEA0000000
- %t144 = fadd float %t143, %t140
- %t145 = fsub float %t144, %t136
- %t146 = fsub float %t138, %t145
- %t147 = fadd float %t142, %t146
- %t148 = fadd float %t113, %t136
+ %t144 = fadd float %t143, %t140
+ %t145 = fsub float %t144, %t136
+ %t146 = fsub float %t138, %t145
+ %t147 = fadd float %t142, %t146
+ %t148 = fadd float %t113, %t136
store float %t148, float* %t25, align 4
- %t149 = fsub float %t113, %t136
+ %t149 = fsub float %t113, %t136
store float %t149, float* %t24, align 4
- %t150 = fadd float %t115, %t145
+ %t150 = fadd float %t115, %t145
store float %t150, float* %t12, align 4
- %t151 = fsub float %t115, %t145
+ %t151 = fsub float %t115, %t145
store float %t151, float* %t22, align 4
- %t152 = fadd float %t116, %t146
+ %t152 = fadd float %t116, %t146
store float %t152, float* %t14, align 4
- %t153 = fsub float %t116, %t146
+ %t153 = fsub float %t116, %t146
store float %t153, float* %t20, align 4
- %t154 = fadd float %t114, %t147
+ %t154 = fadd float %t114, %t147
store float %t154, float* %t18, align 4
- %t155 = fsub float %t114, %t147
+ %t155 = fsub float %t114, %t147
store float %t155, float* %t16, align 4
br label %bb156
-bb156:
- %t157 = add i32 %t10, 1
- %t158 = icmp eq i32 %t157, 8
+bb156:
+ %t157 = add i32 %t10, 1
+ %t158 = icmp eq i32 %t157, 8
br i1 %t158, label %bb159, label %bb9
-bb159:
- %t160 = add i32 %a4, 7
- %t161 = add i32 %a4, 1
- %t162 = add i32 %a4, 6
- %t163 = add i32 %a4, 2
- %t164 = add i32 %a4, 5
- %t165 = add i32 %a4, 4
- %t166 = add i32 %a4, 3
+bb159:
+ %t160 = add i32 %a4, 7
+ %t161 = add i32 %a4, 1
+ %t162 = add i32 %a4, 6
+ %t163 = add i32 %a4, 2
+ %t164 = add i32 %a4, 5
+ %t165 = add i32 %a4, 4
+ %t166 = add i32 %a4, 3
br label %bb167
-bb167:
+bb167:
%t168 = phi i32 [ 0, %bb159 ], [ %t293, %bb167 ]
%t169 = getelementptr i8** %a3, i32 %t168
- %t170 = shl i32 %t168, 3
- %t171 = or i32 %t170, 4
+ %t170 = shl i32 %t168, 3
+ %t171 = or i32 %t170, 4
%t172 = getelementptr [64 x float]* %t, i32 0, i32 %t171
- %t173 = or i32 %t170, 2
+ %t173 = or i32 %t170, 2
%t174 = getelementptr [64 x float]* %t, i32 0, i32 %t173
- %t175 = or i32 %t170, 6
+ %t175 = or i32 %t170, 6
%t176 = getelementptr [64 x float]* %t, i32 0, i32 %t175
- %t177 = or i32 %t170, 5
+ %t177 = or i32 %t170, 5
%t178 = getelementptr [64 x float]* %t, i32 0, i32 %t177
- %t179 = or i32 %t170, 3
+ %t179 = or i32 %t170, 3
%t180 = getelementptr [64 x float]* %t, i32 0, i32 %t179
- %t181 = or i32 %t170, 1
+ %t181 = or i32 %t170, 1
%t182 = getelementptr [64 x float]* %t, i32 0, i32 %t181
- %t183 = or i32 %t170, 7
+ %t183 = or i32 %t170, 7
%t184 = getelementptr [64 x float]* %t, i32 0, i32 %t183
%t185 = getelementptr [64 x float]* %t, i32 0, i32 %t170
- %t186 = load i8** %t169, align 4
+ %t186 = load i8** %t169, align 4
%t187 = getelementptr inbounds i8* %t186, i32 %a4
- %t188 = load float* %t185, align 4
- %t189 = load float* %t172, align 4
- %t190 = fadd float %t188, %t189
- %t191 = fsub float %t188, %t189
- %t192 = load float* %t174, align 4
- %t193 = load float* %t176, align 4
- %t194 = fadd float %t192, %t193
- %t195 = fsub float %t192, %t193
+ %t188 = load float* %t185, align 4
+ %t189 = load float* %t172, align 4
+ %t190 = fadd float %t188, %t189
+ %t191 = fsub float %t188, %t189
+ %t192 = load float* %t174, align 4
+ %t193 = load float* %t176, align 4
+ %t194 = fadd float %t192, %t193
+ %t195 = fsub float %t192, %t193
%t196 = fmul float %t195, 0x3FF6A09E60000000
- %t197 = fsub float %t196, %t194
- %t198 = fadd float %t190, %t194
- %t199 = fsub float %t190, %t194
- %t200 = fadd float %t191, %t197
- %t201 = fsub float %t191, %t197
- %t202 = load float* %t178, align 4
- %t203 = load float* %t180, align 4
- %t204 = fadd float %t202, %t203
- %t205 = fsub float %t202, %t203
- %t206 = load float* %t182, align 4
- %t207 = load float* %t184, align 4
- %t208 = fadd float %t206, %t207
- %t209 = fsub float %t206, %t207
- %t210 = fadd float %t208, %t204
- %t211 = fsub float %t208, %t204
+ %t197 = fsub float %t196, %t194
+ %t198 = fadd float %t190, %t194
+ %t199 = fsub float %t190, %t194
+ %t200 = fadd float %t191, %t197
+ %t201 = fsub float %t191, %t197
+ %t202 = load float* %t178, align 4
+ %t203 = load float* %t180, align 4
+ %t204 = fadd float %t202, %t203
+ %t205 = fsub float %t202, %t203
+ %t206 = load float* %t182, align 4
+ %t207 = load float* %t184, align 4
+ %t208 = fadd float %t206, %t207
+ %t209 = fsub float %t206, %t207
+ %t210 = fadd float %t208, %t204
+ %t211 = fsub float %t208, %t204
%t212 = fmul float %t211, 0x3FF6A09E60000000
- %t213 = fadd float %t205, %t209
+ %t213 = fadd float %t205, %t209
%t214 = fmul float %t213, 0x3FFD906BC0000000
%t215 = fmul float %t209, 0x3FF1517A80000000
- %t216 = fsub float %t215, %t214
+ %t216 = fsub float %t215, %t214
%t217 = fmul float %t205, 0xC004E7AEA0000000
- %t218 = fadd float %t217, %t214
- %t219 = fsub float %t218, %t210
- %t220 = fsub float %t212, %t219
- %t221 = fadd float %t216, %t220
- %t222 = fadd float %t198, %t210
- %t223 = fptosi float %t222 to i32
- %t224 = add nsw i32 %t223, 4
- %t225 = lshr i32 %t224, 3
- %t226 = and i32 %t225, 1023
- %t227 = add i32 %t226, 128
+ %t218 = fadd float %t217, %t214
+ %t219 = fsub float %t218, %t210
+ %t220 = fsub float %t212, %t219
+ %t221 = fadd float %t216, %t220
+ %t222 = fadd float %t198, %t210
+ %t223 = fptosi float %t222 to i32
+ %t224 = add nsw i32 %t223, 4
+ %t225 = lshr i32 %t224, 3
+ %t226 = and i32 %t225, 1023
+ %t227 = add i32 %t226, 128
%t228 = getelementptr inbounds i8* %t6, i32 %t227
- %t229 = load i8* %t228, align 1
+ %t229 = load i8* %t228, align 1
store i8 %t229, i8* %t187, align 1
- %t230 = fsub float %t198, %t210
- %t231 = fptosi float %t230 to i32
- %t232 = add nsw i32 %t231, 4
- %t233 = lshr i32 %t232, 3
- %t234 = and i32 %t233, 1023
- %t235 = add i32 %t234, 128
+ %t230 = fsub float %t198, %t210
+ %t231 = fptosi float %t230 to i32
+ %t232 = add nsw i32 %t231, 4
+ %t233 = lshr i32 %t232, 3
+ %t234 = and i32 %t233, 1023
+ %t235 = add i32 %t234, 128
%t236 = getelementptr inbounds i8* %t6, i32 %t235
- %t237 = load i8* %t236, align 1
+ %t237 = load i8* %t236, align 1
%t238 = getelementptr inbounds i8* %t186, i32 %t160
store i8 %t237, i8* %t238, align 1
- %t239 = fadd float %t200, %t219
- %t240 = fptosi float %t239 to i32
- %t241 = add nsw i32 %t240, 4
- %t242 = lshr i32 %t241, 3
- %t243 = and i32 %t242, 1023
- %t244 = add i32 %t243, 128
+ %t239 = fadd float %t200, %t219
+ %t240 = fptosi float %t239 to i32
+ %t241 = add nsw i32 %t240, 4
+ %t242 = lshr i32 %t241, 3
+ %t243 = and i32 %t242, 1023
+ %t244 = add i32 %t243, 128
%t245 = getelementptr inbounds i8* %t6, i32 %t244
- %t246 = load i8* %t245, align 1
+ %t246 = load i8* %t245, align 1
%t247 = getelementptr inbounds i8* %t186, i32 %t161
store i8 %t246, i8* %t247, align 1
- %t248 = fsub float %t200, %t219
- %t249 = fptosi float %t248 to i32
- %t250 = add nsw i32 %t249, 4
- %t251 = lshr i32 %t250, 3
- %t252 = and i32 %t251, 1023
- %t253 = add i32 %t252, 128
+ %t248 = fsub float %t200, %t219
+ %t249 = fptosi float %t248 to i32
+ %t250 = add nsw i32 %t249, 4
+ %t251 = lshr i32 %t250, 3
+ %t252 = and i32 %t251, 1023
+ %t253 = add i32 %t252, 128
%t254 = getelementptr inbounds i8* %t6, i32 %t253
- %t255 = load i8* %t254, align 1
+ %t255 = load i8* %t254, align 1
%t256 = getelementptr inbounds i8* %t186, i32 %t162
store i8 %t255, i8* %t256, align 1
- %t257 = fadd float %t201, %t220
- %t258 = fptosi float %t257 to i32
- %t259 = add nsw i32 %t258, 4
- %t260 = lshr i32 %t259, 3
- %t261 = and i32 %t260, 1023
- %t262 = add i32 %t261, 128
+ %t257 = fadd float %t201, %t220
+ %t258 = fptosi float %t257 to i32
+ %t259 = add nsw i32 %t258, 4
+ %t260 = lshr i32 %t259, 3
+ %t261 = and i32 %t260, 1023
+ %t262 = add i32 %t261, 128
%t263 = getelementptr inbounds i8* %t6, i32 %t262
- %t264 = load i8* %t263, align 1
+ %t264 = load i8* %t263, align 1
%t265 = getelementptr inbounds i8* %t186, i32 %t163
store i8 %t264, i8* %t265, align 1
- %t266 = fsub float %t201, %t220
- %t267 = fptosi float %t266 to i32
- %t268 = add nsw i32 %t267, 4
- %t269 = lshr i32 %t268, 3
- %t270 = and i32 %t269, 1023
- %t271 = add i32 %t270, 128
+ %t266 = fsub float %t201, %t220
+ %t267 = fptosi float %t266 to i32
+ %t268 = add nsw i32 %t267, 4
+ %t269 = lshr i32 %t268, 3
+ %t270 = and i32 %t269, 1023
+ %t271 = add i32 %t270, 128
%t272 = getelementptr inbounds i8* %t6, i32 %t271
- %t273 = load i8* %t272, align 1
+ %t273 = load i8* %t272, align 1
%t274 = getelementptr inbounds i8* %t186, i32 %t164
store i8 %t273, i8* %t274, align 1
- %t275 = fadd float %t199, %t221
- %t276 = fptosi float %t275 to i32
- %t277 = add nsw i32 %t276, 4
- %t278 = lshr i32 %t277, 3
- %t279 = and i32 %t278, 1023
- %t280 = add i32 %t279, 128
+ %t275 = fadd float %t199, %t221
+ %t276 = fptosi float %t275 to i32
+ %t277 = add nsw i32 %t276, 4
+ %t278 = lshr i32 %t277, 3
+ %t279 = and i32 %t278, 1023
+ %t280 = add i32 %t279, 128
%t281 = getelementptr inbounds i8* %t6, i32 %t280
- %t282 = load i8* %t281, align 1
+ %t282 = load i8* %t281, align 1
%t283 = getelementptr inbounds i8* %t186, i32 %t165
store i8 %t282, i8* %t283, align 1
- %t284 = fsub float %t199, %t221
- %t285 = fptosi float %t284 to i32
- %t286 = add nsw i32 %t285, 4
- %t287 = lshr i32 %t286, 3
- %t288 = and i32 %t287, 1023
- %t289 = add i32 %t288, 128
+ %t284 = fsub float %t199, %t221
+ %t285 = fptosi float %t284 to i32
+ %t286 = add nsw i32 %t285, 4
+ %t287 = lshr i32 %t286, 3
+ %t288 = and i32 %t287, 1023
+ %t289 = add i32 %t288, 128
%t290 = getelementptr inbounds i8* %t6, i32 %t289
- %t291 = load i8* %t290, align 1
+ %t291 = load i8* %t290, align 1
%t292 = getelementptr inbounds i8* %t186, i32 %t166
store i8 %t291, i8* %t292, align 1
- %t293 = add nsw i32 %t168, 1
- %t294 = icmp eq i32 %t293, 8
+ %t293 = add nsw i32 %t168, 1
+ %t294 = icmp eq i32 %t293, 8
br i1 %t294, label %bb295, label %bb167
-bb295:
+bb295:
ret void
}
diff --git a/test/CodeGen/ARM/lsr-unfolded-offset.ll b/test/CodeGen/ARM/lsr-unfolded-offset.ll
index 61b25bb..bf26a96 100644
--- a/test/CodeGen/ARM/lsr-unfolded-offset.ll
+++ b/test/CodeGen/ARM/lsr-unfolded-offset.ll
@@ -4,12 +4,11 @@
; register pressure and therefore spilling. There is more room for improvement
; here.
-; CHECK: sub sp, #{{32|28|24}}
+; CHECK: sub sp, #{{40|32|28|24}}
; CHECK: %for.inc
; CHECK: ldr{{(.w)?}} r{{.*}}, [sp, #
; CHECK: ldr{{(.w)?}} r{{.*}}, [sp, #
-; CHECK: ldr{{(.w)?}} r{{.*}}, [sp, #
; CHECK: add
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
diff --git a/test/CodeGen/ARM/mulhi.ll b/test/CodeGen/ARM/mulhi.ll
index 148f291..932004c 100644
--- a/test/CodeGen/ARM/mulhi.ll
+++ b/test/CodeGen/ARM/mulhi.ll
@@ -1,9 +1,16 @@
-; RUN: llc < %s -march=arm -mattr=+v6
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN: grep smmul | count 1
-; RUN: llc < %s -march=arm | grep umull | count 1
+; RUN: llc < %s -march=arm -mattr=+v6 | FileCheck %s -check-prefix=V6
+; RUN: llc < %s -march=arm | FileCheck %s -check-prefix=V4
+; RUN: llc < %s -march=thumb -mcpu=cortex-m3 | FileCheck %s -check-prefix=M3
-define i32 @smulhi(i32 %x, i32 %y) {
+define i32 @smulhi(i32 %x, i32 %y) nounwind {
+; V6: smulhi:
+; V6: smmul
+
+; V4: smulhi:
+; V4: smull
+
+; M3: smulhi:
+; M3: smull
%tmp = sext i32 %x to i64 ; <i64> [#uses=1]
%tmp1 = sext i32 %y to i64 ; <i64> [#uses=1]
%tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
@@ -12,7 +19,15 @@ define i32 @smulhi(i32 %x, i32 %y) {
ret i32 %tmp3.upgrd.1
}
-define i32 @umulhi(i32 %x, i32 %y) {
+define i32 @umulhi(i32 %x, i32 %y) nounwind {
+; V6: umulhi:
+; V6: umull
+
+; V4: umulhi:
+; V4: umull
+
+; M3: umulhi:
+; M3: umull
%tmp = zext i32 %x to i64 ; <i64> [#uses=1]
%tmp1 = zext i32 %y to i64 ; <i64> [#uses=1]
%tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
@@ -20,3 +35,20 @@ define i32 @umulhi(i32 %x, i32 %y) {
%tmp3.upgrd.2 = trunc i64 %tmp3 to i32 ; <i32> [#uses=1]
ret i32 %tmp3.upgrd.2
}
+
+; rdar://r10152911
+define i32 @t3(i32 %a) nounwind {
+; V6: t3:
+; V6: smmla
+
+; V4: t3:
+; V4: smull
+
+; M3: t3:
+; M3-NOT: smmla
+; M3: smull
+entry:
+ %tmp1 = mul nsw i32 %a, 3
+ %tmp2 = sdiv i32 %tmp1, 23
+ ret i32 %tmp2
+}
diff --git a/test/CodeGen/ARM/select.ll b/test/CodeGen/ARM/select.ll
index d1493ee..f1bd7ee 100644
--- a/test/CodeGen/ARM/select.ll
+++ b/test/CodeGen/ARM/select.ll
@@ -76,12 +76,12 @@ define double @f7(double %a, double %b) {
; block generated, odds are good that we have close to the ideal code for this:
;
; CHECK-NEON: _f8:
-; CHECK-NEON: movw [[REGISTER_1:r[0-9]+]], #1123
-; CHECK-NEON-NEXT: movs [[REGISTER_2:r[0-9]+]], #0
-; CHECK-NEON-NEXT: cmp r0, [[REGISTER_1]]
-; CHECK-NEON-NEXT: it eq
-; CHECK-NEON-NEXT: moveq [[REGISTER_2]], #4
-; CHECK-NEON-NEXT: adr [[REGISTER_3:r[0-9]+]], #LCPI
+; CHECK-NEON: adr r2, LCPI7_0
+; CHECK-NEON-NEXT: movw r3, #1123
+; CHECK-NEON-NEXT: adds r1, r2, #4
+; CHECK-NEON-NEXT: cmp r0, r3
+; CHECK-NEON-NEXT: it ne
+; CHECK-NEON-NEXT: movne r1, r2
; CHECK-NEON-NEXT: ldr
; CHECK-NEON: bx
diff --git a/test/CodeGen/ARM/shifter_operand.ll b/test/CodeGen/ARM/shifter_operand.ll
index f0e2d10..964cef0 100644
--- a/test/CodeGen/ARM/shifter_operand.ll
+++ b/test/CodeGen/ARM/shifter_operand.ll
@@ -54,13 +54,12 @@ declare i8* @malloc(...)
define fastcc void @test4(i16 %addr) nounwind {
entry:
; A8: test4:
-; A8: ldr r2, [r0, r1, lsl #2]
-; A8: str r2, [r0, r1, lsl #2]
+; A8: ldr [[REG:r[0-9]+]], [r0, r1, lsl #2]
+; A8: str [[REG]], [r0, r1, lsl #2]
; A9: test4:
-; A9: add r0, r0, r{{[0-9]+}}, lsl #2
-; A9: ldr r1, [r0]
-; A9: str r1, [r0]
+; A9: ldr [[REG:r[0-9]+]], [r0, r1, lsl #2]
+; A9: str [[REG]], [r0, r1, lsl #2]
%0 = tail call i8* (...)* @malloc(i32 undef) nounwind
%1 = bitcast i8* %0 to i32*
%2 = sext i16 %addr to i32
diff --git a/test/CodeGen/ARM/str_pre-2.ll b/test/CodeGen/ARM/str_pre-2.ll
index b24f75a..f4e3a44 100644
--- a/test/CodeGen/ARM/str_pre-2.ll
+++ b/test/CodeGen/ARM/str_pre-2.ll
@@ -7,8 +7,8 @@
define i64 @t(i64 %a) nounwind readonly {
entry:
-; CHECK: str lr, [sp, #-4]!
-; CHECK: ldr lr, [sp], #4
+; CHECK: push {lr}
+; CHECK: pop {lr}
%0 = load i64** @b, align 4
%1 = load i64* %0, align 4
%2 = mul i64 %1, %a
diff --git a/test/CodeGen/ARM/subreg-remat.ll b/test/CodeGen/ARM/subreg-remat.ll
new file mode 100644
index 0000000..993d7ec
--- /dev/null
+++ b/test/CodeGen/ARM/subreg-remat.ll
@@ -0,0 +1,52 @@
+; RUN: llc < %s -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 -pre-RA-sched=source | FileCheck %s
+target triple = "thumbv7-apple-ios"
+; <rdar://problem/10032939>
+;
+; The vector %v2 is built like this:
+;
+; %vreg6:ssub_1<def> = VMOVSR %vreg0<kill>, pred:14, pred:%noreg, %vreg6<imp-def>; DPR_VFP2:%vreg6 GPR:%vreg0
+; %vreg6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%vreg6
+;
+; When %vreg6 spills, the VLDRS constant pool load cannot be rematerialized
+; since it implicitly reads the ssub_1 sub-register.
+;
+; CHECK: f1
+; CHECK: vmov s1, r0
+; CHECK: vldr.32 s0, LCPI
+; The vector must be spilled:
+; CHECK: vstr.64 d0,
+; CHECK: asm clobber d0
+; And reloaded after the asm:
+; CHECK: vldr.64 [[D16:d[0-9]+]],
+; CHECK: vstr.64 [[D16]], [r1]
+define void @f1(float %x, <2 x float>* %p) {
+ %v1 = insertelement <2 x float> undef, float %x, i32 1
+ %v2 = insertelement <2 x float> %v1, float 0x400921FB60000000, i32 0
+ %y = call double asm sideeffect "asm clobber $0", "=w,0,~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15},~{d16},~{d17},~{d18},~{d19},~{d20},~{d21},~{d22},~{d23},~{d24},~{d25},~{d26},~{d27},~{d28},~{d29},~{d30},~{d31}"(<2 x float> %v2) nounwind
+ store <2 x float> %v2, <2 x float>* %p, align 8
+ ret void
+}
+
+; On the other hand, when the partial redef doesn't read the full register
+; because the bits are undef, we should rematerialize. The vector is now built
+; like this:
+;
+; %vreg2:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg, %vreg2<imp-def>; mem:LD4[ConstantPool]
+;
+; The extra <imp-def> operand indicates that the instruction fully defines the
+; virtual register. It doesn't read the old value.
+;
+; CHECK: f2
+; CHECK: vldr.32 s0, LCPI
+; The vector must not be spilled:
+; CHECK-NOT: vstr.64
+; CHECK: asm clobber d0
+; But instead rematerialize after the asm:
+; CHECK: vldr.32 [[S0:s[0-9]+]], LCPI
+; CHECK: vstr.64 [[D0:d[0-9]+]], [r0]
+define void @f2(<2 x float>* %p) {
+ %v2 = insertelement <2 x float> undef, float 0x400921FB60000000, i32 0
+ %y = call double asm sideeffect "asm clobber $0", "=w,0,~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15},~{d16},~{d17},~{d18},~{d19},~{d20},~{d21},~{d22},~{d23},~{d24},~{d25},~{d26},~{d27},~{d28},~{d29},~{d30},~{d31}"(<2 x float> %v2) nounwind
+ store <2 x float> %v2, <2 x float>* %p, align 8
+ ret void
+}
diff --git a/test/CodeGen/ARM/sxt_rot.ll b/test/CodeGen/ARM/sxt_rot.ll
index 355fee3..656cd93 100644
--- a/test/CodeGen/ARM/sxt_rot.ll
+++ b/test/CodeGen/ARM/sxt_rot.ll
@@ -1,29 +1,30 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN: grep sxtb | count 2
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN: grep sxtb | grep ror | count 1
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN: grep sxtab | count 1
+; RUN: llc < %s -march=arm -mattr=+v6 | FileCheck %s
define i32 @test0(i8 %A) {
- %B = sext i8 %A to i32
- ret i32 %B
+; CHECK: test0
+; CHECK: sxtb r0, r0
+ %B = sext i8 %A to i32
+ ret i32 %B
}
define signext i8 @test1(i32 %A) {
- %B = lshr i32 %A, 8
- %C = shl i32 %A, 24
- %D = or i32 %B, %C
- %E = trunc i32 %D to i8
- ret i8 %E
+; CHECK: test1
+; CHECK: sxtb r0, r0, ror #8
+ %B = lshr i32 %A, 8
+ %C = shl i32 %A, 24
+ %D = or i32 %B, %C
+ %E = trunc i32 %D to i8
+ ret i8 %E
}
define signext i32 @test2(i32 %A, i32 %X) {
- %B = lshr i32 %A, 8
- %C = shl i32 %A, 24
- %D = or i32 %B, %C
- %E = trunc i32 %D to i8
- %F = sext i8 %E to i32
- %G = add i32 %F, %X
- ret i32 %G
+; CHECK: test2
+; CHECK: sxtab r0, r1, r0
+ %B = lshr i32 %A, 8
+ %C = shl i32 %A, 24
+ %D = or i32 %B, %C
+ %E = trunc i32 %D to i8
+ %F = sext i8 %E to i32
+ %G = add i32 %F, %X
+ ret i32 %G
}
diff --git a/test/CodeGen/ARM/tail-opts.ll b/test/CodeGen/ARM/tail-opts.ll
index 5b3dce3..3dc77e2 100644
--- a/test/CodeGen/ARM/tail-opts.ll
+++ b/test/CodeGen/ARM/tail-opts.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mcpu=cortex-a8 -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=dynamic-no-pic -mcpu=cortex-a8 -asm-verbose=false | FileCheck %s
declare void @bar(i32)
declare void @car(i32)
diff --git a/test/CodeGen/ARM/thumb2-it-block.ll b/test/CodeGen/ARM/thumb2-it-block.ll
new file mode 100644
index 0000000..28fd469
--- /dev/null
+++ b/test/CodeGen/ARM/thumb2-it-block.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; PR11107
+
+define i32 @test(i32 %a, i32 %b) {
+entry:
+; CHECK: movs.w
+; CHECK-NEXT: it mi
+; CHECK-NEXT: rsbmi
+; CHECK-NEXT: movs.w
+; CHECK-NEXT: it mi
+; CHECK-NEXT: rsbmi
+ %cmp1 = icmp slt i32 %a, 0
+ %sub1 = sub nsw i32 0, %a
+ %abs1 = select i1 %cmp1, i32 %sub1, i32 %a
+ %cmp2 = icmp slt i32 %b, 0
+ %sub2 = sub nsw i32 0, %b
+ %abs2 = select i1 %cmp2, i32 %sub2, i32 %b
+ %add = add nsw i32 %abs1, %abs2
+ ret i32 %add
+}
diff --git a/test/CodeGen/ARM/va_arg.ll b/test/CodeGen/ARM/va_arg.ll
index bb40453..af477b4 100644
--- a/test/CodeGen/ARM/va_arg.ll
+++ b/test/CodeGen/ARM/va_arg.ll
@@ -30,6 +30,7 @@ entry:
%ap1 = bitcast i8** %ap to i8* ; <i8*> [#uses=2]
call void @llvm.va_start(i8* %ap1)
%0 = va_arg i8** %ap, i32 ; <i32> [#uses=0]
+ store i32 %0, i32* undef
%1 = va_arg i8** %ap, double ; <double> [#uses=1]
call void @llvm.va_end(i8* %ap1)
ret double %1
diff --git a/test/CodeGen/ARM/vext.ll b/test/CodeGen/ARM/vext.ll
index 49a042b..65b5913 100644
--- a/test/CodeGen/ARM/vext.ll
+++ b/test/CodeGen/ARM/vext.ll
@@ -133,3 +133,20 @@ define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 7, i32 5, i32 13, i32 3, i32 2, i32 2, i32 9>
ret <8 x i16> %tmp3
}
+
+; PR11129
+; Make sure this doesn't crash
+define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>* nocapture %dest) nounwind {
+; CHECK: test_elem_mismatch:
+; CHECK: vstr.64
+ %tmp0 = load <2 x i64>* %src, align 16
+ %tmp1 = bitcast <2 x i64> %tmp0 to <4 x i32>
+ %tmp2 = extractelement <4 x i32> %tmp1, i32 0
+ %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+ %tmp4 = trunc i32 %tmp2 to i16
+ %tmp5 = trunc i32 %tmp3 to i16
+ %tmp6 = insertelement <4 x i16> undef, i16 %tmp4, i32 0
+ %tmp7 = insertelement <4 x i16> %tmp6, i16 %tmp5, i32 1
+ store <4 x i16> %tmp7, <4 x i16>* %dest, align 4
+ ret void
+}
diff --git a/test/CodeGen/ARM/widen-vmovs.ll b/test/CodeGen/ARM/widen-vmovs.ll
new file mode 100644
index 0000000..8fd99ba
--- /dev/null
+++ b/test/CodeGen/ARM/widen-vmovs.ll
@@ -0,0 +1,35 @@
+; RUN: llc < %s -widen-vmovs -mcpu=cortex-a8 -verify-machineinstrs | FileCheck %s
+target triple = "thumbv7-apple-ios"
+
+; The 0.0 constant is loaded from the constant pool and kept in a register.
+; CHECK: %entry
+; CHECK: vldr.32 s
+; The float loop variable is initialized with a vmovs from the constant register.
+; The vmovs is first widened to a vmovd, and then converted to a vorr because of the v2f32 vadd.f32.
+; CHECK: vorr [[DL:d[0-9]+]], [[DN:d[0-9]+]]
+; CHECK: , [[DN]]
+; CHECK: %for.body.i
+; CHECK: vadd.f32 [[DL]], [[DL]], [[DN]]
+;
+; This test verifies that:
+; - the VMOVS widening happens,
+; - register liveness stays valid (checked by -verify-machineinstrs), and
+; - the execution domain switch to vorr works across basic blocks.
+
+define void @Mm() nounwind {
+entry:
+ br label %for.body4
+
+for.body4:
+ br label %for.body.i
+
+for.body.i:
+ %tmp3.i = phi float [ 0.000000e+00, %for.body4 ], [ %add.i, %for.body.i ]
+ %add.i = fadd float %tmp3.i, 0.000000e+00
+ %exitcond.i = icmp eq i32 undef, 41
+ br i1 %exitcond.i, label %rInnerproduct.exit, label %for.body.i
+
+rInnerproduct.exit:
+ store float %add.i, float* undef, align 4
+ br label %for.body4
+}
diff --git a/test/CodeGen/Alpha/2006-04-04-zextload.ll b/test/CodeGen/Alpha/2006-04-04-zextload.ll
index 2b28903..671d39e 100644
--- a/test/CodeGen/Alpha/2006-04-04-zextload.ll
+++ b/test/CodeGen/Alpha/2006-04-04-zextload.ll
@@ -2,7 +2,6 @@
target datalayout = "e-p:64:64"
target triple = "alphaev67-unknown-linux-gnu"
- %llvm.dbg.compile_unit.type = type { i32, { }*, i32, i32, i8*, i8*, i8* }
%struct._Callback_list = type { %struct._Callback_list*, void (i32, %struct.ios_base*, i32)*, i32, i32 }
%struct._Impl = type { i32, %struct.facet**, i64, %struct.facet**, i8** }
%struct._Words = type { i8*, i64 }
@@ -12,7 +11,6 @@ target triple = "alphaev67-unknown-linux-gnu"
%struct.ios_base = type { i32 (...)**, i64, i64, i32, i32, i32, %struct._Callback_list*, %struct._Words, [8 x %struct._Words], i32, %struct._Words*, %struct.locale }
%struct.locale = type { %struct._Impl* }
%"struct.ostreambuf_iterator<char,std::char_traits<char> >" = type { %"struct.basic_streambuf<char,std::char_traits<char> >"*, i1 }
-@llvm.dbg.compile_unit1047 = external global %llvm.dbg.compile_unit.type ; <%llvm.dbg.compile_unit.type*> [#uses=1]
define void @_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE15_M_insert_floatIdEES3_S3_RSt8ios_baseccT_() {
entry:
@@ -26,9 +24,7 @@ cond_next243: ; preds = %entry
%tmp428 = load i64* null ; <i64> [#uses=1]
%tmp428.upgrd.1 = trunc i64 %tmp428 to i32 ; <i32> [#uses=1]
%tmp429 = alloca i8, i32 %tmp428.upgrd.1 ; <i8*> [#uses=0]
- call void @llvm.dbg.stoppoint( i32 1146, i32 0, { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit1047 to { }*) )
unreachable
}
-declare void @llvm.dbg.stoppoint(i32, i32, { }*)
diff --git a/test/CodeGen/Alpha/mb.ll b/test/CodeGen/Alpha/mb.ll
index 93e8b1b..3268c54 100644
--- a/test/CodeGen/Alpha/mb.ll
+++ b/test/CodeGen/Alpha/mb.ll
@@ -1,8 +1,6 @@
; RUN: llc < %s -march=alpha | grep mb
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
define void @test() {
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true , i1 true)
+ fence seq_cst
ret void
}
diff --git a/test/CodeGen/Alpha/wmb.ll b/test/CodeGen/Alpha/wmb.ll
deleted file mode 100644
index a3e2ccf..0000000
--- a/test/CodeGen/Alpha/wmb.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=alpha | grep wmb
-
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
-define void @test() {
- call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 true , i1 true)
- ret void
-}
diff --git a/test/CodeGen/CBackend/X86/dg.exp b/test/CodeGen/CBackend/X86/dg.exp
index 833bcc5..44e3a5e 100644
--- a/test/CodeGen/CBackend/X86/dg.exp
+++ b/test/CodeGen/CBackend/X86/dg.exp
@@ -1,5 +1,5 @@
load_lib llvm.exp
-if { [llvm_supports_target X86] && [llvm_gcc_supports c] } {
+if { [llvm_supports_target X86] && [llvm_supports_target CBackend] } {
RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp,s}]]
}
diff --git a/test/CodeGen/CellSPU/jumptable.ll b/test/CodeGen/CellSPU/jumptable.ll
index 87376ef..66c2fde 100644
--- a/test/CodeGen/CellSPU/jumptable.ll
+++ b/test/CodeGen/CellSPU/jumptable.ll
@@ -4,18 +4,18 @@ define i32 @test(i32 %param) {
entry:
;CHECK: ai {{\$.}}, $3, -1
;CHECK: clgti {{\$., \$.}}, 3
-;CHECK: brnz {{\$.}},.LBB0_2
- switch i32 %param, label %bb1 [
- i32 1, label %bb3
+;CHECK: brnz {{\$.}},.LBB0_
+ switch i32 %param, label %bb2 [
+ i32 1, label %bb1
i32 2, label %bb2
i32 3, label %bb3
- i32 4, label %bb1
+ i32 4, label %bb2
]
-
+;CHECK-NOT: # BB#2
bb1:
ret i32 1
bb2:
ret i32 2
bb3:
- ret i32 3
+ ret i32 %param
}
diff --git a/test/CodeGen/CellSPU/or_ops.ll b/test/CodeGen/CellSPU/or_ops.ll
index 46349b9..4f1febb 100644
--- a/test/CodeGen/CellSPU/or_ops.ll
+++ b/test/CodeGen/CellSPU/or_ops.ll
@@ -1,9 +1,11 @@
; RUN: llc < %s -march=cellspu > %t1.s
; RUN: grep and %t1.s | count 2
; RUN: grep orc %t1.s | count 85
-; RUN: grep ori %t1.s | count 30
+; RUN: grep ori %t1.s | count 34
; RUN: grep orhi %t1.s | count 30
; RUN: grep orbi %t1.s | count 15
+; RUN: FileCheck %s < %t1.s
+
target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
target triple = "spu"
@@ -210,6 +212,15 @@ define signext i32 @ori_i32(i32 signext %in) {
ret i32 %tmp38
}
+define i32 @ori_i32_600(i32 %in) {
+ ;600 does not fit into 'ori' immediate field
+ ;CHECK: ori_i32_600
+ ;CHECK: il
+ ;CHECK: ori
+ %tmp = or i32 %in, 600
+ ret i32 %tmp
+}
+
; ORHI instruction generation (i16 data type):
define <8 x i16> @orhi_v8i16_1(<8 x i16> %in) {
%tmp2 = or <8 x i16> %in, < i16 511, i16 511, i16 511, i16 511,
diff --git a/test/CodeGen/Generic/2004-02-08-UnwindSupport.ll b/test/CodeGen/Generic/2004-02-08-UnwindSupport.ll
deleted file mode 100644
index 393062a..0000000
--- a/test/CodeGen/Generic/2004-02-08-UnwindSupport.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -enable-correct-eh-support
-
-define i32 @test() {
- unwind
-}
-
-define i32 @main() {
- %X = invoke i32 @test( )
- to label %cont unwind label %EH ; <i32> [#uses=0]
-
-cont: ; preds = %0
- ret i32 1
-
-EH: ; preds = %0
- ret i32 0
-}
-
diff --git a/test/CodeGen/Generic/2007-02-25-invoke.ll b/test/CodeGen/Generic/2007-02-25-invoke.ll
index 6e20eaa..7850cec 100644
--- a/test/CodeGen/Generic/2007-02-25-invoke.ll
+++ b/test/CodeGen/Generic/2007-02-25-invoke.ll
@@ -8,5 +8,9 @@ define i32 @test2() {
invcont:
ret i32 %A
blat:
- ret i32 0
+ %lpad = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ ret i32 0
}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/Generic/2007-04-30-LandingPadBranchFolding.ll b/test/CodeGen/Generic/2007-04-30-LandingPadBranchFolding.ll
index 2a2cf6c..407696f 100644
--- a/test/CodeGen/Generic/2007-04-30-LandingPadBranchFolding.ll
+++ b/test/CodeGen/Generic/2007-04-30-LandingPadBranchFolding.ll
@@ -45,7 +45,9 @@ cond_next1328: ; preds = %cond_true235, %cond_true
ret void
cond_true1402: ; preds = %invcont282, %cond_false280, %cond_true235, %cond_true
- ret void
+ %lpad = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ ret void
}
declare void @_ZNSs14_M_replace_auxEjjjc()
@@ -57,3 +59,5 @@ declare void @_ZNSs6assignEPKcj()
declare void @_ZNSs7reserveEj()
declare void @_ZNSs6appendEPKcj()
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll b/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
index 5df2200..27c7162 100644
--- a/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
+++ b/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
@@ -2,12 +2,16 @@
define fastcc void @bc__support__high_resolution_time__initialize_clock_rate() {
entry:
- invoke void asm "rdtsc\0A\09movl %eax, $0\0A\09movl %edx, $1", "=*imr,=*imr,~{dirflag},~{fpsr},~{flags},~{dx},~{ax}"( i32* null, i32* null )
- to label %.noexc unwind label %cleanup144
+ invoke void asm "rdtsc\0A\09movl %eax, $0\0A\09movl %edx, $1", "=*imr,=*imr,~{dirflag},~{fpsr},~{flags},~{dx},~{ax}"( i32* null, i32* null )
+ to label %.noexc unwind label %cleanup144
.noexc: ; preds = %entry
- ret void
+ ret void
cleanup144: ; preds = %entry
- unwind
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ resume { i8*, i32 } %exn
}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/Generic/2007-12-31-UnusedSelector.ll b/test/CodeGen/Generic/2007-12-31-UnusedSelector.ll
index 00e027b..943ed88 100644
--- a/test/CodeGen/Generic/2007-12-31-UnusedSelector.ll
+++ b/test/CodeGen/Generic/2007-12-31-UnusedSelector.ll
@@ -14,11 +14,14 @@ bb14: ; preds = %lpad
unreachable
lpad: ; preds = %entry
+ %lpad1 = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
invoke void @__cxa_end_catch( )
to label %bb14 unwind label %lpad17
lpad17: ; preds = %lpad
- %eh_select20 = tail call i32 (i8*, i8*, ...)* @llvm.eh.selector.i32( i8* null, i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*), i8* null ) ; <i32> [#uses=0]
+ %lpad2 = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
unreachable
UnifiedUnreachableBlock: ; preds = %entry
diff --git a/test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll b/test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll
index 112cac4..ad418f7 100644
--- a/test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll
+++ b/test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll
@@ -11,5 +11,9 @@ dummy:
invoke i32 @b() to label %reg unwind label %reg
reg:
+ %lpad = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
ret void
}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/Generic/2009-11-16-BadKillsCrash.ll b/test/CodeGen/Generic/2009-11-16-BadKillsCrash.ll
index 22bd4d7..3cbf4c5 100644
--- a/test/CodeGen/Generic/2009-11-16-BadKillsCrash.ll
+++ b/test/CodeGen/Generic/2009-11-16-BadKillsCrash.ll
@@ -64,10 +64,16 @@ invcont38: ; preds = %invcont25, %bb1.i,
lpad: ; preds = %bb.i93, %invcont24, %bb1.i, %invcont8
%__extracted.1 = phi i32 [ 0, %invcont8 ], [ %2, %bb1.i ], [ undef, %bb.i93 ], [ undef, %invcont24 ] ; <i32> [#uses=0]
- %eh_ptr = call i8* @llvm.eh.exception() ; <i8*> [#uses=1]
+ %lpad1 = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
+ %eh_ptr = extractvalue { i8*, i32 } %lpad1, 0
%6 = call i8* @__cxa_begin_catch(i8* %eh_ptr) nounwind ; <i8*> [#uses=0]
unreachable
lpad74: ; preds = %entry
+ %lpad2 = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll b/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
index cd446d5..da26504 100644
--- a/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
+++ b/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
@@ -3,6 +3,9 @@
; the uses of a copy to a physical register without ignoring non-data
; dependence, PR10220.
+; The ARM backend can't handle i256 math at the moment.
+; XFAIL: arm
+
define void @f(i256* nocapture %a, i256* nocapture %b, i256* nocapture %cc, i256* nocapture %dd) nounwind uwtable noinline ssp {
entry:
%c = load i256* %cc
diff --git a/test/CodeGen/Generic/exception-handling.ll b/test/CodeGen/Generic/exception-handling.ll
new file mode 100644
index 0000000..376e1f1
--- /dev/null
+++ b/test/CodeGen/Generic/exception-handling.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s
+; PR10733
+declare void @_Znam()
+
+define void @_ZNK14gIndexOdometer15AfterExcisionOfERi() uwtable align 2 {
+_ZN6Gambit5ArrayIiEC2Ej.exit36:
+ br label %"9"
+
+"9": ; preds = %"10", %_ZN6Gambit5ArrayIiEC2Ej.exit36
+ %indvar82 = phi i64 [ 0, %_ZN6Gambit5ArrayIiEC2Ej.exit36 ], [ %tmp85, %"10" ]
+ %tmp85 = add i64 %indvar82, 1
+ %tmp = trunc i64 %tmp85 to i32
+ invoke void @_ZNK14gIndexOdometer9NoIndicesEv()
+ to label %"10" unwind label %lpad27
+
+"10": ; preds = %"9"
+ invoke void @_Znam()
+ to label %"9" unwind label %lpad27
+
+lpad27: ; preds = %"10", %"9"
+ %0 = phi i32 [ undef, %"9" ], [ %tmp, %"10" ]
+ %1 = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @__gxx_personality_v0
+ cleanup
+ resume { i8*, i32 } zeroinitializer
+}
+
+declare void @_ZNK14gIndexOdometer9NoIndicesEv()
+
+declare i32 @__gxx_personality_v0(i32, i64, i8*, i8*)
diff --git a/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll b/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll
index a3cab5d..e709080 100644
--- a/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll
+++ b/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll
@@ -13,6 +13,9 @@ normal:
ret void
handler:
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
ret void
}
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/Generic/promote-integers.ll b/test/CodeGen/Generic/promote-integers.ll
deleted file mode 100644
index 5812592..0000000
--- a/test/CodeGen/Generic/promote-integers.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; Test that vectors are scalarized/lowered correctly.
-; RUN: llc -march=x86 -promote-elements < %s | FileCheck %s
-
-; This test is the poster-child for integer-element-promotion.
-; Until this feature is complete, we mark this test as expected to fail.
-; XFAIL: *
-; CHECK: vector_code
-; CHECK: ret
-define <4 x float> @vector_code(<4 x i64> %A, <4 x i64> %B, <4 x float> %R0, <4 x float> %R1 ) {
- %C = icmp eq <4 x i64> %A, %B
- %K = xor <4 x i1> <i1 1, i1 1, i1 1, i1 1>, %C
- %D = select <4 x i1> %K, <4 x float> %R1, <4 x float> %R0
- ret <4 x float> %D
-}
-
diff --git a/test/CodeGen/Mips/2008-07-05-ByVal.ll b/test/CodeGen/Mips/2008-07-05-ByVal.ll
deleted file mode 100644
index a1f0504..0000000
--- a/test/CodeGen/Mips/2008-07-05-ByVal.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=mips | grep {lw.*(\$4)} | count 2
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-unknown-psp-elf"
- %struct.byval0 = type { i32, i32 }
-
-define i64 @test0(%struct.byval0* byval %b, i64 %sum) nounwind {
-entry:
- getelementptr %struct.byval0* %b, i32 0, i32 0 ; <i32*>:0 [#uses=1]
- load i32* %0, align 4 ; <i32>:1 [#uses=1]
- getelementptr %struct.byval0* %b, i32 0, i32 1 ; <i32*>:2 [#uses=1]
- load i32* %2, align 4 ; <i32>:3 [#uses=1]
- add i32 %3, %1 ; <i32>:4 [#uses=1]
- sext i32 %4 to i64 ; <i64>:5 [#uses=1]
- add i64 %5, %sum ; <i64>:6 [#uses=1]
- ret i64 %6
-}
-
diff --git a/test/CodeGen/Mips/2008-07-06-fadd64.ll b/test/CodeGen/Mips/2008-07-06-fadd64.ll
index ecd8521..ff8ed4d 100644
--- a/test/CodeGen/Mips/2008-07-06-fadd64.ll
+++ b/test/CodeGen/Mips/2008-07-06-fadd64.ll
@@ -1,10 +1,8 @@
-; RUN: llc < %s -march=mips | grep __adddf3
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-unknown-psp-elf"
+; RUN: llc -march=mips -mattr=single-float < %s | FileCheck %s
define double @dofloat(double %a, double %b) nounwind {
entry:
+; CHECK: __adddf3
fadd double %a, %b ; <double>:0 [#uses=1]
ret double %0
}
diff --git a/test/CodeGen/Mips/2008-07-07-FPExtend.ll b/test/CodeGen/Mips/2008-07-07-FPExtend.ll
index 681788e..29c8e84 100644
--- a/test/CodeGen/Mips/2008-07-07-FPExtend.ll
+++ b/test/CodeGen/Mips/2008-07-07-FPExtend.ll
@@ -1,10 +1,8 @@
-; RUN: llc < %s -march=mips | grep __extendsfdf2
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-unknown-psp-elf"
+; RUN: llc -march=mips -mattr=single-float < %s | FileCheck %s
define double @dofloat(float %a) nounwind {
entry:
+; CHECK: __extendsfdf2
fpext float %a to double ; <double>:0 [#uses=1]
ret double %0
}
diff --git a/test/CodeGen/Mips/2008-07-07-IntDoubleConvertions.ll b/test/CodeGen/Mips/2008-07-07-IntDoubleConvertions.ll
index b8b4c5c..9a6bbdf 100644
--- a/test/CodeGen/Mips/2008-07-07-IntDoubleConvertions.ll
+++ b/test/CodeGen/Mips/2008-07-07-IntDoubleConvertions.ll
@@ -1,32 +1,33 @@
-; RUN: llc < %s -march=mips -o %t
-; RUN: grep __floatsidf %t | count 1
-; RUN: grep __floatunsidf %t | count 1
-; RUN: grep __fixdfsi %t | count 1
-; RUN: grep __fixunsdfsi %t | count 1
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-unknown-psp-elf"
+; RUN: llc -march=mips -mattr=single-float < %s | FileCheck %s
define double @int2fp(i32 %a) nounwind {
entry:
+; CHECK: int2fp
+; CHECK: __floatsidf
sitofp i32 %a to double ; <double>:0 [#uses=1]
ret double %0
}
define double @uint2double(i32 %a) nounwind {
entry:
+; CHECK: uint2double
+; CHECK: __floatunsidf
uitofp i32 %a to double ; <double>:0 [#uses=1]
ret double %0
}
define i32 @double2int(double %a) nounwind {
entry:
+; CHECK: double2int
+; CHECK: __fixdfsi
fptosi double %a to i32 ; <i32>:0 [#uses=1]
ret i32 %0
}
define i32 @double2uint(double %a) nounwind {
entry:
+; CHECK: double2uint
+; CHECK: __fixunsdfsi
fptoui double %a to i32 ; <i32>:0 [#uses=1]
ret i32 %0
}
diff --git a/test/CodeGen/Mips/2008-07-15-InternalConstant.ll b/test/CodeGen/Mips/2008-07-15-InternalConstant.ll
index c3db638..29a7b5c 100644
--- a/test/CodeGen/Mips/2008-07-15-InternalConstant.ll
+++ b/test/CodeGen/Mips/2008-07-15-InternalConstant.ll
@@ -1,22 +1,23 @@
-; RUN: llc < %s -march=mips -o %t
-; RUN: grep {rodata.str1.4,"aMS",@progbits} %t | count 1
-; RUN: grep {r.data,} %t | count 1
-; RUN: grep {\%hi} %t | count 2
-; RUN: grep {\%lo} %t | count 2
-; RUN: not grep {gp_rel} %t
+; RUN: llc -march=mips -relocation-model=static < %s | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-unknown-psp-elf"
@.str = internal unnamed_addr constant [10 x i8] c"AAAAAAAAA\00"
-@i0 = internal unnamed_addr constant [5 x i32] [ i32 0, i32 1, i32 2, i32 3, i32 4 ]
+@i0 = internal unnamed_addr constant [5 x i32] [ i32 0, i32 1, i32 2, i32 3, i32 4 ]
define i8* @foo() nounwind {
entry:
+; CHECK: foo
+; CHECK: %hi(.str)
+; CHECK: %lo(.str)
ret i8* getelementptr ([10 x i8]* @.str, i32 0, i32 0)
}
define i32* @bar() nounwind {
entry:
+; CHECK: bar
+; CHECK: %hi(i0)
+; CHECK: %lo(i0)
ret i32* getelementptr ([5 x i32]* @i0, i32 0, i32 0)
}
+; CHECK: rodata.str1.4,"aMS",@progbits
+; CHECK: rodata,"a",@progbits
diff --git a/test/CodeGen/Mips/2008-07-15-SmallSection.ll b/test/CodeGen/Mips/2008-07-15-SmallSection.ll
index 4795e47..cbc3ecf 100644
--- a/test/CodeGen/Mips/2008-07-15-SmallSection.ll
+++ b/test/CodeGen/Mips/2008-07-15-SmallSection.ll
@@ -1,13 +1,16 @@
-; RUN: llc < %s -mips-ssection-threshold=8 -march=mips -o %t0
-; RUN: llc < %s -mips-ssection-threshold=0 -march=mips -o %t1
-; RUN: grep {sdata} %t0 | count 1
-; RUN: grep {sbss} %t0 | count 1
-; RUN: grep {gp_rel} %t0 | count 2
-; RUN: not grep {sdata} %t1
-; RUN: not grep {sbss} %t1
-; RUN: not grep {gp_rel} %t1
-; RUN: grep {\%hi} %t1 | count 2
-; RUN: grep {\%lo} %t1 | count 3
+; DISABLED: llc < %s -mips-ssection-threshold=8 -march=mips -o %t0
+; DISABLED: llc < %s -mips-ssection-threshold=0 -march=mips -o %t1
+; DISABLED: grep {sdata} %t0 | count 1
+; DISABLED: grep {sbss} %t0 | count 1
+; DISABLED: grep {gp_rel} %t0 | count 2
+; DISABLED: not grep {sdata} %t1
+; DISABLED: not grep {sbss} %t1
+; DISABLED: not grep {gp_rel} %t1
+; DISABLED: grep {\%hi} %t1 | count 2
+; DISABLED: grep {\%lo} %t1 | count 3
+; RUN: false
+; XFAIL: *
+
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "mipsallegrexel-unknown-psp-elf"
diff --git a/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll b/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
index 855194a..e0c745f 100644
--- a/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
+++ b/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
@@ -1,6 +1,8 @@
-; RUN: llc < %s -march=mips -o %t
-; RUN: grep seh %t | count 1
-; RUN: grep seb %t | count 1
+; DISABLED: llc < %s -march=mips -o %t
+; DISABLED: grep seh %t | count 1
+; DISABLED: grep seb %t | count 1
+; RUN: false
+; XFAIL: *
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "mipsallegrexel-unknown-psp-elf"
diff --git a/test/CodeGen/Mips/2008-08-03-fabs64.ll b/test/CodeGen/Mips/2008-08-03-fabs64.ll
index 0fc45f7..2b1713c 100644
--- a/test/CodeGen/Mips/2008-08-03-fabs64.ll
+++ b/test/CodeGen/Mips/2008-08-03-fabs64.ll
@@ -1,6 +1,8 @@
-; RUN: llc < %s -march=mips -o %t
-; RUN: grep {lui.*32767} %t | count 1
-; RUN: grep {ori.*65535} %t | count 1
+; DISABLED: llc < %s -march=mips -o %t
+; DISABLED: grep {lui.*32767} %t | count 1
+; DISABLED: grep {ori.*65535} %t | count 1
+; RUN: false
+; XFAIL: *
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "mipsallegrexel-unknown-psp-elf"
diff --git a/test/CodeGen/Mips/2008-08-07-FPRound.ll b/test/CodeGen/Mips/2008-08-07-FPRound.ll
index 67f86d7..4fa43b6 100644
--- a/test/CodeGen/Mips/2008-08-07-FPRound.ll
+++ b/test/CodeGen/Mips/2008-08-07-FPRound.ll
@@ -1,10 +1,8 @@
-; RUN: llc < %s -march=mips | grep __truncdfsf2 | count 1
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-unknown-psp-elf"
+; RUN: llc -march=mips -mattr=single-float < %s | FileCheck %s
define float @round2float(double %a) nounwind {
entry:
+; CHECK: __truncdfsf2
fptrunc double %a to float ; <float>:0 [#uses=1]
ret float %0
}
diff --git a/test/CodeGen/Mips/2008-08-08-bswap.ll b/test/CodeGen/Mips/2008-08-08-bswap.ll
index 83289d9..596da24 100644
--- a/test/CodeGen/Mips/2008-08-08-bswap.ll
+++ b/test/CodeGen/Mips/2008-08-08-bswap.ll
@@ -1,4 +1,7 @@
-; RUN: llc < %s | grep wsbw | count 1
+; DISABLED: llc < %s | grep wsbw | count 1
+; RUN: false
+; XFAIL: *
+
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "psp"
diff --git a/test/CodeGen/Mips/2010-07-20-Select.ll b/test/CodeGen/Mips/2010-07-20-Select.ll
deleted file mode 100644
index e5e2c54..0000000
--- a/test/CodeGen/Mips/2010-07-20-Select.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=mips -relocation-model=static | FileCheck %s
-; RUN: llc < %s -march=mips -relocation-model=static -regalloc=basic | FileCheck %s
-; Fix PR7473
-
-define i32 @main() nounwind readnone {
-entry:
- %a = alloca i32, align 4 ; <i32*> [#uses=2]
- %c = alloca i32, align 4 ; <i32*> [#uses=2]
- volatile store i32 1, i32* %a, align 4
- volatile store i32 0, i32* %c, align 4
- %0 = volatile load i32* %a, align 4 ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
-; CHECK: addiu $[[R1:[0-9]+]], $zero, 0
- %iftmp.0.0 = select i1 %1, i32 3, i32 0 ; <i32> [#uses=1]
- %2 = volatile load i32* %c, align 4 ; <i32> [#uses=1]
- %3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
-; CHECK: addiu $[[R1]], $zero, 3
-; CHECK: addu $2, ${{.}}, $[[R1]]
- %iftmp.2.0 = select i1 %3, i32 0, i32 5 ; <i32> [#uses=1]
- %4 = add nsw i32 %iftmp.2.0, %iftmp.0.0 ; <i32> [#uses=1]
- ret i32 %4
-}
diff --git a/test/CodeGen/Mips/2010-11-09-CountLeading.ll b/test/CodeGen/Mips/2010-11-09-CountLeading.ll
index d592fef..c592b31 100644
--- a/test/CodeGen/Mips/2010-11-09-CountLeading.ll
+++ b/test/CodeGen/Mips/2010-11-09-CountLeading.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mips -mcpu=4ke < %s | FileCheck %s
+; RUN: llc -march=mips < %s | FileCheck %s
; CHECK: clz $2, $4
define i32 @t1(i32 %X) nounwind readnone {
diff --git a/test/CodeGen/Mips/2010-11-09-Mul.ll b/test/CodeGen/Mips/2010-11-09-Mul.ll
index 65a10b5..dcade3c 100644
--- a/test/CodeGen/Mips/2010-11-09-Mul.ll
+++ b/test/CodeGen/Mips/2010-11-09-Mul.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mips -mcpu=4ke < %s | FileCheck %s
+; RUN: llc -march=mips < %s | FileCheck %s
; CHECK: mul $2, $5, $4
define i32 @mul1(i32 %a, i32 %b) nounwind readnone {
diff --git a/test/CodeGen/Mips/alloca.ll b/test/CodeGen/Mips/alloca.ll
index fb4f56c..15c73e2 100644
--- a/test/CodeGen/Mips/alloca.ll
+++ b/test/CodeGen/Mips/alloca.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -mcpu=4ke < %s | FileCheck %s
+; RUN: llc -march=mipsel < %s | FileCheck %s
define i32 @twoalloca(i32 %size) nounwind {
entry:
diff --git a/test/CodeGen/Mips/atomic.ll b/test/CodeGen/Mips/atomic.ll
index 2d5555b..a4763b1 100644
--- a/test/CodeGen/Mips/atomic.ll
+++ b/test/CodeGen/Mips/atomic.ll
@@ -1,30 +1,16 @@
-; RUN: llc -march=mipsel -mcpu=mips2 < %s | FileCheck %s
-
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32* nocapture, i32) nounwind
-declare i32 @llvm.atomic.swap.i32.p0i32(i32* nocapture, i32) nounwind
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* nocapture, i32, i32) nounwind
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8* nocapture, i8) nounwind
-declare i8 @llvm.atomic.swap.i8.p0i8(i8* nocapture, i8) nounwind
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* nocapture, i8, i8) nounwind
-
+; RUN: llc -march=mipsel < %s | FileCheck %s
@x = common global i32 0, align 4
define i32 @AtomicLoadAdd32(i32 %incr) nounwind {
entry:
- %0 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* @x, i32 %incr)
+ %0 = atomicrmw add i32* @x, i32 %incr monotonic
ret i32 %0
; CHECK: AtomicLoadAdd32:
; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
; CHECK: $[[BB0:[A-Z_0-9]+]]:
; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK: or $2, $zero, $[[R1]]
; CHECK: addu $[[R2:[0-9]+]], $[[R1]], $4
; CHECK: sc $[[R2]], 0($[[R0]])
; CHECK: beq $[[R2]], $zero, $[[BB0]]
@@ -32,51 +18,49 @@ entry:
define i32 @AtomicLoadNand32(i32 %incr) nounwind {
entry:
- %0 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* @x, i32 %incr)
+ %0 = atomicrmw nand i32* @x, i32 %incr monotonic
ret i32 %0
; CHECK: AtomicLoadNand32:
; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
; CHECK: $[[BB0:[A-Z_0-9]+]]:
; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK: or $2, $zero, $[[R1]]
-; CHECK: and $[[R1]], $[[R1]], $4
-; CHECK: nor $[[R2:[0-9]+]], $zero, $[[R1]]
+; CHECK: and $[[R3:[0-9]+]], $[[R1]], $4
+; CHECK: nor $[[R2:[0-9]+]], $zero, $[[R3]]
; CHECK: sc $[[R2]], 0($[[R0]])
; CHECK: beq $[[R2]], $zero, $[[BB0]]
}
-define i32 @AtomicSwap32(i32 %oldval) nounwind {
+define i32 @AtomicSwap32(i32 %newval) nounwind {
entry:
- %0 = call i32 @llvm.atomic.swap.i32.p0i32(i32* @x, i32 %oldval)
+ %newval.addr = alloca i32, align 4
+ store i32 %newval, i32* %newval.addr, align 4
+ %tmp = load i32* %newval.addr, align 4
+ %0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
ret i32 %0
; CHECK: AtomicSwap32:
; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
-; CHECK: sw $4, [[OFFSET:[0-9]+]]($sp)
; CHECK: $[[BB0:[A-Z_0-9]+]]:
-; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK: or $2, $zero, $[[R1]]
-; CHECK: lw $[[R2:[0-9]+]], [[OFFSET]]($sp)
-; CHECK: or $[[R3:[0-9]+]], $zero, $[[R2]]
-; CHECK: sc $[[R3]], 0($[[R0]])
-; CHECK: beq $[[R3]], $zero, $[[BB0]]
+; CHECK: ll ${{[0-9]+}}, 0($[[R0]])
+; CHECK: sc $[[R2:[0-9]+]], 0($[[R0]])
+; CHECK: beq $[[R2]], $zero, $[[BB0]]
}
define i32 @AtomicCmpSwap32(i32 %oldval, i32 %newval) nounwind {
entry:
- %0 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* @x, i32 %oldval, i32 %newval)
+ %newval.addr = alloca i32, align 4
+ store i32 %newval, i32* %newval.addr, align 4
+ %tmp = load i32* %newval.addr, align 4
+ %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic
ret i32 %0
; CHECK: AtomicCmpSwap32:
; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
-; CHECK: sw $5, [[OFFSET:[0-9]+]]($sp)
; CHECK: $[[BB0:[A-Z_0-9]+]]:
; CHECK: ll $2, 0($[[R0]])
; CHECK: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
-; CHECK: lw $[[R1:[0-9]+]], [[OFFSET]]($sp)
-; CHECK: or $[[R2:[0-9]+]], $zero, $[[R1]]
-; CHECK: sc $[[R2]], 0($[[R0]])
+; CHECK: sc $[[R2:[0-9]+]], 0($[[R0]])
; CHECK: beq $[[R2]], $zero, $[[BB0]]
; CHECK: $[[BB1]]:
}
@@ -87,7 +71,7 @@ entry:
define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
entry:
- %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @y, i8 %incr)
+ %0 = atomicrmw add i8* @y, i8 %incr monotonic
ret i8 %0
; CHECK: AtomicLoadAdd8:
@@ -97,10 +81,9 @@ entry:
; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
; CHECK: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
+; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK: andi $[[R8:[0-9]+]], $4, 255
-; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
+; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
; CHECK: $[[BB0:[A-Z_0-9]+]]:
; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
@@ -112,14 +95,14 @@ entry:
; CHECK: beq $[[R14]], $zero, $[[BB0]]
; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
+; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
; CHECK: sra $2, $[[R17]], 24
}
define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
entry:
- %0 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @y, i8 %incr)
+ %0 = atomicrmw sub i8* @y, i8 %incr monotonic
ret i8 %0
; CHECK: AtomicLoadSub8:
@@ -129,15 +112,13 @@ entry:
; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
; CHECK: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
+; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK: subu $[[R18:[0-9]+]], $zero, $4
-; CHECK: andi $[[R8:[0-9]+]], $[[R18]], 255
-; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
+; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
; CHECK: $[[BB0:[A-Z_0-9]+]]:
; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; CHECK: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
@@ -145,14 +126,14 @@ entry:
; CHECK: beq $[[R14]], $zero, $[[BB0]]
; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
+; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
; CHECK: sra $2, $[[R17]], 24
}
define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
entry:
- %0 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @y, i8 %incr)
+ %0 = atomicrmw nand i8* @y, i8 %incr monotonic
ret i8 %0
; CHECK: AtomicLoadNand8:
@@ -162,10 +143,9 @@ entry:
; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
; CHECK: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
+; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK: andi $[[R8:[0-9]+]], $4, 255
-; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
+; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
; CHECK: $[[BB0:[A-Z_0-9]+]]:
; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
@@ -178,14 +158,14 @@ entry:
; CHECK: beq $[[R14]], $zero, $[[BB0]]
; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
+; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
; CHECK: sra $2, $[[R17]], 24
}
-define signext i8 @AtomicSwap8(i8 signext %oldval) nounwind {
+define signext i8 @AtomicSwap8(i8 signext %newval) nounwind {
entry:
- %0 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @y, i8 %oldval)
+ %0 = atomicrmw xchg i8* @y, i8 %newval monotonic
ret i8 %0
; CHECK: AtomicSwap8:
@@ -195,31 +175,26 @@ entry:
; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
; CHECK: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
+; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK: andi $[[R8:[0-9]+]], $4, 255
-; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
-; CHECK: sw $[[R9]], [[OFFSET:[0-9]+]]($sp)
+; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
; CHECK: $[[BB0:[A-Z_0-9]+]]:
; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK: lw $[[R18:[0-9]+]], [[OFFSET]]($sp)
-; CHECK: or $[[R11:[0-9]+]], $zero, $[[R18]]
-; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R9]]
; CHECK: sc $[[R14]], 0($[[R2]])
; CHECK: beq $[[R14]], $zero, $[[BB0]]
; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
+; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
; CHECK: sra $2, $[[R17]], 24
}
define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
entry:
- %0 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @y, i8 %oldval, i8 %newval)
+ %0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic
ret i8 %0
; CHECK: AtomicCmpSwap8:
@@ -229,12 +204,12 @@ entry:
; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
; CHECK: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
+; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK: andi $[[R8:[0-9]+]], $4, 255
-; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
+; CHECK: sllv $[[R9:[0-9]+]], $[[R8]], $[[R4]]
; CHECK: andi $[[R10:[0-9]+]], $5, 255
-; CHECK: sll $[[R11:[0-9]+]], $[[R10]], $[[R4]]
+; CHECK: sllv $[[R11:[0-9]+]], $[[R10]], $[[R4]]
; CHECK: $[[BB0:[A-Z_0-9]+]]:
; CHECK: ll $[[R12:[0-9]+]], 0($[[R2]])
@@ -247,7 +222,23 @@ entry:
; CHECK: beq $[[R15]], $zero, $[[BB0]]
; CHECK: $[[BB1]]:
-; CHECK: srl $[[R16:[0-9]+]], $[[R13]], $[[R4]]
+; CHECK: srlv $[[R16:[0-9]+]], $[[R13]], $[[R4]]
; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
; CHECK: sra $2, $[[R17]], 24
}
+
+@countsint = common global i32 0, align 4
+
+define i32 @CheckSync(i32 %v) nounwind noinline {
+entry:
+ %0 = atomicrmw add i32* @countsint, i32 %v seq_cst
+ ret i32 %0
+
+; CHECK: CheckSync:
+; CHECK: sync 0
+; CHECK: ll
+; CHECK: sc
+; CHECK: beq
+; CHECK: sync 0
+}
+
diff --git a/test/CodeGen/Mips/brdelayslot.ll b/test/CodeGen/Mips/brdelayslot.ll
new file mode 100644
index 0000000..b266ce6
--- /dev/null
+++ b/test/CodeGen/Mips/brdelayslot.ll
@@ -0,0 +1,15 @@
+; RUN: llc -march=mipsel -enable-mips-delay-filler < %s | FileCheck %s
+
+define void @foo1() nounwind {
+entry:
+; CHECK: jalr
+; CHECK-NOT: nop
+; CHECK: jr
+; CHECK-NOT: nop
+; CHECK: .end
+
+ tail call void @foo2(i32 3) nounwind
+ ret void
+}
+
+declare void @foo2(i32)
diff --git a/test/CodeGen/Mips/cmov.ll b/test/CodeGen/Mips/cmov.ll
index ec37961..7851ba9 100755
--- a/test/CodeGen/Mips/cmov.ll
+++ b/test/CodeGen/Mips/cmov.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=mips -mcpu=4ke < %s | FileCheck %s
-; RUN: llc -march=mips -mcpu=4ke -regalloc=basic < %s | FileCheck %s
+; RUN: llc -march=mips < %s | FileCheck %s
+; RUN: llc -march=mips -regalloc=basic < %s | FileCheck %s
@i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4
@i3 = common global i32* null, align 4
diff --git a/test/CodeGen/Mips/constantfp0.ll b/test/CodeGen/Mips/constantfp0.ll
new file mode 100644
index 0000000..191f31d
--- /dev/null
+++ b/test/CodeGen/Mips/constantfp0.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=mips < %s | FileCheck %s
+
+define i32 @branch(double %d) nounwind readnone {
+entry:
+; CHECK: mtc1 $zero, $f[[R0:[0-9]+]]
+; CHECK: c.eq.d $f{{[0-9]+}}, $f[[R0]]
+
+ %tobool = fcmp une double %d, 0.000000e+00
+ %. = zext i1 %tobool to i32
+ ret i32 %.
+}
diff --git a/test/CodeGen/Mips/cprestore.ll b/test/CodeGen/Mips/cprestore.ll
new file mode 100644
index 0000000..391f5c7
--- /dev/null
+++ b/test/CodeGen/Mips/cprestore.ll
@@ -0,0 +1,20 @@
+; DISABLED: llc -march=mipsel < %s | FileCheck %s
+; RUN: false
+
+; byval is currently unsupported.
+; XFAIL: *
+
+; CHECK: .set macro
+; CHECK-NEXT: .cprestore
+; CHECK-NEXT: .set nomacro
+
+%struct.S = type { [16384 x i32] }
+
+define void @foo2() nounwind {
+entry:
+ %s = alloca %struct.S, align 4
+ call void @foo1(%struct.S* byval %s)
+ ret void
+}
+
+declare void @foo1(%struct.S* byval)
diff --git a/test/CodeGen/Mips/double2int.ll b/test/CodeGen/Mips/double2int.ll
index 3d033e1..445ccb3 100644
--- a/test/CodeGen/Mips/double2int.ll
+++ b/test/CodeGen/Mips/double2int.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mips -mcpu=4ke < %s | FileCheck %s
+; RUN: llc -march=mips < %s | FileCheck %s
define i32 @f1(double %d) nounwind readnone {
entry:
diff --git a/test/CodeGen/Mips/eh.ll b/test/CodeGen/Mips/eh.ll
index 765b778..9cd3413 100644
--- a/test/CodeGen/Mips/eh.ll
+++ b/test/CodeGen/Mips/eh.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=mipsel -mcpu=4ke | FileCheck %s -check-prefix=CHECK-EL
-; RUN: llc < %s -march=mips -mcpu=4ke | FileCheck %s -check-prefix=CHECK-EB
+; RUN: llc < %s -march=mipsel | FileCheck %s -check-prefix=CHECK-EL
+; RUN: llc < %s -march=mips | FileCheck %s -check-prefix=CHECK-EB
@g1 = global double 0.000000e+00, align 8
@_ZTId = external constant i8*
@@ -32,10 +32,12 @@ lpad: ; preds = %entry
; CHECK-EL: lw $gp
; CHECK-EL: beq $5
- %exn = tail call i8* @llvm.eh.exception() nounwind
- %eh.selector = tail call i32 (i8*, i8*, ...)* @llvm.eh.selector(i8* %exn, i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*), i8* bitcast (i8** @_ZTId to i8*)) nounwind
+ %exn.val = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ catch i8* bitcast (i8** @_ZTId to i8*)
+ %exn = extractvalue { i8*, i32 } %exn.val, 0
+ %sel = extractvalue { i8*, i32 } %exn.val, 1
%1 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTId to i8*)) nounwind
- %2 = icmp eq i32 %eh.selector, %1
+ %2 = icmp eq i32 %sel, %1
br i1 %2, label %catch, label %eh.resume
catch: ; preds = %lpad
@@ -48,8 +50,7 @@ catch: ; preds = %lpad
ret void
eh.resume: ; preds = %lpad
- tail call void @llvm.eh.resume(i8* %exn, i32 %eh.selector) noreturn
- unreachable
+ resume { i8*, i32 } %exn.val
unreachable: ; preds = %entry
unreachable
diff --git a/test/CodeGen/Mips/extins.ll b/test/CodeGen/Mips/extins.ll
new file mode 100644
index 0000000..69f53e5
--- /dev/null
+++ b/test/CodeGen/Mips/extins.ll
@@ -0,0 +1,21 @@
+; RUN: llc -march=mips -mcpu=4ke < %s | FileCheck %s
+
+define i32 @ext0_5_9(i32 %s, i32 %pos, i32 %sz) nounwind readnone {
+entry:
+; CHECK: ext ${{[0-9]+}}, $4, 5, 9
+ %shr = lshr i32 %s, 5
+ %and = and i32 %shr, 511
+ ret i32 %and
+}
+
+define void @ins2_5_9(i32 %s, i32* nocapture %d) nounwind {
+entry:
+; CHECK: ins ${{[0-9]+}}, $4, 5, 9
+ %and = shl i32 %s, 5
+ %shl = and i32 %and, 16352
+ %tmp3 = load i32* %d, align 4
+ %and5 = and i32 %tmp3, -16353
+ %or = or i32 %and5, %shl
+ store i32 %or, i32* %d, align 4
+ ret void
+}
diff --git a/test/CodeGen/Mips/fcopysign.ll b/test/CodeGen/Mips/fcopysign.ll
index 14c6507..79f956d 100644
--- a/test/CodeGen/Mips/fcopysign.ll
+++ b/test/CodeGen/Mips/fcopysign.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=mipsel -mcpu=4ke | FileCheck %s -check-prefix=CHECK-EL
-; RUN: llc < %s -march=mips -mcpu=4ke | FileCheck %s -check-prefix=CHECK-EB
+; RUN: llc < %s -march=mipsel | FileCheck %s -check-prefix=CHECK-EL
+; RUN: llc < %s -march=mips | FileCheck %s -check-prefix=CHECK-EB
define double @func0(double %d0, double %d1) nounwind readnone {
entry:
diff --git a/test/CodeGen/Mips/fpcmp.ll b/test/CodeGen/Mips/fpcmp.ll
index c89ffe6..86545e3 100644
--- a/test/CodeGen/Mips/fpcmp.ll
+++ b/test/CodeGen/Mips/fpcmp.ll
@@ -1,18 +1,13 @@
-; RUN: llc < %s -march=mipsel -mcpu=4ke | FileCheck %s -check-prefix=CHECK-MIPS32R2
-; RUN: llc < %s -march=mipsel | FileCheck %s -check-prefix=CHECK-MIPS1
+; RUN: llc < %s -march=mipsel | FileCheck %s -check-prefix=CHECK-MIPS32
@g1 = external global i32
define i32 @f(float %f0, float %f1) nounwind {
entry:
-; CHECK-MIPS32R2: c.olt.s
-; CHECK-MIPS32R2: movt
-; CHECK-MIPS32R2: c.olt.s
-; CHECK-MIPS32R2: movt
-; CHECK-MIPS1: c.olt.s
-; CHECK-MIPS1: bc1t
-; CHECK-MIPS1: c.olt.s
-; CHECK-MIPS1: bc1t
+; CHECK-MIPS32: c.olt.s
+; CHECK-MIPS32: movt
+; CHECK-MIPS32: c.olt.s
+; CHECK-MIPS32: movt
%cmp = fcmp olt float %f0, %f1
%conv = zext i1 %cmp to i32
%tmp2 = load i32* @g1, align 4
diff --git a/test/CodeGen/Mips/frame-address.ll b/test/CodeGen/Mips/frame-address.ll
index c48ce7e..9df1808 100644
--- a/test/CodeGen/Mips/frame-address.ll
+++ b/test/CodeGen/Mips/frame-address.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -mcpu=mips2 < %s | FileCheck %s
+; RUN: llc -march=mipsel < %s | FileCheck %s
declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/Mips/i64arg.ll b/test/CodeGen/Mips/i64arg.ll
index 560f2e9..87cf2a6 100644
--- a/test/CodeGen/Mips/i64arg.ll
+++ b/test/CodeGen/Mips/i64arg.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mips -mcpu=4ke < %s | FileCheck %s
+; RUN: llc -march=mips < %s | FileCheck %s
define void @f1(i64 %ll1, float %f, i64 %ll, i32 %i, float %f2) nounwind {
entry:
diff --git a/test/CodeGen/Mips/inlineasmmemop.ll b/test/CodeGen/Mips/inlineasmmemop.ll
index c565892..b5db58a 100644
--- a/test/CodeGen/Mips/inlineasmmemop.ll
+++ b/test/CodeGen/Mips/inlineasmmemop.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -mcpu=4ke < %s | FileCheck %s
+; RUN: llc -march=mipsel < %s | FileCheck %s
@g1 = external global i32
diff --git a/test/CodeGen/Mips/internalfunc.ll b/test/CodeGen/Mips/internalfunc.ll
index c2a4e5c..434b386 100644
--- a/test/CodeGen/Mips/internalfunc.ll
+++ b/test/CodeGen/Mips/internalfunc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=mipsel -mcpu=4ke | FileCheck %s
+; RUN: llc < %s -march=mipsel | FileCheck %s
@caller.sf1 = internal unnamed_addr global void (...)* null, align 4
@gf1 = external global void (...)*
diff --git a/test/CodeGen/Mips/largeimmprinting.ll b/test/CodeGen/Mips/largeimmprinting.ll
index fcc20f7..579a319 100644
--- a/test/CodeGen/Mips/largeimmprinting.ll
+++ b/test/CodeGen/Mips/largeimmprinting.ll
@@ -1,4 +1,8 @@
-; RUN: llc -march=mipsel -mcpu=4ke < %s | FileCheck %s
+; DISABLED: llc -march=mipsel -mcpu=4ke < %s | FileCheck %s
+; RUN: false
+
+; byval is currently unsupported.
+; XFAIL: *
%struct.S1 = type { [65536 x i8] }
diff --git a/test/CodeGen/Mips/madd-msub.ll b/test/CodeGen/Mips/madd-msub.ll
index 4a205b1..0aeabb3 100644
--- a/test/CodeGen/Mips/madd-msub.ll
+++ b/test/CodeGen/Mips/madd-msub.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=mips -mcpu=4ke < %s | FileCheck %s
+; RUN: llc -march=mips < %s | FileCheck %s
-; CHECK: madd $5, $4
+; CHECK: madd
define i64 @madd1(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = sext i32 %a to i64
@@ -11,7 +11,7 @@ entry:
ret i64 %add
}
-; CHECK: maddu $5, $4
+; CHECK: maddu
define i64 @madd2(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = zext i32 %a to i64
@@ -22,7 +22,7 @@ entry:
ret i64 %add
}
-; CHECK: madd $5, $4
+; CHECK: madd
define i64 @madd3(i32 %a, i32 %b, i64 %c) nounwind readnone {
entry:
%conv = sext i32 %a to i64
@@ -32,7 +32,7 @@ entry:
ret i64 %add
}
-; CHECK: msub $5, $4
+; CHECK: msub
define i64 @msub1(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = sext i32 %c to i64
@@ -43,7 +43,7 @@ entry:
ret i64 %sub
}
-; CHECK: msubu $5, $4
+; CHECK: msubu
define i64 @msub2(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = zext i32 %c to i64
@@ -54,7 +54,7 @@ entry:
ret i64 %sub
}
-; CHECK: msub $5, $4
+; CHECK: msub
define i64 @msub3(i32 %a, i32 %b, i64 %c) nounwind readnone {
entry:
%conv = sext i32 %a to i64
diff --git a/test/CodeGen/Mips/mips64fpldst.ll b/test/CodeGen/Mips/mips64fpldst.ll
new file mode 100644
index 0000000..b8f3ca9
--- /dev/null
+++ b/test/CodeGen/Mips/mips64fpldst.ll
@@ -0,0 +1,58 @@
+; RUN: llc < %s -march=mips64el -mcpu=mips64r1 -mattr=n64 | FileCheck %s -check-prefix=CHECK-N64
+; RUN: llc < %s -march=mips64el -mcpu=mips64r1 -mattr=n32 | FileCheck %s -check-prefix=CHECK-N32
+
+@f0 = common global float 0.000000e+00, align 4
+@d0 = common global double 0.000000e+00, align 8
+@f1 = common global float 0.000000e+00, align 4
+@d1 = common global double 0.000000e+00, align 8
+
+define float @funcfl1() nounwind readonly {
+entry:
+; CHECK-N64: funcfl1
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(f0)
+; CHECK-N64: lwc1 $f{{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: funcfl1
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(f0)
+; CHECK-N32: lwc1 $f{{[0-9]+}}, 0($[[R0]])
+ %0 = load float* @f0, align 4
+ ret float %0
+}
+
+define double @funcfl2() nounwind readonly {
+entry:
+; CHECK-N64: funcfl2
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(d0)
+; CHECK-N64: ldc1 $f{{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: funcfl2
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(d0)
+; CHECK-N32: ldc1 $f{{[0-9]+}}, 0($[[R0]])
+ %0 = load double* @d0, align 8
+ ret double %0
+}
+
+define void @funcfs1() nounwind {
+entry:
+; CHECK-N64: funcfs1
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(f0)
+; CHECK-N64: swc1 $f{{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: funcfs1
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(f0)
+; CHECK-N32: swc1 $f{{[0-9]+}}, 0($[[R0]])
+ %0 = load float* @f1, align 4
+ store float %0, float* @f0, align 4
+ ret void
+}
+
+define void @funcfs2() nounwind {
+entry:
+; CHECK-N64: funcfs2
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(d0)
+; CHECK-N64: sdc1 $f{{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: funcfs2
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(d0)
+; CHECK-N32: sdc1 $f{{[0-9]+}}, 0($[[R0]])
+ %0 = load double* @d1, align 8
+ store double %0, double* @d0, align 8
+ ret void
+}
+
diff --git a/test/CodeGen/Mips/mips64instrs.ll b/test/CodeGen/Mips/mips64instrs.ll
new file mode 100644
index 0000000..c9812a2
--- /dev/null
+++ b/test/CodeGen/Mips/mips64instrs.ll
@@ -0,0 +1,143 @@
+; RUN: llc -march=mips64el -mcpu=mips64r1 < %s | FileCheck %s
+
+define i64 @f0(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; CHECK: daddu
+ %add = add nsw i64 %a1, %a0
+ ret i64 %add
+}
+
+define i64 @f1(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; CHECK: dsubu
+ %sub = sub nsw i64 %a0, %a1
+ ret i64 %sub
+}
+
+define i64 @f4(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; CHECK: and
+ %and = and i64 %a1, %a0
+ ret i64 %and
+}
+
+define i64 @f5(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; CHECK: or
+ %or = or i64 %a1, %a0
+ ret i64 %or
+}
+
+define i64 @f6(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; CHECK: xor
+ %xor = xor i64 %a1, %a0
+ ret i64 %xor
+}
+
+define i64 @f7(i64 %a0) nounwind readnone {
+entry:
+; CHECK: daddiu ${{[0-9]+}}, ${{[0-9]+}}, 20
+ %add = add nsw i64 %a0, 20
+ ret i64 %add
+}
+
+define i64 @f8(i64 %a0) nounwind readnone {
+entry:
+; CHECK: daddiu ${{[0-9]+}}, ${{[0-9]+}}, -20
+ %sub = add nsw i64 %a0, -20
+ ret i64 %sub
+}
+
+define i64 @f9(i64 %a0) nounwind readnone {
+entry:
+; CHECK: andi ${{[0-9]+}}, ${{[0-9]+}}, 20
+ %and = and i64 %a0, 20
+ ret i64 %and
+}
+
+define i64 @f10(i64 %a0) nounwind readnone {
+entry:
+; CHECK: ori ${{[0-9]+}}, ${{[0-9]+}}, 20
+ %or = or i64 %a0, 20
+ ret i64 %or
+}
+
+define i64 @f11(i64 %a0) nounwind readnone {
+entry:
+; CHECK: xori ${{[0-9]+}}, ${{[0-9]+}}, 20
+ %xor = xor i64 %a0, 20
+ ret i64 %xor
+}
+
+define i64 @f12(i64 %a, i64 %b) nounwind readnone {
+entry:
+; CHECK: mult
+ %mul = mul nsw i64 %b, %a
+ ret i64 %mul
+}
+
+define i64 @f13(i64 %a, i64 %b) nounwind readnone {
+entry:
+; CHECK: mult
+ %mul = mul i64 %b, %a
+ ret i64 %mul
+}
+
+define i64 @f14(i64 %a, i64 %b) nounwind readnone {
+entry:
+; CHECK: ddiv $zero
+; CHECK: mflo
+ %div = sdiv i64 %a, %b
+ ret i64 %div
+}
+
+define i64 @f15(i64 %a, i64 %b) nounwind readnone {
+entry:
+; CHECK: ddivu $zero
+; CHECK: mflo
+ %div = udiv i64 %a, %b
+ ret i64 %div
+}
+
+define i64 @f16(i64 %a, i64 %b) nounwind readnone {
+entry:
+; CHECK: ddiv $zero
+; CHECK: mfhi
+ %rem = srem i64 %a, %b
+ ret i64 %rem
+}
+
+define i64 @f17(i64 %a, i64 %b) nounwind readnone {
+entry:
+; CHECK: ddivu $zero
+; CHECK: mfhi
+ %rem = urem i64 %a, %b
+ ret i64 %rem
+}
+
+declare i64 @llvm.ctlz.i64(i64) nounwind readnone
+
+define i64 @f18(i64 %X) nounwind readnone {
+entry:
+; CHECK: dclz $2, $4
+ %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X)
+ ret i64 %tmp1
+}
+
+define i64 @f19(i64 %X) nounwind readnone {
+entry:
+; CHECK: dclo $2, $4
+ %neg = xor i64 %X, -1
+ %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg)
+ ret i64 %tmp1
+}
+
+define i64 @f20(i64 %a, i64 %b) nounwind readnone {
+entry:
+; CHECK: nor
+ %or = or i64 %b, %a
+ %neg = xor i64 %or, -1
+ ret i64 %neg
+}
+
diff --git a/test/CodeGen/Mips/mips64intldst.ll b/test/CodeGen/Mips/mips64intldst.ll
new file mode 100644
index 0000000..fdf496b
--- /dev/null
+++ b/test/CodeGen/Mips/mips64intldst.ll
@@ -0,0 +1,157 @@
+; RUN: llc < %s -march=mips64el -mcpu=mips64r1 -mattr=n64 | FileCheck %s -check-prefix=CHECK-N64
+; RUN: llc < %s -march=mips64el -mcpu=mips64r1 -mattr=n32 | FileCheck %s -check-prefix=CHECK-N32
+
+@c = common global i8 0, align 4
+@s = common global i16 0, align 4
+@i = common global i32 0, align 4
+@l = common global i64 0, align 8
+@uc = common global i8 0, align 4
+@us = common global i16 0, align 4
+@ui = common global i32 0, align 4
+@l1 = common global i64 0, align 8
+
+define i64 @func1() nounwind readonly {
+entry:
+; CHECK-N64: func1
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(c)
+; CHECK-N64: lb ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: func1
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(c)
+; CHECK-N32: lb ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i8* @c, align 4
+ %conv = sext i8 %0 to i64
+ ret i64 %conv
+}
+
+define i64 @func2() nounwind readonly {
+entry:
+; CHECK-N64: func2
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(s)
+; CHECK-N64: lh ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: func2
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(s)
+; CHECK-N32: lh ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i16* @s, align 4
+ %conv = sext i16 %0 to i64
+ ret i64 %conv
+}
+
+define i64 @func3() nounwind readonly {
+entry:
+; CHECK-N64: func3
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(i)
+; CHECK-N64: lw ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: func3
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(i)
+; CHECK-N32: lw ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i32* @i, align 4
+ %conv = sext i32 %0 to i64
+ ret i64 %conv
+}
+
+define i64 @func4() nounwind readonly {
+entry:
+; CHECK-N64: func4
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(l)
+; CHECK-N64: ld ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: func4
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(l)
+; CHECK-N32: ld ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i64* @l, align 8
+ ret i64 %0
+}
+
+define i64 @ufunc1() nounwind readonly {
+entry:
+; CHECK-N64: ufunc1
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(uc)
+; CHECK-N64: lbu ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: ufunc1
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(uc)
+; CHECK-N32: lbu ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i8* @uc, align 4
+ %conv = zext i8 %0 to i64
+ ret i64 %conv
+}
+
+define i64 @ufunc2() nounwind readonly {
+entry:
+; CHECK-N64: ufunc2
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(us)
+; CHECK-N64: lhu ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: ufunc2
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(us)
+; CHECK-N32: lhu ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i16* @us, align 4
+ %conv = zext i16 %0 to i64
+ ret i64 %conv
+}
+
+define i64 @ufunc3() nounwind readonly {
+entry:
+; CHECK-N64: ufunc3
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(ui)
+; CHECK-N64: lwu ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: ufunc3
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(ui)
+; CHECK-N32: lwu ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i32* @ui, align 4
+ %conv = zext i32 %0 to i64
+ ret i64 %conv
+}
+
+define void @sfunc1() nounwind {
+entry:
+; CHECK-N64: sfunc1
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(c)
+; CHECK-N64: sb ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: sfunc1
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(c)
+; CHECK-N32: sb ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i64* @l1, align 8
+ %conv = trunc i64 %0 to i8
+ store i8 %conv, i8* @c, align 4
+ ret void
+}
+
+define void @sfunc2() nounwind {
+entry:
+; CHECK-N64: sfunc2
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(s)
+; CHECK-N64: sh ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: sfunc2
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(s)
+; CHECK-N32: sh ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i64* @l1, align 8
+ %conv = trunc i64 %0 to i16
+ store i16 %conv, i16* @s, align 4
+ ret void
+}
+
+define void @sfunc3() nounwind {
+entry:
+; CHECK-N64: sfunc3
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(i)
+; CHECK-N64: sw ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: sfunc3
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(i)
+; CHECK-N32: sw ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i64* @l1, align 8
+ %conv = trunc i64 %0 to i32
+ store i32 %conv, i32* @i, align 4
+ ret void
+}
+
+define void @sfunc4() nounwind {
+entry:
+; CHECK-N64: sfunc4
+; CHECK-N64: ld $[[R0:[0-9]+]], %got_disp(l)
+; CHECK-N64: sd ${{[0-9]+}}, 0($[[R0]])
+; CHECK-N32: sfunc4
+; CHECK-N32: lw $[[R0:[0-9]+]], %got(l)
+; CHECK-N32: sd ${{[0-9]+}}, 0($[[R0]])
+ %0 = load i64* @l1, align 8
+ store i64 %0, i64* @l, align 8
+ ret void
+}
+
diff --git a/test/CodeGen/Mips/mips64shift.ll b/test/CodeGen/Mips/mips64shift.ll
new file mode 100644
index 0000000..cc5e508
--- /dev/null
+++ b/test/CodeGen/Mips/mips64shift.ll
@@ -0,0 +1,104 @@
+; RUN: llc -march=mips64el -mcpu=mips64r2 < %s | FileCheck %s
+
+define i64 @f0(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; CHECK: dsllv
+ %shl = shl i64 %a0, %a1
+ ret i64 %shl
+}
+
+define i64 @f1(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; CHECK: dsrav
+ %shr = ashr i64 %a0, %a1
+ ret i64 %shr
+}
+
+define i64 @f2(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; CHECK: dsrlv
+ %shr = lshr i64 %a0, %a1
+ ret i64 %shr
+}
+
+define i64 @f3(i64 %a0) nounwind readnone {
+entry:
+; CHECK: dsll ${{[0-9]+}}, ${{[0-9]+}}, 10
+ %shl = shl i64 %a0, 10
+ ret i64 %shl
+}
+
+define i64 @f4(i64 %a0) nounwind readnone {
+entry:
+; CHECK: dsra ${{[0-9]+}}, ${{[0-9]+}}, 10
+ %shr = ashr i64 %a0, 10
+ ret i64 %shr
+}
+
+define i64 @f5(i64 %a0) nounwind readnone {
+entry:
+; CHECK: dsrl ${{[0-9]+}}, ${{[0-9]+}}, 10
+ %shr = lshr i64 %a0, 10
+ ret i64 %shr
+}
+
+define i64 @f6(i64 %a0) nounwind readnone {
+entry:
+; CHECK: dsll32 ${{[0-9]+}}, ${{[0-9]+}}, 8
+ %shl = shl i64 %a0, 40
+ ret i64 %shl
+}
+
+define i64 @f7(i64 %a0) nounwind readnone {
+entry:
+; CHECK: dsra32 ${{[0-9]+}}, ${{[0-9]+}}, 8
+ %shr = ashr i64 %a0, 40
+ ret i64 %shr
+}
+
+define i64 @f8(i64 %a0) nounwind readnone {
+entry:
+; CHECK: dsrl32 ${{[0-9]+}}, ${{[0-9]+}}, 8
+ %shr = lshr i64 %a0, 40
+ ret i64 %shr
+}
+
+define i64 @f9(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; CHECK: drotrv
+ %shr = lshr i64 %a0, %a1
+ %sub = sub i64 64, %a1
+ %shl = shl i64 %a0, %sub
+ %or = or i64 %shl, %shr
+ ret i64 %or
+}
+
+define i64 @f10(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; CHECK: drotrv
+ %shl = shl i64 %a0, %a1
+ %sub = sub i64 64, %a1
+ %shr = lshr i64 %a0, %sub
+ %or = or i64 %shr, %shl
+ ret i64 %or
+}
+
+define i64 @f11(i64 %a0) nounwind readnone {
+entry:
+; CHECK: drotr ${{[0-9]+}}, ${{[0-9]+}}, 10
+ %shr = lshr i64 %a0, 10
+ %shl = shl i64 %a0, 54
+ %or = or i64 %shr, %shl
+ ret i64 %or
+}
+
+define i64 @f12(i64 %a0) nounwind readnone {
+entry:
+; CHECK: drotr32 ${{[0-9]+}}, ${{[0-9]+}}, 22
+ %shl = shl i64 %a0, 10
+ %shr = lshr i64 %a0, 54
+ %or = or i64 %shl, %shr
+ ret i64 %or
+}
+
+
diff --git a/test/CodeGen/Mips/mipslopat.ll b/test/CodeGen/Mips/mipslopat.ll
new file mode 100644
index 0000000..0279828
--- /dev/null
+++ b/test/CodeGen/Mips/mipslopat.ll
@@ -0,0 +1,19 @@
+; This test does not check the machine code output.
+; RUN: llc -march=mips < %s
+
+@stat_vol_ptr_int = internal global i32* null, align 4
+@stat_ptr_vol_int = internal global i32* null, align 4
+
+define void @simple_vol_file() nounwind {
+entry:
+ %tmp = volatile load i32** @stat_vol_ptr_int, align 4
+ %0 = bitcast i32* %tmp to i8*
+ call void @llvm.prefetch(i8* %0, i32 0, i32 0, i32 1)
+ %tmp1 = load i32** @stat_ptr_vol_int, align 4
+ %1 = bitcast i32* %tmp1 to i8*
+ call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1)
+ ret void
+}
+
+declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind
+
diff --git a/test/CodeGen/Mips/o32_cc.ll b/test/CodeGen/Mips/o32_cc.ll
index 3974cd4..70b66ef 100644
--- a/test/CodeGen/Mips/o32_cc.ll
+++ b/test/CodeGen/Mips/o32_cc.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mips -mcpu=4ke < %s | FileCheck %s
+; RUN: llc -march=mips < %s | FileCheck %s
; FIXME: Disabled because it unpredictably fails on certain platforms.
; REQUIRES: disabled
diff --git a/test/CodeGen/Mips/o32_cc_byval.ll b/test/CodeGen/Mips/o32_cc_byval.ll
index f5e1a87..e673480 100644
--- a/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/test/CodeGen/Mips/o32_cc_byval.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -mcpu=4ke < %s | FileCheck %s
+; RUN: llc -march=mipsel < %s | FileCheck %s
%0 = type { i8, i16, i32, i64, double, i32, [4 x i8] }
%struct.S1 = type { i8, i16, i32, i64, double, i32 }
diff --git a/test/CodeGen/Mips/o32_cc_vararg.ll b/test/CodeGen/Mips/o32_cc_vararg.ll
index 14ce04b..4a3d9ab 100644
--- a/test/CodeGen/Mips/o32_cc_vararg.ll
+++ b/test/CodeGen/Mips/o32_cc_vararg.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -mcpu=mips2 -pre-RA-sched=source < %s | FileCheck %s
+; RUN: llc -march=mipsel -pre-RA-sched=source < %s | FileCheck %s
; All test functions do the same thing - they return the first variable
diff --git a/test/CodeGen/Mips/rotate.ll b/test/CodeGen/Mips/rotate.ll
index e7dc309..8e27f4a 100644
--- a/test/CodeGen/Mips/rotate.ll
+++ b/test/CodeGen/Mips/rotate.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=mips -mcpu=4ke < %s | FileCheck %s
-; CHECK: rotrv $2, $4, $2
+; CHECK: rotrv $2, $4
define i32 @rot0(i32 %a, i32 %b) nounwind readnone {
entry:
%shl = shl i32 %a, %b
diff --git a/test/CodeGen/Mips/select.ll b/test/CodeGen/Mips/select.ll
index c83fa3e..40115be 100644
--- a/test/CodeGen/Mips/select.ll
+++ b/test/CodeGen/Mips/select.ll
@@ -1,13 +1,11 @@
-; RUN: llc < %s -march=mipsel -mcpu=4ke | FileCheck %s -check-prefix=CHECK-MIPS32R2
-; RUN: llc < %s -march=mipsel | FileCheck %s -check-prefix=CHECK-MIPS1
+; RUN: llc < %s -march=mipsel | FileCheck %s -check-prefix=CHECK
@d2 = external global double
@d3 = external global double
define i32 @sel1(i32 %s, i32 %f0, i32 %f1) nounwind readnone {
entry:
-; CHECK-MIPS32R2: movn
-; CHECK-MIPS1: beq
+; CHECK: movn
%tobool = icmp ne i32 %s, 0
%cond = select i1 %tobool, i32 %f1, i32 %f0
ret i32 %cond
@@ -15,8 +13,7 @@ entry:
define float @sel2(i32 %s, float %f0, float %f1) nounwind readnone {
entry:
-; CHECK-MIPS32R2: movn.s
-; CHECK-MIPS1: beq
+; CHECK: movn.s
%tobool = icmp ne i32 %s, 0
%cond = select i1 %tobool, float %f0, float %f1
ret float %cond
@@ -24,8 +21,7 @@ entry:
define double @sel2_1(i32 %s, double %f0, double %f1) nounwind readnone {
entry:
-; CHECK-MIPS32R2: movn.d
-; CHECK-MIPS1: beq
+; CHECK: movn.d
%tobool = icmp ne i32 %s, 0
%cond = select i1 %tobool, double %f0, double %f1
ret double %cond
@@ -33,10 +29,8 @@ entry:
define float @sel3(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.eq.s
-; CHECK-MIPS32R2: movt.s
-; CHECK-MIPS1: c.eq.s
-; CHECK-MIPS1: bc1f
+; CHECK: c.eq.s
+; CHECK: movt.s
%cmp = fcmp oeq float %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
@@ -44,10 +38,8 @@ entry:
define float @sel4(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.olt.s
-; CHECK-MIPS32R2: movt.s
-; CHECK-MIPS1: c.olt.s
-; CHECK-MIPS1: bc1f
+; CHECK: c.olt.s
+; CHECK: movt.s
%cmp = fcmp olt float %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
@@ -55,10 +47,8 @@ entry:
define float @sel5(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.ule.s
-; CHECK-MIPS32R2: movf.s
-; CHECK-MIPS1: c.ule.s
-; CHECK-MIPS1: bc1t
+; CHECK: c.ule.s
+; CHECK: movf.s
%cmp = fcmp ogt float %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
@@ -66,10 +56,8 @@ entry:
define double @sel5_1(double %f0, double %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.ule.s
-; CHECK-MIPS32R2: movf.d
-; CHECK-MIPS1: c.ule.s
-; CHECK-MIPS1: bc1t
+; CHECK: c.ule.s
+; CHECK: movf.d
%cmp = fcmp ogt float %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
@@ -77,10 +65,8 @@ entry:
define double @sel6(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.eq.d
-; CHECK-MIPS32R2: movt.d
-; CHECK-MIPS1: c.eq.d
-; CHECK-MIPS1: bc1f
+; CHECK: c.eq.d
+; CHECK: movt.d
%cmp = fcmp oeq double %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
@@ -88,10 +74,8 @@ entry:
define double @sel7(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.olt.d
-; CHECK-MIPS32R2: movt.d
-; CHECK-MIPS1: c.olt.d
-; CHECK-MIPS1: bc1f
+; CHECK: c.olt.d
+; CHECK: movt.d
%cmp = fcmp olt double %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
@@ -99,10 +83,8 @@ entry:
define double @sel8(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.ule.d
-; CHECK-MIPS32R2: movf.d
-; CHECK-MIPS1: c.ule.d
-; CHECK-MIPS1: bc1t
+; CHECK: c.ule.d
+; CHECK: movf.d
%cmp = fcmp ogt double %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
@@ -110,10 +92,8 @@ entry:
define float @sel8_1(float %f0, float %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.ule.d
-; CHECK-MIPS32R2: movf.s
-; CHECK-MIPS1: c.ule.d
-; CHECK-MIPS1: bc1t
+; CHECK: c.ule.d
+; CHECK: movf.s
%cmp = fcmp ogt double %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
@@ -121,10 +101,8 @@ entry:
define i32 @sel9(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.eq.s
-; CHECK-MIPS32R2: movt
-; CHECK-MIPS1: c.eq.s
-; CHECK-MIPS1: bc1f
+; CHECK: c.eq.s
+; CHECK: movt
%cmp = fcmp oeq float %f2, %f3
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
@@ -132,10 +110,8 @@ entry:
define i32 @sel10(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.olt.s
-; CHECK-MIPS32R2: movt
-; CHECK-MIPS1: c.olt.s
-; CHECK-MIPS1: bc1f
+; CHECK: c.olt.s
+; CHECK: movt
%cmp = fcmp olt float %f2, %f3
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
@@ -143,10 +119,8 @@ entry:
define i32 @sel11(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK-MIPS32R2: c.ule.s
-; CHECK-MIPS32R2: movf
-; CHECK-MIPS1: c.ule.s
-; CHECK-MIPS1: bc1t
+; CHECK: c.ule.s
+; CHECK: movf
%cmp = fcmp ogt float %f2, %f3
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
@@ -154,10 +128,8 @@ entry:
define i32 @sel12(i32 %f0, i32 %f1) nounwind readonly {
entry:
-; CHECK-MIPS32R2: c.eq.d
-; CHECK-MIPS32R2: movt
-; CHECK-MIPS1: c.eq.d
-; CHECK-MIPS1: bc1f
+; CHECK: c.eq.d
+; CHECK: movt
%tmp = load double* @d2, align 8, !tbaa !0
%tmp1 = load double* @d3, align 8, !tbaa !0
%cmp = fcmp oeq double %tmp, %tmp1
@@ -167,10 +139,8 @@ entry:
define i32 @sel13(i32 %f0, i32 %f1) nounwind readonly {
entry:
-; CHECK-MIPS32R2: c.olt.d
-; CHECK-MIPS32R2: movt
-; CHECK-MIPS1: c.olt.d
-; CHECK-MIPS1: bc1f
+; CHECK: c.olt.d
+; CHECK: movt
%tmp = load double* @d2, align 8, !tbaa !0
%tmp1 = load double* @d3, align 8, !tbaa !0
%cmp = fcmp olt double %tmp, %tmp1
@@ -180,10 +150,8 @@ entry:
define i32 @sel14(i32 %f0, i32 %f1) nounwind readonly {
entry:
-; CHECK-MIPS32R2: c.ule.d
-; CHECK-MIPS32R2: movf
-; CHECK-MIPS1: c.ule.d
-; CHECK-MIPS1: bc1t
+; CHECK: c.ule.d
+; CHECK: movf
%tmp = load double* @d2, align 8, !tbaa !0
%tmp1 = load double* @d3, align 8, !tbaa !0
%cmp = fcmp ogt double %tmp, %tmp1
diff --git a/test/CodeGen/Mips/tls.ll b/test/CodeGen/Mips/tls.ll
index 034738b..b0474b4 100644
--- a/test/CodeGen/Mips/tls.ll
+++ b/test/CodeGen/Mips/tls.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=mipsel -mcpu=mips2 < %s | FileCheck %s -check-prefix=PIC
-; RUN: llc -march=mipsel -mcpu=mips2 -relocation-model=static < %s \
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=PIC
+; RUN: llc -march=mipsel -relocation-model=static < %s \
; RUN: | FileCheck %s -check-prefix=STATIC
diff --git a/test/CodeGen/Mips/unalignedload.ll b/test/CodeGen/Mips/unalignedload.ll
new file mode 100644
index 0000000..433e896
--- /dev/null
+++ b/test/CodeGen/Mips/unalignedload.ll
@@ -0,0 +1,41 @@
+; RUN: llc < %s -march=mipsel | FileCheck %s -check-prefix=CHECK-EL
+; RUN: llc < %s -march=mips | FileCheck %s -check-prefix=CHECK-EB
+%struct.S2 = type { %struct.S1, %struct.S1 }
+%struct.S1 = type { i8, i8 }
+%struct.S4 = type { [7 x i8] }
+
+@s2 = common global %struct.S2 zeroinitializer, align 1
+@s4 = common global %struct.S4 zeroinitializer, align 1
+
+define void @foo1() nounwind {
+entry:
+; CHECK-EL: lw $25, %call16(foo2)
+; CHECK-EL: ulhu $4, 2
+; CHECK-EL: lw $[[R0:[0-9]+]], %got(s4)
+; CHECK-EL: lbu $[[R1:[0-9]+]], 6($[[R0]])
+; CHECK-EL: ulhu $[[R2:[0-9]+]], 4($[[R0]])
+; CHECK-EL: sll $[[R3:[0-9]+]], $[[R1]], 16
+; CHECK-EL: ulw $4, 0($[[R0]])
+; CHECK-EL: lw $25, %call16(foo4)
+; CHECK-EL: or $5, $[[R2]], $[[R3]]
+
+; CHECK-EB: ulhu $[[R0:[0-9]+]], 2
+; CHECK-EB: lw $25, %call16(foo2)
+; CHECK-EB: sll $4, $[[R0]], 16
+; CHECK-EB: lw $[[R1:[0-9]+]], %got(s4)
+; CHECK-EB: ulhu $[[R2:[0-9]+]], 4($[[R1]])
+; CHECK-EB: lbu $[[R3:[0-9]+]], 6($[[R1]])
+; CHECK-EB: sll $[[R4:[0-9]+]], $[[R2]], 16
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R3]], 8
+; CHECK-EB: ulw $4, 0($[[R1]])
+; CHECK-EB: lw $25, %call16(foo4)
+; CHECK-EB: or $5, $[[R4]], $[[R5]]
+
+ tail call void @foo2(%struct.S1* byval getelementptr inbounds (%struct.S2* @s2, i32 0, i32 1)) nounwind
+ tail call void @foo4(%struct.S4* byval @s4) nounwind
+ ret void
+}
+
+declare void @foo2(%struct.S1* byval)
+
+declare void @foo4(%struct.S4* byval)
diff --git a/test/CodeGen/PTX/20110926-sitofp.ll b/test/CodeGen/PTX/20110926-sitofp.ll
new file mode 100644
index 0000000..38d35c5
--- /dev/null
+++ b/test/CodeGen/PTX/20110926-sitofp.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -march=ptx32 | FileCheck %s
+
+@A = common global [1536 x [1536 x float]] zeroinitializer, align 4
+@B = common global [1536 x [1536 x float]] zeroinitializer, align 4
+
+define internal ptx_device void @init_array(i32 %x, i32 %y) {
+ %arrayidx103 = getelementptr [1536 x [1536 x float]]* @A, i32 0, i32 %x, i32 %y
+ %arrayidx224 = getelementptr [1536 x [1536 x float]]* @B, i32 0, i32 %x, i32 %y
+ %mul5 = mul i32 %x, %y
+ %rem = srem i32 %mul5, 1024
+ %add = add nsw i32 %rem, 1
+; CHECK: cvt.rn.f64.s32 %fd{{[0-9]+}}, %r{{[0-9]+}}
+ %conv = sitofp i32 %add to double
+ %div = fmul double %conv, 5.000000e-01
+ %conv7 = fptrunc double %div to float
+ store float %conv7, float* %arrayidx103, align 4
+ %rem14 = srem i32 %mul5, 1024
+ %add15 = add nsw i32 %rem14, 1
+ %conv16 = sitofp i32 %add15 to double
+ %div17 = fmul double %conv16, 5.000000e-01
+ %conv18 = fptrunc double %div17 to float
+ store float %conv18, float* %arrayidx224, align 4
+ ret void
+}
diff --git a/test/CodeGen/PTX/add.ll b/test/CodeGen/PTX/add.ll
index 293aebe..8b10d11 100644
--- a/test/CodeGen/PTX/add.ll
+++ b/test/CodeGen/PTX/add.ll
@@ -1,71 +1,71 @@
; RUN: llc < %s -march=ptx32 | FileCheck %s
define ptx_device i16 @t1_u16(i16 %x, i16 %y) {
-; CHECK: add.u16 rh{{[0-9]+}}, rh{{[0-9]+}}, rh{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: add.u16 %ret{{[0-9]+}}, %rh{{[0-9]+}}, %rh{{[0-9]+}};
+; CHECK: ret;
%z = add i16 %x, %y
ret i16 %z
}
define ptx_device i32 @t1_u32(i32 %x, i32 %y) {
-; CHECK: add.u32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: add.u32 %ret{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: ret;
%z = add i32 %x, %y
ret i32 %z
}
define ptx_device i64 @t1_u64(i64 %x, i64 %y) {
-; CHECK: add.u64 rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: add.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; CHECK: ret;
%z = add i64 %x, %y
ret i64 %z
}
define ptx_device float @t1_f32(float %x, float %y) {
-; CHECK: add.rn.f32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
-; CHECK-NEXT: ret;
+; CHECK: add.rn.f32 %ret{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}
+; CHECK: ret;
%z = fadd float %x, %y
ret float %z
}
define ptx_device double @t1_f64(double %x, double %y) {
-; CHECK: add.rn.f64 rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}}
-; CHECK-NEXT: ret;
+; CHECK: add.rn.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
+; CHECK: ret;
%z = fadd double %x, %y
ret double %z
}
define ptx_device i16 @t2_u16(i16 %x) {
-; CHECK: add.u16 rh{{[0-9]+}}, rh{{[0-9]+}}, 1;
-; CHECK-NEXT: ret;
+; CHECK: add.u16 %ret{{[0-9]+}}, %rh{{[0-9]+}}, 1;
+; CHECK: ret;
%z = add i16 %x, 1
ret i16 %z
}
define ptx_device i32 @t2_u32(i32 %x) {
-; CHECK: add.u32 r{{[0-9]+}}, r{{[0-9]+}}, 1;
-; CHECK-NEXT: ret;
+; CHECK: add.u32 %ret{{[0-9]+}}, %r{{[0-9]+}}, 1;
+; CHECK: ret;
%z = add i32 %x, 1
ret i32 %z
}
define ptx_device i64 @t2_u64(i64 %x) {
-; CHECK: add.u64 rd{{[0-9]+}}, rd{{[0-9]+}}, 1;
-; CHECK-NEXT: ret;
+; CHECK: add.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}}, 1;
+; CHECK: ret;
%z = add i64 %x, 1
ret i64 %z
}
define ptx_device float @t2_f32(float %x) {
-; CHECK: add.rn.f32 r{{[0-9]+}}, r{{[0-9]+}}, 0F3F800000;
-; CHECK-NEXT: ret;
+; CHECK: add.rn.f32 %ret{{[0-9]+}}, %f{{[0-9]+}}, 0D3FF0000000000000;
+; CHECK: ret;
%z = fadd float %x, 1.0
ret float %z
}
define ptx_device double @t2_f64(double %x) {
-; CHECK: add.rn.f64 rd{{[0-9]+}}, rd{{[0-9]+}}, 0D3FF0000000000000;
-; CHECK-NEXT: ret;
+; CHECK: add.rn.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}}, 0D3FF0000000000000;
+; CHECK: ret;
%z = fadd double %x, 1.0
ret double %z
}
diff --git a/test/CodeGen/PTX/aggregates.ll b/test/CodeGen/PTX/aggregates.ll
index 23f28a7..3fc0c40 100644
--- a/test/CodeGen/PTX/aggregates.ll
+++ b/test/CodeGen/PTX/aggregates.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -march=ptx32 -mattr=sm20 | FileCheck %s
+; XFAIL: *
%complex = type { float, float }
diff --git a/test/CodeGen/PTX/bitwise.ll b/test/CodeGen/PTX/bitwise.ll
index 3859280..1403a23 100644
--- a/test/CodeGen/PTX/bitwise.ll
+++ b/test/CodeGen/PTX/bitwise.ll
@@ -3,21 +3,21 @@
; preds
define ptx_device i32 @t1_and_preds(i1 %x, i1 %y) {
-; CHECK: and.pred p{{[0-9]+}}, p{{[0-9]+}}, p{{[0-9]+}}
+; CHECK: and.pred %p{{[0-9]+}}, %p{{[0-9]+}}, %p{{[0-9]+}}
%c = and i1 %x, %y
%d = zext i1 %c to i32
ret i32 %d
}
define ptx_device i32 @t1_or_preds(i1 %x, i1 %y) {
-; CHECK: or.pred p{{[0-9]+}}, p{{[0-9]+}}, p{{[0-9]+}}
+; CHECK: or.pred %p{{[0-9]+}}, %p{{[0-9]+}}, %p{{[0-9]+}}
%a = or i1 %x, %y
%b = zext i1 %a to i32
ret i32 %b
}
define ptx_device i32 @t1_xor_preds(i1 %x, i1 %y) {
-; CHECK: xor.pred p{{[0-9]+}}, p{{[0-9]+}}, p{{[0-9]+}}
+; CHECK: xor.pred %p{{[0-9]+}}, %p{{[0-9]+}}, %p{{[0-9]+}}
%a = xor i1 %x, %y
%b = zext i1 %a to i32
ret i32 %b
diff --git a/test/CodeGen/PTX/bra.ll b/test/CodeGen/PTX/bra.ll
index 7cc9444..464c29c 100644
--- a/test/CodeGen/PTX/bra.ll
+++ b/test/CodeGen/PTX/bra.ll
@@ -10,15 +10,15 @@ loop:
define ptx_device i32 @test_bra_cond_direct(i32 %x, i32 %y) {
entry:
-; CHECK: setp.le.u32 p0, r[[R0:[0-9]+]], r[[R1:[0-9]+]]
+; CHECK: setp.le.u32 %p0, %r[[R0:[0-9]+]], %r[[R1:[0-9]+]]
%p = icmp ugt i32 %x, %y
-; CHECK-NEXT: @p0 bra
+; CHECK-NEXT: @%p0 bra
; CHECK-NOT: bra
br i1 %p, label %clause.if, label %clause.else
clause.if:
-; CHECK: mov.u32 r{{[0-9]+}}, r[[R0]]
+; CHECK: mov.u32 %ret{{[0-9]+}}, %r[[R0]]
ret i32 %x
clause.else:
-; CHECK: mov.u32 r{{[0-9]+}}, r[[R1]]
+; CHECK: mov.u32 %ret{{[0-9]+}}, %r[[R1]]
ret i32 %y
}
diff --git a/test/CodeGen/PTX/cvt.ll b/test/CodeGen/PTX/cvt.ll
index 853abaf..a643d25 100644
--- a/test/CodeGen/PTX/cvt.ll
+++ b/test/CodeGen/PTX/cvt.ll
@@ -1,13 +1,13 @@
; RUN: llc < %s -march=ptx32 | FileCheck %s
-; preds
+; preds
; (note: we convert back to i32 to return)
define ptx_device i32 @cvt_pred_i16(i16 %x, i1 %y) {
-; CHECK: setp.gt.u16 p[[P0:[0-9]+]], rh{{[0-9]+}}, 0
-; CHECK-NEXT: and.pred p0, p[[P0:[0-9]+]], p{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0:[0-9]+]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.u16 %p[[P0:[0-9]+]], %rh{{[0-9]+}}, 0
+; CHECK: and.pred %p2, %p[[P0:[0-9]+]], %p{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0:[0-9]+]];
+; CHECK: ret;
%a = trunc i16 %x to i1
%b = and i1 %a, %y
%c = zext i1 %b to i32
@@ -15,10 +15,10 @@ define ptx_device i32 @cvt_pred_i16(i16 %x, i1 %y) {
}
define ptx_device i32 @cvt_pred_i32(i32 %x, i1 %y) {
-; CHECK: setp.gt.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, 0
-; CHECK-NEXT: and.pred p0, p[[P0:[0-9]+]], p{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0:[0-9]+]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 0
+; CHECK: and.pred %p2, %p[[P0:[0-9]+]], %p{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0:[0-9]+]];
+; CHECK: ret;
%a = trunc i32 %x to i1
%b = and i1 %a, %y
%c = zext i1 %b to i32
@@ -26,10 +26,10 @@ define ptx_device i32 @cvt_pred_i32(i32 %x, i1 %y) {
}
define ptx_device i32 @cvt_pred_i64(i64 %x, i1 %y) {
-; CHECK: setp.gt.u64 p[[P0:[0-9]+]], rd{{[0-9]+}}, 0
-; CHECK-NEXT: and.pred p0, p[[P0:[0-9]+]], p{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0:[0-9]+]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.u64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, 0
+; CHECK: and.pred %p2, %p[[P0:[0-9]+]], %p{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0:[0-9]+]];
+; CHECK: ret;
%a = trunc i64 %x to i1
%b = and i1 %a, %y
%c = zext i1 %b to i32
@@ -37,10 +37,10 @@ define ptx_device i32 @cvt_pred_i64(i64 %x, i1 %y) {
}
define ptx_device i32 @cvt_pred_f32(float %x, i1 %y) {
-; CHECK: setp.gt.f32 p[[P0:[0-9]+]], r{{[0-9]+}}, 0
-; CHECK-NEXT: and.pred p0, p[[P0:[0-9]+]], p{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0:[0-9]+]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 0
+; CHECK: and.pred %p2, %p[[P0:[0-9]+]], %p{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0:[0-9]+]];
+; CHECK: ret;
%a = fptoui float %x to i1
%b = and i1 %a, %y
%c = zext i1 %b to i32
@@ -48,10 +48,10 @@ define ptx_device i32 @cvt_pred_f32(float %x, i1 %y) {
}
define ptx_device i32 @cvt_pred_f64(double %x, i1 %y) {
-; CHECK: setp.gt.f64 p[[P0:[0-9]+]], rd{{[0-9]+}}, 0
-; CHECK-NEXT: and.pred p0, p[[P0:[0-9]+]], p{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0:[0-9]+]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.u64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, 0
+; CHECK: and.pred %p2, %p[[P0:[0-9]+]], %p{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0:[0-9]+]];
+; CHECK: ret;
%a = fptoui double %x to i1
%b = and i1 %a, %y
%c = zext i1 %b to i32
@@ -61,36 +61,36 @@ define ptx_device i32 @cvt_pred_f64(double %x, i1 %y) {
; i16
define ptx_device i16 @cvt_i16_preds(i1 %x) {
-; CHECK: selp.u16 rh{{[0-9]+}}, 1, 0, p{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: selp.u16 %ret{{[0-9]+}}, 1, 0, %p{{[0-9]+}};
+; CHECK: ret;
%a = zext i1 %x to i16
ret i16 %a
}
define ptx_device i16 @cvt_i16_i32(i32 %x) {
-; CHECK: cvt.u16.u32 rh{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.u16.u32 %ret{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: ret;
%a = trunc i32 %x to i16
ret i16 %a
}
define ptx_device i16 @cvt_i16_i64(i64 %x) {
-; CHECK: cvt.u16.u64 rh{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.u16.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}};
+; CHECK: ret;
%a = trunc i64 %x to i16
ret i16 %a
}
define ptx_device i16 @cvt_i16_f32(float %x) {
-; CHECK: cvt.rzi.u16.f32 rh{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rzi.u16.f32 %ret{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%a = fptoui float %x to i16
ret i16 %a
}
define ptx_device i16 @cvt_i16_f64(double %x) {
-; CHECK: cvt.rzi.u16.f64 rh{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rzi.u16.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
%a = fptoui double %x to i16
ret i16 %a
}
@@ -98,36 +98,36 @@ define ptx_device i16 @cvt_i16_f64(double %x) {
; i32
define ptx_device i32 @cvt_i32_preds(i1 %x) {
-; CHECK: selp.u32 r{{[0-9]+}}, 1, 0, p{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p{{[0-9]+}};
+; CHECK: ret;
%a = zext i1 %x to i32
ret i32 %a
}
define ptx_device i32 @cvt_i32_i16(i16 %x) {
-; CHECK: cvt.u32.u16 r{{[0-9]+}}, rh{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.u32.u16 %ret{{[0-9]+}}, %rh{{[0-9]+}};
+; CHECK: ret;
%a = zext i16 %x to i32
ret i32 %a
}
define ptx_device i32 @cvt_i32_i64(i64 %x) {
-; CHECK: cvt.u32.u64 r{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.u32.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}};
+; CHECK: ret;
%a = trunc i64 %x to i32
ret i32 %a
}
define ptx_device i32 @cvt_i32_f32(float %x) {
-; CHECK: cvt.rzi.u32.f32 r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rzi.u32.f32 %ret{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%a = fptoui float %x to i32
ret i32 %a
}
define ptx_device i32 @cvt_i32_f64(double %x) {
-; CHECK: cvt.rzi.u32.f64 r{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rzi.u32.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
%a = fptoui double %x to i32
ret i32 %a
}
@@ -135,35 +135,35 @@ define ptx_device i32 @cvt_i32_f64(double %x) {
; i64
define ptx_device i64 @cvt_i64_preds(i1 %x) {
-; CHECK: selp.u64 rd{{[0-9]+}}, 1, 0, p{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: selp.u64 %ret{{[0-9]+}}, 1, 0, %p{{[0-9]+}};
+; CHECK: ret;
%a = zext i1 %x to i64
ret i64 %a
}
define ptx_device i64 @cvt_i64_i16(i16 %x) {
-; CHECK: cvt.u64.u16 rd{{[0-9]+}}, rh{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.u64.u16 %ret{{[0-9]+}}, %rh{{[0-9]+}};
+; CHECK: ret;
%a = zext i16 %x to i64
ret i64 %a
}
define ptx_device i64 @cvt_i64_i32(i32 %x) {
-; CHECK: cvt.u64.u32 rd{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.u64.u32 %ret{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: ret;
%a = zext i32 %x to i64
ret i64 %a
}
define ptx_device i64 @cvt_i64_f32(float %x) {
-; CHECK: cvt.rzi.u64.f32 rd{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rzi.u64.f32 %ret{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%a = fptoui float %x to i64
ret i64 %a
}
define ptx_device i64 @cvt_i64_f64(double %x) {
-; CHECK: cvt.rzi.u64.f64 rd{{[0-9]+}}, rd{{[0-9]+}};
+; CHECK: cvt.rzi.u64.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
%a = fptoui double %x to i64
ret i64 %a
@@ -172,73 +172,119 @@ define ptx_device i64 @cvt_i64_f64(double %x) {
; f32
define ptx_device float @cvt_f32_preds(i1 %x) {
-; CHECK: selp.f32 r{{[0-9]+}}, 0F3F800000, 0F00000000, p{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: mov.b32 %f0, 1065353216;
+; CHECK: mov.b32 %f1, 0;
+; CHECK: selp.f32 %ret{{[0-9]+}}, %f0, %f1, %p{{[0-9]+}};
+; CHECK: ret;
%a = uitofp i1 %x to float
ret float %a
}
define ptx_device float @cvt_f32_i16(i16 %x) {
-; CHECK: cvt.rn.f32.u16 r{{[0-9]+}}, rh{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rn.f32.u16 %ret{{[0-9]+}}, %rh{{[0-9]+}};
+; CHECK: ret;
%a = uitofp i16 %x to float
ret float %a
}
define ptx_device float @cvt_f32_i32(i32 %x) {
-; CHECK: cvt.rn.f32.u32 r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rn.f32.u32 %ret{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: ret;
%a = uitofp i32 %x to float
ret float %a
}
define ptx_device float @cvt_f32_i64(i64 %x) {
-; CHECK: cvt.rn.f32.u64 r{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rn.f32.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}};
+; CHECK: ret;
%a = uitofp i64 %x to float
ret float %a
}
define ptx_device float @cvt_f32_f64(double %x) {
-; CHECK: cvt.rn.f32.f64 r{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rn.f32.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
%a = fptrunc double %x to float
ret float %a
}
+define ptx_device float @cvt_f32_s16(i16 %x) {
+; CHECK: cvt.rn.f32.s16 %ret{{[0-9]+}}, %rh{{[0-9]+}}
+; CHECK: ret
+ %a = sitofp i16 %x to float
+ ret float %a
+}
+
+define ptx_device float @cvt_f32_s32(i32 %x) {
+; CHECK: cvt.rn.f32.s32 %ret{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: ret
+ %a = sitofp i32 %x to float
+ ret float %a
+}
+
+define ptx_device float @cvt_f32_s64(i64 %x) {
+; CHECK: cvt.rn.f32.s64 %ret{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: ret
+ %a = sitofp i64 %x to float
+ ret float %a
+}
+
; f64
define ptx_device double @cvt_f64_preds(i1 %x) {
-; CHECK: selp.f64 rd{{[0-9]+}}, 0D3F80000000000000, 0D0000000000000000, p{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: mov.b64 %fd0, 4575657221408423936;
+; CHECK: mov.b64 %fd1, 0;
+; CHECK: selp.f64 %ret{{[0-9]+}}, %fd0, %fd1, %p{{[0-9]+}};
+; CHECK: ret;
%a = uitofp i1 %x to double
ret double %a
}
define ptx_device double @cvt_f64_i16(i16 %x) {
-; CHECK: cvt.rn.f64.u16 rd{{[0-9]+}}, rh{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rn.f64.u16 %ret{{[0-9]+}}, %rh{{[0-9]+}};
+; CHECK: ret;
%a = uitofp i16 %x to double
ret double %a
}
define ptx_device double @cvt_f64_i32(i32 %x) {
-; CHECK: cvt.rn.f64.u32 rd{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rn.f64.u32 %ret{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: ret;
%a = uitofp i32 %x to double
ret double %a
}
define ptx_device double @cvt_f64_i64(i64 %x) {
-; CHECK: cvt.rn.f64.u64 rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.rn.f64.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}};
+; CHECK: ret;
%a = uitofp i64 %x to double
ret double %a
}
define ptx_device double @cvt_f64_f32(float %x) {
-; CHECK: cvt.f64.f32 rd{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cvt.f64.f32 %ret{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%a = fpext float %x to double
ret double %a
}
+
+define ptx_device double @cvt_f64_s16(i16 %x) {
+; CHECK: cvt.rn.f64.s16 %ret{{[0-9]+}}, %rh{{[0-9]+}}
+; CHECK: ret
+ %a = sitofp i16 %x to double
+ ret double %a
+}
+
+define ptx_device double @cvt_f64_s32(i32 %x) {
+; CHECK: cvt.rn.f64.s32 %ret{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: ret
+ %a = sitofp i32 %x to double
+ ret double %a
+}
+
+define ptx_device double @cvt_f64_s64(i64 %x) {
+; CHECK: cvt.rn.f64.s64 %ret{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: ret
+ %a = sitofp i64 %x to double
+ ret double %a
+}
diff --git a/test/CodeGen/PTX/fdiv-sm10.ll b/test/CodeGen/PTX/fdiv-sm10.ll
index 049d891..e1013be 100644
--- a/test/CodeGen/PTX/fdiv-sm10.ll
+++ b/test/CodeGen/PTX/fdiv-sm10.ll
@@ -1,15 +1,15 @@
; RUN: llc < %s -march=ptx32 -mattr=+sm10 | FileCheck %s
define ptx_device float @t1_f32(float %x, float %y) {
-; CHECK: div.f32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: div.f32 %ret{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%a = fdiv float %x, %y
ret float %a
}
define ptx_device double @t1_f64(double %x, double %y) {
-; CHECK: div.f64 rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: div.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
%a = fdiv double %x, %y
ret double %a
}
diff --git a/test/CodeGen/PTX/fdiv-sm13.ll b/test/CodeGen/PTX/fdiv-sm13.ll
index 2d95339..1afa2eb 100644
--- a/test/CodeGen/PTX/fdiv-sm13.ll
+++ b/test/CodeGen/PTX/fdiv-sm13.ll
@@ -1,15 +1,15 @@
; RUN: llc < %s -march=ptx32 -mattr=+sm13 | FileCheck %s
define ptx_device float @t1_f32(float %x, float %y) {
-; CHECK: div.rn.f32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: div.rn.f32 %ret{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%a = fdiv float %x, %y
ret float %a
}
define ptx_device double @t1_f64(double %x, double %y) {
-; CHECK: div.rn.f64 rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: div.rn.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
%a = fdiv double %x, %y
ret double %a
}
diff --git a/test/CodeGen/PTX/fneg.ll b/test/CodeGen/PTX/fneg.ll
index 66ca74a..2b76e63 100644
--- a/test/CodeGen/PTX/fneg.ll
+++ b/test/CodeGen/PTX/fneg.ll
@@ -1,15 +1,15 @@
; RUN: llc < %s -march=ptx32 | FileCheck %s
define ptx_device float @t1_f32(float %x) {
-; CHECK: neg.f32 r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: neg.f32 %ret{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%y = fsub float -0.000000e+00, %x
ret float %y
}
define ptx_device double @t1_f64(double %x) {
-; CHECK: neg.f64 rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: neg.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
%y = fsub double -0.000000e+00, %x
ret double %y
}
diff --git a/test/CodeGen/PTX/intrinsic.ll b/test/CodeGen/PTX/intrinsic.ll
index af987d6..9f37ead 100644
--- a/test/CodeGen/PTX/intrinsic.ll
+++ b/test/CodeGen/PTX/intrinsic.ll
@@ -1,239 +1,239 @@
; RUN: llc < %s -march=ptx32 -mattr=+ptx20 | FileCheck %s
define ptx_device i32 @test_tid_x() {
-; CHECK: mov.u32 r0, %tid.x;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %tid.x;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.tid.x()
ret i32 %x
}
define ptx_device i32 @test_tid_y() {
-; CHECK: mov.u32 r0, %tid.y;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %tid.y;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.tid.y()
ret i32 %x
}
define ptx_device i32 @test_tid_z() {
-; CHECK: mov.u32 r0, %tid.z;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %tid.z;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.tid.z()
ret i32 %x
}
define ptx_device i32 @test_tid_w() {
-; CHECK: mov.u32 r0, %tid.w;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %tid.w;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.tid.w()
ret i32 %x
}
define ptx_device i32 @test_ntid_x() {
-; CHECK: mov.u32 r0, %ntid.x;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %ntid.x;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.ntid.x()
ret i32 %x
}
define ptx_device i32 @test_ntid_y() {
-; CHECK: mov.u32 r0, %ntid.y;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %ntid.y;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.ntid.y()
ret i32 %x
}
define ptx_device i32 @test_ntid_z() {
-; CHECK: mov.u32 r0, %ntid.z;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %ntid.z;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.ntid.z()
ret i32 %x
}
define ptx_device i32 @test_ntid_w() {
-; CHECK: mov.u32 r0, %ntid.w;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %ntid.w;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.ntid.w()
ret i32 %x
}
define ptx_device i32 @test_laneid() {
-; CHECK: mov.u32 r0, %laneid;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %laneid;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.laneid()
ret i32 %x
}
define ptx_device i32 @test_warpid() {
-; CHECK: mov.u32 r0, %warpid;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %warpid;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.warpid()
ret i32 %x
}
define ptx_device i32 @test_nwarpid() {
-; CHECK: mov.u32 r0, %nwarpid;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %nwarpid;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.nwarpid()
ret i32 %x
}
define ptx_device i32 @test_ctaid_x() {
-; CHECK: mov.u32 r0, %ctaid.x;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %ctaid.x;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.ctaid.x()
ret i32 %x
}
define ptx_device i32 @test_ctaid_y() {
-; CHECK: mov.u32 r0, %ctaid.y;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %ctaid.y;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.ctaid.y()
ret i32 %x
}
define ptx_device i32 @test_ctaid_z() {
-; CHECK: mov.u32 r0, %ctaid.z;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %ctaid.z;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.ctaid.z()
ret i32 %x
}
define ptx_device i32 @test_ctaid_w() {
-; CHECK: mov.u32 r0, %ctaid.w;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %ctaid.w;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.ctaid.w()
ret i32 %x
}
define ptx_device i32 @test_nctaid_x() {
-; CHECK: mov.u32 r0, %nctaid.x;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %nctaid.x;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.nctaid.x()
ret i32 %x
}
define ptx_device i32 @test_nctaid_y() {
-; CHECK: mov.u32 r0, %nctaid.y;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %nctaid.y;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.nctaid.y()
ret i32 %x
}
define ptx_device i32 @test_nctaid_z() {
-; CHECK: mov.u32 r0, %nctaid.z;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %nctaid.z;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.nctaid.z()
ret i32 %x
}
define ptx_device i32 @test_nctaid_w() {
-; CHECK: mov.u32 r0, %nctaid.w;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %nctaid.w;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.nctaid.w()
ret i32 %x
}
define ptx_device i32 @test_smid() {
-; CHECK: mov.u32 r0, %smid;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %smid;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.smid()
ret i32 %x
}
define ptx_device i32 @test_nsmid() {
-; CHECK: mov.u32 r0, %nsmid;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %nsmid;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.nsmid()
ret i32 %x
}
define ptx_device i32 @test_gridid() {
-; CHECK: mov.u32 r0, %gridid;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %gridid;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.gridid()
ret i32 %x
}
define ptx_device i32 @test_lanemask_eq() {
-; CHECK: mov.u32 r0, %lanemask_eq;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %lanemask_eq;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.lanemask.eq()
ret i32 %x
}
define ptx_device i32 @test_lanemask_le() {
-; CHECK: mov.u32 r0, %lanemask_le;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %lanemask_le;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.lanemask.le()
ret i32 %x
}
define ptx_device i32 @test_lanemask_lt() {
-; CHECK: mov.u32 r0, %lanemask_lt;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %lanemask_lt;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.lanemask.lt()
ret i32 %x
}
define ptx_device i32 @test_lanemask_ge() {
-; CHECK: mov.u32 r0, %lanemask_ge;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %lanemask_ge;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.lanemask.ge()
ret i32 %x
}
define ptx_device i32 @test_lanemask_gt() {
-; CHECK: mov.u32 r0, %lanemask_gt;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %lanemask_gt;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.lanemask.gt()
ret i32 %x
}
define ptx_device i32 @test_clock() {
-; CHECK: mov.u32 r0, %clock;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %clock;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.clock()
ret i32 %x
}
define ptx_device i64 @test_clock64() {
-; CHECK: mov.u64 rd0, %clock64;
-; CHECK-NEXT: ret;
+; CHECK: mov.u64 %ret0, %clock64;
+; CHECK: ret;
%x = call i64 @llvm.ptx.read.clock64()
ret i64 %x
}
define ptx_device i32 @test_pm0() {
-; CHECK: mov.u32 r0, %pm0;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %pm0;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.pm0()
ret i32 %x
}
define ptx_device i32 @test_pm1() {
-; CHECK: mov.u32 r0, %pm1;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %pm1;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.pm1()
ret i32 %x
}
define ptx_device i32 @test_pm2() {
-; CHECK: mov.u32 r0, %pm2;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %pm2;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.pm2()
ret i32 %x
}
define ptx_device i32 @test_pm3() {
-; CHECK: mov.u32 r0, %pm3;
-; CHECK-NEXT: ret;
+; CHECK: mov.u32 %ret0, %pm3;
+; CHECK: ret;
%x = call i32 @llvm.ptx.read.pm3()
ret i32 %x
}
define ptx_device void @test_bar_sync() {
; CHECK: bar.sync 0
-; CHECK-NEXT: ret;
+; CHECK: ret;
call void @llvm.ptx.bar.sync(i32 0)
ret void
}
diff --git a/test/CodeGen/PTX/ld.ll b/test/CodeGen/PTX/ld.ll
index d184d12..81fd33a 100644
--- a/test/CodeGen/PTX/ld.ll
+++ b/test/CodeGen/PTX/ld.ll
@@ -6,9 +6,6 @@
;CHECK: .extern .const .b8 array_constant_i16[20];
@array_constant_i16 = external addrspace(1) constant [10 x i16]
-;CHECK: .extern .local .b8 array_local_i16[20];
-@array_local_i16 = external addrspace(2) global [10 x i16]
-
;CHECK: .extern .shared .b8 array_shared_i16[20];
@array_shared_i16 = external addrspace(4) global [10 x i16]
@@ -18,9 +15,6 @@
;CHECK: .extern .const .b8 array_constant_i32[40];
@array_constant_i32 = external addrspace(1) constant [10 x i32]
-;CHECK: .extern .local .b8 array_local_i32[40];
-@array_local_i32 = external addrspace(2) global [10 x i32]
-
;CHECK: .extern .shared .b8 array_shared_i32[40];
@array_shared_i32 = external addrspace(4) global [10 x i32]
@@ -30,9 +24,6 @@
;CHECK: .extern .const .b8 array_constant_i64[80];
@array_constant_i64 = external addrspace(1) constant [10 x i64]
-;CHECK: .extern .local .b8 array_local_i64[80];
-@array_local_i64 = external addrspace(2) global [10 x i64]
-
;CHECK: .extern .shared .b8 array_shared_i64[80];
@array_shared_i64 = external addrspace(4) global [10 x i64]
@@ -42,9 +33,6 @@
;CHECK: .extern .const .b8 array_constant_float[40];
@array_constant_float = external addrspace(1) constant [10 x float]
-;CHECK: .extern .local .b8 array_local_float[40];
-@array_local_float = external addrspace(2) global [10 x float]
-
;CHECK: .extern .shared .b8 array_shared_float[40];
@array_shared_float = external addrspace(4) global [10 x float]
@@ -54,57 +42,54 @@
;CHECK: .extern .const .b8 array_constant_double[80];
@array_constant_double = external addrspace(1) constant [10 x double]
-;CHECK: .extern .local .b8 array_local_double[80];
-@array_local_double = external addrspace(2) global [10 x double]
-
;CHECK: .extern .shared .b8 array_shared_double[80];
@array_shared_double = external addrspace(4) global [10 x double]
define ptx_device i16 @t1_u16(i16* %p) {
entry:
-;CHECK: ld.global.u16 rh{{[0-9]+}}, [r{{[0-9]+}}];
-;CHECK-NEXT: ret;
+;CHECK: ld.global.u16 %ret{{[0-9]+}}, [%r{{[0-9]+}}];
+;CHECK: ret;
%x = load i16* %p
ret i16 %x
}
define ptx_device i32 @t1_u32(i32* %p) {
entry:
-;CHECK: ld.global.u32 r{{[0-9]+}}, [r{{[0-9]+}}];
-;CHECK-NEXT: ret;
+;CHECK: ld.global.u32 %ret{{[0-9]+}}, [%r{{[0-9]+}}];
+;CHECK: ret;
%x = load i32* %p
ret i32 %x
}
define ptx_device i64 @t1_u64(i64* %p) {
entry:
-;CHECK: ld.global.u64 rd{{[0-9]+}}, [r{{[0-9]+}}];
-;CHECK-NEXT: ret;
+;CHECK: ld.global.u64 %ret{{[0-9]+}}, [%r{{[0-9]+}}];
+;CHECK: ret;
%x = load i64* %p
ret i64 %x
}
define ptx_device float @t1_f32(float* %p) {
entry:
-;CHECK: ld.global.f32 r{{[0-9]+}}, [r{{[0-9]+}}];
-;CHECK-NEXT: ret;
+;CHECK: ld.global.f32 %ret{{[0-9]+}}, [%r{{[0-9]+}}];
+;CHECK: ret;
%x = load float* %p
ret float %x
}
define ptx_device double @t1_f64(double* %p) {
entry:
-;CHECK: ld.global.f64 rd{{[0-9]+}}, [r{{[0-9]+}}];
-;CHECK-NEXT: ret;
+;CHECK: ld.global.f64 %ret{{[0-9]+}}, [%r{{[0-9]+}}];
+;CHECK: ret;
%x = load double* %p
ret double %x
}
define ptx_device i16 @t2_u16(i16* %p) {
entry:
-;CHECK: ld.global.u16 rh{{[0-9]+}}, [r{{[0-9]+}}+2];
-;CHECK-NEXT: ret;
+;CHECK: ld.global.u16 %ret{{[0-9]+}}, [%r{{[0-9]+}}+2];
+;CHECK: ret;
%i = getelementptr i16* %p, i32 1
%x = load i16* %i
ret i16 %x
@@ -112,8 +97,8 @@ entry:
define ptx_device i32 @t2_u32(i32* %p) {
entry:
-;CHECK: ld.global.u32 r{{[0-9]+}}, [r{{[0-9]+}}+4];
-;CHECK-NEXT: ret;
+;CHECK: ld.global.u32 %ret{{[0-9]+}}, [%r{{[0-9]+}}+4];
+;CHECK: ret;
%i = getelementptr i32* %p, i32 1
%x = load i32* %i
ret i32 %x
@@ -121,8 +106,8 @@ entry:
define ptx_device i64 @t2_u64(i64* %p) {
entry:
-;CHECK: ld.global.u64 rd{{[0-9]+}}, [r{{[0-9]+}}+8];
-;CHECK-NEXT: ret;
+;CHECK: ld.global.u64 %ret{{[0-9]+}}, [%r{{[0-9]+}}+8];
+;CHECK: ret;
%i = getelementptr i64* %p, i32 1
%x = load i64* %i
ret i64 %x
@@ -130,8 +115,8 @@ entry:
define ptx_device float @t2_f32(float* %p) {
entry:
-;CHECK: ld.global.f32 r{{[0-9]+}}, [r{{[0-9]+}}+4];
-;CHECK-NEXT: ret;
+;CHECK: ld.global.f32 %ret{{[0-9]+}}, [%r{{[0-9]+}}+4];
+;CHECK: ret;
%i = getelementptr float* %p, i32 1
%x = load float* %i
ret float %x
@@ -139,8 +124,8 @@ entry:
define ptx_device double @t2_f64(double* %p) {
entry:
-;CHECK: ld.global.f64 rd{{[0-9]+}}, [r{{[0-9]+}}+8];
-;CHECK-NEXT: ret;
+;CHECK: ld.global.f64 %ret{{[0-9]+}}, [%r{{[0-9]+}}+8];
+;CHECK: ret;
%i = getelementptr double* %p, i32 1
%x = load double* %i
ret double %x
@@ -148,9 +133,9 @@ entry:
define ptx_device i16 @t3_u16(i16* %p, i32 %q) {
entry:
-;CHECK: shl.b32 r[[R0:[0-9]+]], r{{[0-9]+}}, 1;
-;CHECK-NEXT: add.u32 r[[R0]], r{{[0-9]+}}, r[[R0]];
-;CHECK-NEXT: ld.global.u16 rh{{[0-9]+}}, [r[[R0]]];
+;CHECK: shl.b32 %r[[R0:[0-9]+]], %r{{[0-9]+}}, 1;
+;CHECK: add.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r[[R0]];
+;CHECK: ld.global.u16 %ret{{[0-9]+}}, [%r{{[0-9]+}}];
%i = getelementptr i16* %p, i32 %q
%x = load i16* %i
ret i16 %x
@@ -158,9 +143,9 @@ entry:
define ptx_device i32 @t3_u32(i32* %p, i32 %q) {
entry:
-;CHECK: shl.b32 r[[R0:[0-9]+]], r{{[0-9]+}}, 2;
-;CHECK-NEXT: add.u32 r[[R0]], r{{[0-9]+}}, r[[R0]];
-;CHECK-NEXT: ld.global.u32 r{{[0-9]+}}, [r[[R0]]];
+;CHECK: shl.b32 %r[[R0:[0-9]+]], %r{{[0-9]+}}, 2;
+;CHECK: add.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r[[R0]];
+;CHECK: ld.global.u32 %ret{{[0-9]+}}, [%r{{[0-9]+}}];
%i = getelementptr i32* %p, i32 %q
%x = load i32* %i
ret i32 %x
@@ -168,9 +153,9 @@ entry:
define ptx_device i64 @t3_u64(i64* %p, i32 %q) {
entry:
-;CHECK: shl.b32 r[[R0:[0-9]+]], r{{[0-9]+}}, 3;
-;CHECK-NEXT: add.u32 r[[R0]], r{{[0-9]+}}, r[[R0]];
-;CHECK-NEXT: ld.global.u64 rd{{[0-9]+}}, [r[[R0]]];
+;CHECK: shl.b32 %r[[R0:[0-9]+]], %r{{[0-9]+}}, 3;
+;CHECK: add.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r[[R0]];
+;CHECK: ld.global.u64 %ret{{[0-9]+}}, [%r{{[0-9]+}}];
%i = getelementptr i64* %p, i32 %q
%x = load i64* %i
ret i64 %x
@@ -178,9 +163,9 @@ entry:
define ptx_device float @t3_f32(float* %p, i32 %q) {
entry:
-;CHECK: shl.b32 r[[R0:[0-9]+]], r{{[0-9]+}}, 2;
-;CHECK-NEXT: add.u32 r[[R0]], r{{[0-9]+}}, r[[R0]];
-;CHECK-NEXT: ld.global.f32 r{{[0-9]+}}, [r[[R0]]];
+;CHECK: shl.b32 %r[[R0:[0-9]+]], %r{{[0-9]+}}, 2;
+;CHECK: add.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r[[R0]];
+;CHECK: ld.global.f32 %ret{{[0-9]+}}, [%r{{[0-9]+}}];
%i = getelementptr float* %p, i32 %q
%x = load float* %i
ret float %x
@@ -188,9 +173,9 @@ entry:
define ptx_device double @t3_f64(double* %p, i32 %q) {
entry:
-;CHECK: shl.b32 r[[R0:[0-9]+]], r{{[0-9]+}}, 3;
-;CHECK-NEXT: add.u32 r[[R0]], r{{[0-9]+}}, r[[R0]];
-;CHECK-NEXT: ld.global.f64 rd{{[0-9]+}}, [r[[R0]]];
+;CHECK: shl.b32 %r[[R0:[0-9]+]], %r{{[0-9]+}}, 3;
+;CHECK: add.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r[[R0]];
+;CHECK: ld.global.f64 %ret{{[0-9]+}}, [%r{{[0-9]+}}];
%i = getelementptr double* %p, i32 %q
%x = load double* %i
ret double %x
@@ -198,9 +183,9 @@ entry:
define ptx_device i16 @t4_global_u16() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i16;
-;CHECK-NEXT: ld.global.u16 rh{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i16;
+;CHECK: ld.global.u16 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x i16]* @array_i16, i32 0, i32 0
%x = load i16* %i
ret i16 %x
@@ -208,9 +193,9 @@ entry:
define ptx_device i32 @t4_global_u32() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i32;
-;CHECK-NEXT: ld.global.u32 r{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i32;
+;CHECK: ld.global.u32 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x i32]* @array_i32, i32 0, i32 0
%x = load i32* %i
ret i32 %x
@@ -218,9 +203,9 @@ entry:
define ptx_device i64 @t4_global_u64() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i64;
-;CHECK-NEXT: ld.global.u64 rd{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i64;
+;CHECK: ld.global.u64 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x i64]* @array_i64, i32 0, i32 0
%x = load i64* %i
ret i64 %x
@@ -228,9 +213,9 @@ entry:
define ptx_device float @t4_global_f32() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_float;
-;CHECK-NEXT: ld.global.f32 r{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_float;
+;CHECK: ld.global.f32 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x float]* @array_float, i32 0, i32 0
%x = load float* %i
ret float %x
@@ -238,9 +223,9 @@ entry:
define ptx_device double @t4_global_f64() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_double;
-;CHECK-NEXT: ld.global.f64 rd{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_double;
+;CHECK: ld.global.f64 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x double]* @array_double, i32 0, i32 0
%x = load double* %i
ret double %x
@@ -248,9 +233,9 @@ entry:
define ptx_device i16 @t4_const_u16() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_constant_i16;
-;CHECK-NEXT: ld.const.u16 rh{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_constant_i16;
+;CHECK: ld.const.u16 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x i16] addrspace(1)* @array_constant_i16, i32 0, i32 0
%x = load i16 addrspace(1)* %i
ret i16 %x
@@ -258,9 +243,9 @@ entry:
define ptx_device i32 @t4_const_u32() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_constant_i32;
-;CHECK-NEXT: ld.const.u32 r{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_constant_i32;
+;CHECK: ld.const.u32 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x i32] addrspace(1)* @array_constant_i32, i32 0, i32 0
%x = load i32 addrspace(1)* %i
ret i32 %x
@@ -268,9 +253,9 @@ entry:
define ptx_device i64 @t4_const_u64() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_constant_i64;
-;CHECK-NEXT: ld.const.u64 rd{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_constant_i64;
+;CHECK: ld.const.u64 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x i64] addrspace(1)* @array_constant_i64, i32 0, i32 0
%x = load i64 addrspace(1)* %i
ret i64 %x
@@ -278,9 +263,9 @@ entry:
define ptx_device float @t4_const_f32() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_constant_float;
-;CHECK-NEXT: ld.const.f32 r{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_constant_float;
+;CHECK: ld.const.f32 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x float] addrspace(1)* @array_constant_float, i32 0, i32 0
%x = load float addrspace(1)* %i
ret float %x
@@ -288,69 +273,19 @@ entry:
define ptx_device double @t4_const_f64() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_constant_double;
-;CHECK-NEXT: ld.const.f64 rd{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_constant_double;
+;CHECK: ld.const.f64 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x double] addrspace(1)* @array_constant_double, i32 0, i32 0
%x = load double addrspace(1)* %i
ret double %x
}
-define ptx_device i16 @t4_local_u16() {
-entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_local_i16;
-;CHECK-NEXT: ld.local.u16 rh{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
- %i = getelementptr [10 x i16] addrspace(2)* @array_local_i16, i32 0, i32 0
- %x = load i16 addrspace(2)* %i
- ret i16 %x
-}
-
-define ptx_device i32 @t4_local_u32() {
-entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_local_i32;
-;CHECK-NEXT: ld.local.u32 r{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
- %i = getelementptr [10 x i32] addrspace(2)* @array_local_i32, i32 0, i32 0
- %x = load i32 addrspace(2)* %i
- ret i32 %x
-}
-
-define ptx_device i64 @t4_local_u64() {
-entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_local_i64;
-;CHECK-NEXT: ld.local.u64 rd{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
- %i = getelementptr [10 x i64] addrspace(2)* @array_local_i64, i32 0, i32 0
- %x = load i64 addrspace(2)* %i
- ret i64 %x
-}
-
-define ptx_device float @t4_local_f32() {
-entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_local_float;
-;CHECK-NEXT: ld.local.f32 r{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
- %i = getelementptr [10 x float] addrspace(2)* @array_local_float, i32 0, i32 0
- %x = load float addrspace(2)* %i
- ret float %x
-}
-
-define ptx_device double @t4_local_f64() {
-entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_local_double;
-;CHECK-NEXT: ld.local.f64 rd{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
- %i = getelementptr [10 x double] addrspace(2)* @array_local_double, i32 0, i32 0
- %x = load double addrspace(2)* %i
- ret double %x
-}
-
define ptx_device i16 @t4_shared_u16() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_shared_i16;
-;CHECK-NEXT: ld.shared.u16 rh{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_shared_i16;
+;CHECK: ld.shared.u16 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x i16] addrspace(4)* @array_shared_i16, i32 0, i32 0
%x = load i16 addrspace(4)* %i
ret i16 %x
@@ -358,9 +293,9 @@ entry:
define ptx_device i32 @t4_shared_u32() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_shared_i32;
-;CHECK-NEXT: ld.shared.u32 r{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_shared_i32;
+;CHECK: ld.shared.u32 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x i32] addrspace(4)* @array_shared_i32, i32 0, i32 0
%x = load i32 addrspace(4)* %i
ret i32 %x
@@ -368,9 +303,9 @@ entry:
define ptx_device i64 @t4_shared_u64() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_shared_i64;
-;CHECK-NEXT: ld.shared.u64 rd{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_shared_i64;
+;CHECK: ld.shared.u64 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x i64] addrspace(4)* @array_shared_i64, i32 0, i32 0
%x = load i64 addrspace(4)* %i
ret i64 %x
@@ -378,9 +313,9 @@ entry:
define ptx_device float @t4_shared_f32() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_shared_float;
-;CHECK-NEXT: ld.shared.f32 r{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_shared_float;
+;CHECK: ld.shared.f32 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x float] addrspace(4)* @array_shared_float, i32 0, i32 0
%x = load float addrspace(4)* %i
ret float %x
@@ -388,9 +323,9 @@ entry:
define ptx_device double @t4_shared_f64() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_shared_double;
-;CHECK-NEXT: ld.shared.f64 rd{{[0-9]+}}, [r[[R0]]];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_shared_double;
+;CHECK: ld.shared.f64 %ret{{[0-9]+}}, [%r[[R0]]];
+;CHECK: ret;
%i = getelementptr [10 x double] addrspace(4)* @array_shared_double, i32 0, i32 0
%x = load double addrspace(4)* %i
ret double %x
@@ -398,9 +333,9 @@ entry:
define ptx_device i16 @t5_u16() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i16;
-;CHECK-NEXT: ld.global.u16 rh{{[0-9]+}}, [r[[R0]]+2];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i16;
+;CHECK: ld.global.u16 %ret{{[0-9]+}}, [%r[[R0]]+2];
+;CHECK: ret;
%i = getelementptr [10 x i16]* @array_i16, i32 0, i32 1
%x = load i16* %i
ret i16 %x
@@ -408,9 +343,9 @@ entry:
define ptx_device i32 @t5_u32() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i32;
-;CHECK-NEXT: ld.global.u32 r{{[0-9]+}}, [r[[R0]]+4];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i32;
+;CHECK: ld.global.u32 %ret{{[0-9]+}}, [%r[[R0]]+4];
+;CHECK: ret;
%i = getelementptr [10 x i32]* @array_i32, i32 0, i32 1
%x = load i32* %i
ret i32 %x
@@ -418,9 +353,9 @@ entry:
define ptx_device i64 @t5_u64() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i64;
-;CHECK-NEXT: ld.global.u64 rd{{[0-9]+}}, [r[[R0]]+8];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i64;
+;CHECK: ld.global.u64 %ret{{[0-9]+}}, [%r[[R0]]+8];
+;CHECK: ret;
%i = getelementptr [10 x i64]* @array_i64, i32 0, i32 1
%x = load i64* %i
ret i64 %x
@@ -428,9 +363,9 @@ entry:
define ptx_device float @t5_f32() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_float;
-;CHECK-NEXT: ld.global.f32 r{{[0-9]+}}, [r[[R0]]+4];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_float;
+;CHECK: ld.global.f32 %ret{{[0-9]+}}, [%r[[R0]]+4];
+;CHECK: ret;
%i = getelementptr [10 x float]* @array_float, i32 0, i32 1
%x = load float* %i
ret float %x
@@ -438,9 +373,9 @@ entry:
define ptx_device double @t5_f64() {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_double;
-;CHECK-NEXT: ld.global.f64 rd{{[0-9]+}}, [r[[R0]]+8];
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_double;
+;CHECK: ld.global.f64 %ret{{[0-9]+}}, [%r[[R0]]+8];
+;CHECK: ret;
%i = getelementptr [10 x double]* @array_double, i32 0, i32 1
%x = load double* %i
ret double %x
diff --git a/test/CodeGen/PTX/llvm-intrinsic.ll b/test/CodeGen/PTX/llvm-intrinsic.ll
index 4611c54..e73ad25 100644
--- a/test/CodeGen/PTX/llvm-intrinsic.ll
+++ b/test/CodeGen/PTX/llvm-intrinsic.ll
@@ -2,48 +2,48 @@
define ptx_device float @test_sqrt_f32(float %x) {
entry:
-; CHECK: sqrt.rn.f32 r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: sqrt.rn.f32 %ret{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%y = call float @llvm.sqrt.f32(float %x)
ret float %y
}
define ptx_device double @test_sqrt_f64(double %x) {
entry:
-; CHECK: sqrt.rn.f64 rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: sqrt.rn.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
%y = call double @llvm.sqrt.f64(double %x)
ret double %y
}
define ptx_device float @test_sin_f32(float %x) {
entry:
-; CHECK: sin.approx.f32 r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: sin.approx.f32 %ret{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%y = call float @llvm.sin.f32(float %x)
ret float %y
}
define ptx_device double @test_sin_f64(double %x) {
entry:
-; CHECK: sin.approx.f64 rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: sin.approx.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
%y = call double @llvm.sin.f64(double %x)
ret double %y
}
define ptx_device float @test_cos_f32(float %x) {
entry:
-; CHECK: cos.approx.f32 r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cos.approx.f32 %ret{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%y = call float @llvm.cos.f32(float %x)
ret float %y
}
define ptx_device double @test_cos_f64(double %x) {
entry:
-; CHECK: cos.approx.f64 rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: cos.approx.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
%y = call double @llvm.cos.f64(double %x)
ret double %y
}
diff --git a/test/CodeGen/PTX/mad.ll b/test/CodeGen/PTX/mad.ll
index 0e4d3f9..cc28e3f 100644
--- a/test/CodeGen/PTX/mad.ll
+++ b/test/CodeGen/PTX/mad.ll
@@ -1,16 +1,16 @@
; RUN: llc < %s -march=ptx32 -mattr=+sm13 | FileCheck %s
define ptx_device float @t1_f32(float %x, float %y, float %z) {
-; CHECK: mad.rn.f32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: mad.rn.f32 %ret{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
%a = fmul float %x, %y
%b = fadd float %a, %z
ret float %b
}
define ptx_device double @t1_f64(double %x, double %y, double %z) {
-; CHECK: mad.rn.f64 rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: mad.rn.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
%a = fmul double %x, %y
%b = fadd double %a, %z
ret double %b
diff --git a/test/CodeGen/PTX/mov.ll b/test/CodeGen/PTX/mov.ll
index cce6a5b..75555a7 100644
--- a/test/CodeGen/PTX/mov.ll
+++ b/test/CodeGen/PTX/mov.ll
@@ -1,62 +1,62 @@
; RUN: llc < %s -march=ptx32 | FileCheck %s
define ptx_device i16 @t1_u16() {
-; CHECK: mov.u16 rh{{[0-9]+}}, 0;
+; CHECK: mov.u16 %ret{{[0-9]+}}, 0;
; CHECK: ret;
ret i16 0
}
define ptx_device i32 @t1_u32() {
-; CHECK: mov.u32 r{{[0-9]+}}, 0;
+; CHECK: mov.u32 %ret{{[0-9]+}}, 0;
; CHECK: ret;
ret i32 0
}
define ptx_device i64 @t1_u64() {
-; CHECK: mov.u64 rd{{[0-9]+}}, 0;
+; CHECK: mov.u64 %ret{{[0-9]+}}, 0;
; CHECK: ret;
ret i64 0
}
define ptx_device float @t1_f32() {
-; CHECK: mov.f32 r{{[0-9]+}}, 0F00000000;
+; CHECK: mov.f32 %ret{{[0-9]+}}, 0D0000000000000000;
; CHECK: ret;
ret float 0.0
}
define ptx_device double @t1_f64() {
-; CHECK: mov.f64 rd{{[0-9]+}}, 0D0000000000000000;
+; CHECK: mov.f64 %ret{{[0-9]+}}, 0D0000000000000000;
; CHECK: ret;
ret double 0.0
}
define ptx_device i16 @t2_u16(i16 %x) {
-; CHECK: mov.u16 rh{{[0-9]+}}, rh{{[0-9]+}};
+; CHECK: mov.b16 %ret{{[0-9]+}}, %param{{[0-9]+}};
; CHECK: ret;
ret i16 %x
}
define ptx_device i32 @t2_u32(i32 %x) {
-; CHECK: mov.u32 r{{[0-9]+}}, r{{[0-9]+}};
+; CHECK: mov.b32 %ret{{[0-9]+}}, %param{{[0-9]+}};
; CHECK: ret;
ret i32 %x
}
define ptx_device i64 @t2_u64(i64 %x) {
-; CHECK: mov.u64 rd{{[0-9]+}}, rd{{[0-9]+}};
+; CHECK: mov.b64 %ret{{[0-9]+}}, %param{{[0-9]+}};
; CHECK: ret;
ret i64 %x
}
define ptx_device float @t3_f32(float %x) {
-; CHECK: mov.u32 r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: mov.f32 %ret{{[0-9]+}}, %param{{[0-9]+}};
+; CHECK: ret;
ret float %x
}
define ptx_device double @t3_f64(double %x) {
-; CHECK: mov.u64 rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: mov.f64 %ret{{[0-9]+}}, %param{{[0-9]+}};
+; CHECK: ret;
ret double %x
}
diff --git a/test/CodeGen/PTX/mul.ll b/test/CodeGen/PTX/mul.ll
index 491cc74..91949db 100644
--- a/test/CodeGen/PTX/mul.ll
+++ b/test/CodeGen/PTX/mul.ll
@@ -11,29 +11,29 @@
;}
define ptx_device float @t1_f32(float %x, float %y) {
-; CHECK: mul.rn.f32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
-; CHECK-NEXT: ret;
+; CHECK: mul.rn.f32 %ret{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}
+; CHECK: ret;
%z = fmul float %x, %y
ret float %z
}
define ptx_device double @t1_f64(double %x, double %y) {
-; CHECK: mul.rn.f64 rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}}
-; CHECK-NEXT: ret;
+; CHECK: mul.rn.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
+; CHECK: ret;
%z = fmul double %x, %y
ret double %z
}
define ptx_device float @t2_f32(float %x) {
-; CHECK: mul.rn.f32 r{{[0-9]+}}, r{{[0-9]+}}, 0F40A00000;
-; CHECK-NEXT: ret;
+; CHECK: mul.rn.f32 %ret{{[0-9]+}}, %f{{[0-9]+}}, 0D4014000000000000;
+; CHECK: ret;
%z = fmul float %x, 5.0
ret float %z
}
define ptx_device double @t2_f64(double %x) {
-; CHECK: mul.rn.f64 rd{{[0-9]+}}, rd{{[0-9]+}}, 0D4014000000000000;
-; CHECK-NEXT: ret;
+; CHECK: mul.rn.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}}, 0D4014000000000000;
+; CHECK: ret;
%z = fmul double %x, 5.0
ret double %z
}
diff --git a/test/CodeGen/PTX/parameter-order.ll b/test/CodeGen/PTX/parameter-order.ll
index b16556e..09015da 100644
--- a/test/CodeGen/PTX/parameter-order.ll
+++ b/test/CodeGen/PTX/parameter-order.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=ptx32 | FileCheck %s
-; CHECK: .func (.reg .b32 r{{[0-9]+}}) test_parameter_order (.reg .b32 r{{[0-9]+}}, .reg .b32 r{{[0-9]+}}, .reg .b32 r{{[0-9]+}}, .reg .b32 r{{[0-9]+}})
+; CHECK: .func (.reg .b32 %ret{{[0-9]+}}) test_parameter_order (.reg .b32 %param{{[0-9]+}}, .reg .b32 %param{{[0-9]+}}, .reg .b32 %param{{[0-9]+}}, .reg .b32 %param{{[0-9]+}})
define ptx_device i32 @test_parameter_order(float %a, i32 %b, i32 %c, float %d) {
-; CHECK: sub.u32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
+; CHECK: sub.u32 %ret{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
%result = sub i32 %b, %c
ret i32 %result
}
diff --git a/test/CodeGen/PTX/selp.ll b/test/CodeGen/PTX/selp.ll
index e705fbe..aa7ce85 100644
--- a/test/CodeGen/PTX/selp.ll
+++ b/test/CodeGen/PTX/selp.ll
@@ -1,25 +1,25 @@
; RUN: llc < %s -march=ptx32 | FileCheck %s
define ptx_device i32 @test_selp_i32(i1 %x, i32 %y, i32 %z) {
-; CHECK: selp.u32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}, p{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %p{{[0-9]+}};
%a = select i1 %x, i32 %y, i32 %z
ret i32 %a
}
define ptx_device i64 @test_selp_i64(i1 %x, i64 %y, i64 %z) {
-; CHECK: selp.u64 rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}}, p{{[0-9]+}};
+; CHECK: selp.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}, %p{{[0-9]+}};
%a = select i1 %x, i64 %y, i64 %z
ret i64 %a
}
define ptx_device float @test_selp_f32(i1 %x, float %y, float %z) {
-; CHECK: selp.f32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}, p{{[0-9]+}};
+; CHECK: selp.f32 %ret{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %p{{[0-9]+}};
%a = select i1 %x, float %y, float %z
ret float %a
}
define ptx_device double @test_selp_f64(i1 %x, double %y, double %z) {
-; CHECK: selp.f64 rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}}, p{{[0-9]+}};
+; CHECK: selp.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %p{{[0-9]+}};
%a = select i1 %x, double %y, double %z
ret double %a
}
diff --git a/test/CodeGen/PTX/setp.ll b/test/CodeGen/PTX/setp.ll
index e0044d6..646abab 100644
--- a/test/CodeGen/PTX/setp.ll
+++ b/test/CodeGen/PTX/setp.ll
@@ -1,190 +1,190 @@
; RUN: llc < %s -march=ptx32 | FileCheck %s
define ptx_device i32 @test_setp_eq_u32_rr(i32 %x, i32 %y) {
-; CHECK: setp.eq.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.eq.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp eq i32 %x, %y
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_ne_u32_rr(i32 %x, i32 %y) {
-; CHECK: setp.ne.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.ne.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp ne i32 %x, %y
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_lt_u32_rr(i32 %x, i32 %y) {
-; CHECK: setp.lt.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.lt.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp ult i32 %x, %y
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_le_u32_rr(i32 %x, i32 %y) {
-; CHECK: setp.le.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.le.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp ule i32 %x, %y
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_gt_u32_rr(i32 %x, i32 %y) {
-; CHECK: setp.gt.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp ugt i32 %x, %y
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_ge_u32_rr(i32 %x, i32 %y) {
-; CHECK: setp.ge.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.ge.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp uge i32 %x, %y
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_lt_s32_rr(i32 %x, i32 %y) {
-; CHECK: setp.lt.s32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.lt.s32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp slt i32 %x, %y
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_le_s32_rr(i32 %x, i32 %y) {
-; CHECK: setp.le.s32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.le.s32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp sle i32 %x, %y
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_gt_s32_rr(i32 %x, i32 %y) {
-; CHECK: setp.gt.s32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.s32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp sgt i32 %x, %y
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_ge_s32_rr(i32 %x, i32 %y) {
-; CHECK: setp.ge.s32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.ge.s32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp sge i32 %x, %y
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_eq_u32_ri(i32 %x) {
-; CHECK: setp.eq.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, 1;
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.eq.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 1;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp eq i32 %x, 1
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_ne_u32_ri(i32 %x) {
-; CHECK: setp.ne.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, 1;
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.ne.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 1;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp ne i32 %x, 1
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_lt_u32_ri(i32 %x) {
-; CHECK: setp.eq.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, 0;
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.eq.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 0;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp ult i32 %x, 1
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_le_u32_ri(i32 %x) {
-; CHECK: setp.lt.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, 2;
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.lt.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 2;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp ule i32 %x, 1
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_gt_u32_ri(i32 %x) {
-; CHECK: setp.gt.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, 1;
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 1;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp ugt i32 %x, 1
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_ge_u32_ri(i32 %x) {
-; CHECK: setp.ne.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, 0;
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.ne.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 0;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp uge i32 %x, 1
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_lt_s32_ri(i32 %x) {
-; CHECK: setp.lt.s32 p[[P0:[0-9]+]], r{{[0-9]+}}, 1;
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.lt.s32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 1;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp slt i32 %x, 1
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_le_s32_ri(i32 %x) {
-; CHECK: setp.lt.s32 p[[P0:[0-9]+]], r{{[0-9]+}}, 2;
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.lt.s32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 2;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp sle i32 %x, 1
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_gt_s32_ri(i32 %x) {
-; CHECK: setp.gt.s32 p[[P0:[0-9]+]], r{{[0-9]+}}, 1;
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.s32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 1;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp sgt i32 %x, 1
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_ge_s32_ri(i32 %x) {
-; CHECK: setp.gt.s32 p[[P0:[0-9]+]], r{{[0-9]+}}, 0;
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.s32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 0;
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p[[P0]];
+; CHECK: ret;
%p = icmp sge i32 %x, 1
%z = zext i1 %p to i32
ret i32 %z
}
define ptx_device i32 @test_setp_4_op_format_1(i32 %x, i32 %y, i32 %u, i32 %v) {
-; CHECK: setp.gt.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: setp.eq.and.u32 p[[P0]], r{{[0-9]+}}, r{{[0-9]+}}, p[[P0]];
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: setp.eq.and.u32 %p1, %r{{[0-9]+}}, %r{{[0-9]+}}, %p[[P0]];
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p1;
+; CHECK: ret;
%c = icmp eq i32 %x, %y
%d = icmp ugt i32 %u, %v
%e = and i1 %c, %d
@@ -193,10 +193,10 @@ define ptx_device i32 @test_setp_4_op_format_1(i32 %x, i32 %y, i32 %u, i32 %v) {
}
define ptx_device i32 @test_setp_4_op_format_2(i32 %x, i32 %y, i32 %w) {
-; CHECK: setp.gt.u32 p[[P0:[0-9]+]], r{{[0-9]+}}, 0;
-; CHECK-NEXT: setp.eq.and.u32 p[[P0]], r{{[0-9]+}}, r{{[0-9]+}}, !p[[P0]];
-; CHECK-NEXT: selp.u32 r{{[0-9]+}}, 1, 0, p[[P0]];
-; CHECK-NEXT: ret;
+; CHECK: setp.gt.u32 %p[[P0:[0-9]+]], %r{{[0-9]+}}, 0;
+; CHECK: setp.eq.and.u32 %p1, %r{{[0-9]+}}, %r{{[0-9]+}}, !%p[[P0]];
+; CHECK: selp.u32 %ret{{[0-9]+}}, 1, 0, %p1;
+; CHECK: ret;
%c = trunc i32 %w to i1
%d = icmp eq i32 %x, %y
%e = xor i1 %c, 1
diff --git a/test/CodeGen/PTX/shl.ll b/test/CodeGen/PTX/shl.ll
index b3818e1..d9fe2cd 100644
--- a/test/CodeGen/PTX/shl.ll
+++ b/test/CodeGen/PTX/shl.ll
@@ -1,21 +1,21 @@
; RUN: llc < %s -march=ptx32 | FileCheck %s
define ptx_device i32 @t1(i32 %x, i32 %y) {
-; CHECK: shl.b32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
+; CHECK: shl.b32 %ret{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
%z = shl i32 %x, %y
; CHECK: ret;
ret i32 %z
}
define ptx_device i32 @t2(i32 %x) {
-; CHECK: shl.b32 r{{[0-9]+}}, r{{[0-9]+}}, 3
+; CHECK: shl.b32 %ret{{[0-9]+}}, %r{{[0-9]+}}, 3
%z = shl i32 %x, 3
; CHECK: ret;
ret i32 %z
}
define ptx_device i32 @t3(i32 %x) {
-; CHECK: shl.b32 r{{[0-9]+}}, 3, r{{[0-9]+}}
+; CHECK: shl.b32 %ret{{[0-9]+}}, 3, %r{{[0-9]+}}
%z = shl i32 3, %x
; CHECK: ret;
ret i32 %z
diff --git a/test/CodeGen/PTX/shr.ll b/test/CodeGen/PTX/shr.ll
index cb57546..eb4666f 100644
--- a/test/CodeGen/PTX/shr.ll
+++ b/test/CodeGen/PTX/shr.ll
@@ -1,42 +1,42 @@
; RUN: llc < %s -march=ptx32 | FileCheck %s
define ptx_device i32 @t1(i32 %x, i32 %y) {
-; CHECK: shr.u32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
+; CHECK: shr.u32 %ret{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
%z = lshr i32 %x, %y
; CHECK: ret;
ret i32 %z
}
define ptx_device i32 @t2(i32 %x) {
-; CHECK: shr.u32 r{{[0-9]+}}, r{{[0-9]+}}, 3
+; CHECK: shr.u32 %ret{{[0-9]+}}, %r{{[0-9]+}}, 3
%z = lshr i32 %x, 3
; CHECK: ret;
ret i32 %z
}
define ptx_device i32 @t3(i32 %x) {
-; CHECK: shr.u32 r{{[0-9]+}}, 3, r{{[0-9]+}}
+; CHECK: shr.u32 %ret{{[0-9]+}}, 3, %r{{[0-9]+}}
%z = lshr i32 3, %x
; CHECK: ret;
ret i32 %z
}
define ptx_device i32 @t4(i32 %x, i32 %y) {
-; CHECK: shr.s32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
+; CHECK: shr.s32 %ret{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}
%z = ashr i32 %x, %y
; CHECK: ret;
ret i32 %z
}
define ptx_device i32 @t5(i32 %x) {
-; CHECK: shr.s32 r{{[0-9]+}}, r{{[0-9]+}}, 3
+; CHECK: shr.s32 %ret{{[0-9]+}}, %r{{[0-9]+}}, 3
%z = ashr i32 %x, 3
; CHECK: ret;
ret i32 %z
}
define ptx_device i32 @t6(i32 %x) {
-; CHECK: shr.s32 r{{[0-9]+}}, -3, r{{[0-9]+}}
+; CHECK: shr.s32 %ret{{[0-9]+}}, -3, %r{{[0-9]+}}
%z = ashr i32 -3, %x
; CHECK: ret;
ret i32 %z
diff --git a/test/CodeGen/PTX/simple-call.ll b/test/CodeGen/PTX/simple-call.ll
new file mode 100644
index 0000000..77ea29e
--- /dev/null
+++ b/test/CodeGen/PTX/simple-call.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -march=ptx32 -mattr=sm20 | FileCheck %s
+
+define ptx_device void @test_add(float %x, float %y) {
+; CHECK: ret;
+ %z = fadd float %x, %y
+ ret void
+}
+
+define ptx_device float @test_call(float %x, float %y) {
+ %a = fadd float %x, %y
+; CHECK: call.uni test_add, (__localparam_{{[0-9]+}}, __localparam_{{[0-9]+}});
+ call void @test_add(float %a, float %y)
+ ret float %a
+}
+
+define ptx_device float @test_compute(float %x, float %y) {
+; CHECK: ret;
+ %z = fadd float %x, %y
+ ret float %z
+}
+
+define ptx_device float @test_call_compute(float %x, float %y) {
+; CHECK: call.uni (__localparam_{{[0-9]+}}), test_compute, (__localparam_{{[0-9]+}}, __localparam_{{[0-9]+}})
+ %z = call float @test_compute(float %x, float %y)
+ ret float %z
+}
+
diff --git a/test/CodeGen/PTX/st.ll b/test/CodeGen/PTX/st.ll
index b08528e..63ef58c 100644
--- a/test/CodeGen/PTX/st.ll
+++ b/test/CodeGen/PTX/st.ll
@@ -6,9 +6,6 @@
;CHECK: .extern .const .b8 array_constant_i16[20];
@array_constant_i16 = external addrspace(1) constant [10 x i16]
-;CHECK: .extern .local .b8 array_local_i16[20];
-@array_local_i16 = external addrspace(2) global [10 x i16]
-
;CHECK: .extern .shared .b8 array_shared_i16[20];
@array_shared_i16 = external addrspace(4) global [10 x i16]
@@ -18,9 +15,6 @@
;CHECK: .extern .const .b8 array_constant_i32[40];
@array_constant_i32 = external addrspace(1) constant [10 x i32]
-;CHECK: .extern .local .b8 array_local_i32[40];
-@array_local_i32 = external addrspace(2) global [10 x i32]
-
;CHECK: .extern .shared .b8 array_shared_i32[40];
@array_shared_i32 = external addrspace(4) global [10 x i32]
@@ -30,9 +24,6 @@
;CHECK: .extern .const .b8 array_constant_i64[80];
@array_constant_i64 = external addrspace(1) constant [10 x i64]
-;CHECK: .extern .local .b8 array_local_i64[80];
-@array_local_i64 = external addrspace(2) global [10 x i64]
-
;CHECK: .extern .shared .b8 array_shared_i64[80];
@array_shared_i64 = external addrspace(4) global [10 x i64]
@@ -42,9 +33,6 @@
;CHECK: .extern .const .b8 array_constant_float[40];
@array_constant_float = external addrspace(1) constant [10 x float]
-;CHECK: .extern .local .b8 array_local_float[40];
-@array_local_float = external addrspace(2) global [10 x float]
-
;CHECK: .extern .shared .b8 array_shared_float[40];
@array_shared_float = external addrspace(4) global [10 x float]
@@ -54,57 +42,54 @@
;CHECK: .extern .const .b8 array_constant_double[80];
@array_constant_double = external addrspace(1) constant [10 x double]
-;CHECK: .extern .local .b8 array_local_double[80];
-@array_local_double = external addrspace(2) global [10 x double]
-
;CHECK: .extern .shared .b8 array_shared_double[80];
@array_shared_double = external addrspace(4) global [10 x double]
define ptx_device void @t1_u16(i16* %p, i16 %x) {
entry:
-;CHECK: st.global.u16 [r{{[0-9]+}}], rh{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: st.global.u16 [%r{{[0-9]+}}], %rh{{[0-9]+}};
+;CHECK: ret;
store i16 %x, i16* %p
ret void
}
define ptx_device void @t1_u32(i32* %p, i32 %x) {
entry:
-;CHECK: st.global.u32 [r{{[0-9]+}}], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: st.global.u32 [%r{{[0-9]+}}], %r{{[0-9]+}};
+;CHECK: ret;
store i32 %x, i32* %p
ret void
}
define ptx_device void @t1_u64(i64* %p, i64 %x) {
entry:
-;CHECK: st.global.u64 [r{{[0-9]+}}], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: st.global.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}};
+;CHECK: ret;
store i64 %x, i64* %p
ret void
}
define ptx_device void @t1_f32(float* %p, float %x) {
entry:
-;CHECK: st.global.f32 [r{{[0-9]+}}], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: st.global.f32 [%r{{[0-9]+}}], %f{{[0-9]+}};
+;CHECK: ret;
store float %x, float* %p
ret void
}
define ptx_device void @t1_f64(double* %p, double %x) {
entry:
-;CHECK: st.global.f64 [r{{[0-9]+}}], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: st.global.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}};
+;CHECK: ret;
store double %x, double* %p
ret void
}
define ptx_device void @t2_u16(i16* %p, i16 %x) {
entry:
-;CHECK: st.global.u16 [r{{[0-9]+}}+2], rh{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: st.global.u16 [%r{{[0-9]+}}+2], %rh{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr i16* %p, i32 1
store i16 %x, i16* %i
ret void
@@ -112,8 +97,8 @@ entry:
define ptx_device void @t2_u32(i32* %p, i32 %x) {
entry:
-;CHECK: st.global.u32 [r{{[0-9]+}}+4], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: st.global.u32 [%r{{[0-9]+}}+4], %r{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr i32* %p, i32 1
store i32 %x, i32* %i
ret void
@@ -121,8 +106,8 @@ entry:
define ptx_device void @t2_u64(i64* %p, i64 %x) {
entry:
-;CHECK: st.global.u64 [r{{[0-9]+}}+8], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: st.global.u64 [%r{{[0-9]+}}+8], %rd{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr i64* %p, i32 1
store i64 %x, i64* %i
ret void
@@ -130,8 +115,8 @@ entry:
define ptx_device void @t2_f32(float* %p, float %x) {
entry:
-;CHECK: st.global.f32 [r{{[0-9]+}}+4], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: st.global.f32 [%r{{[0-9]+}}+4], %f{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr float* %p, i32 1
store float %x, float* %i
ret void
@@ -139,8 +124,8 @@ entry:
define ptx_device void @t2_f64(double* %p, double %x) {
entry:
-;CHECK: st.global.f64 [r{{[0-9]+}}+8], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: st.global.f64 [%r{{[0-9]+}}+8], %fd{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr double* %p, i32 1
store double %x, double* %i
ret void
@@ -148,10 +133,10 @@ entry:
define ptx_device void @t3_u16(i16* %p, i32 %q, i16 %x) {
entry:
-;CHECK: shl.b32 r[[R0:[0-9]+]], r{{[0-9]+}}, 1;
-;CHECK-NEXT: add.u32 r[[R0]], r{{[0-9]+}}, r[[R0]];
-;CHECK-NEXT: st.global.u16 [r[[R0]]], rh{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: shl.b32 %r[[R0:[0-9]+]], %r{{[0-9]+}}, 1;
+;CHECK: add.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r[[R0]];
+;CHECK: st.global.u16 [%r{{[0-9]+}}], %rh{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr i16* %p, i32 %q
store i16 %x, i16* %i
ret void
@@ -159,10 +144,10 @@ entry:
define ptx_device void @t3_u32(i32* %p, i32 %q, i32 %x) {
entry:
-;CHECK: shl.b32 r[[R0:[0-9]+]], r{{[0-9]+}}, 2;
-;CHECK-NEXT: add.u32 r[[R0]], r{{[0-9]+}}, r[[R0]];
-;CHECK-NEXT: st.global.u32 [r[[R0]]], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: shl.b32 %r[[R0:[0-9]+]], %r{{[0-9]+}}, 2;
+;CHECK: add.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r[[R0]];
+;CHECK: st.global.u32 [%r{{[0-9]+}}], %r{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr i32* %p, i32 %q
store i32 %x, i32* %i
ret void
@@ -170,10 +155,10 @@ entry:
define ptx_device void @t3_u64(i64* %p, i32 %q, i64 %x) {
entry:
-;CHECK: shl.b32 r[[R0:[0-9]+]], r{{[0-9]+}}, 3;
-;CHECK-NEXT: add.u32 r[[R0]], r{{[0-9]+}}, r[[R0]];
-;CHECK-NEXT: st.global.u64 [r[[R0]]], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: shl.b32 %r[[R0:[0-9]+]], %r{{[0-9]+}}, 3;
+;CHECK: add.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r[[R0]];
+;CHECK: st.global.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr i64* %p, i32 %q
store i64 %x, i64* %i
ret void
@@ -181,10 +166,10 @@ entry:
define ptx_device void @t3_f32(float* %p, i32 %q, float %x) {
entry:
-;CHECK: shl.b32 r[[R0:[0-9]+]], r{{[0-9]+}}, 2;
-;CHECK-NEXT: add.u32 r[[R0]], r{{[0-9]+}}, r[[R0]];
-;CHECK-NEXT: st.global.f32 [r[[R0]]], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: shl.b32 %r[[R0:[0-9]+]], %r{{[0-9]+}}, 2;
+;CHECK: add.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r[[R0]];
+;CHECK: st.global.f32 [%r{{[0-9]+}}], %f{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr float* %p, i32 %q
store float %x, float* %i
ret void
@@ -192,10 +177,10 @@ entry:
define ptx_device void @t3_f64(double* %p, i32 %q, double %x) {
entry:
-;CHECK: shl.b32 r[[R0:[0-9]+]], r{{[0-9]+}}, 3;
-;CHECK-NEXT: add.u32 r[[R0]], r{{[0-9]+}}, r[[R0]];
-;CHECK-NEXT: st.global.f64 [r[[R0]]], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: shl.b32 %r[[R0:[0-9]+]], %r{{[0-9]+}}, 3;
+;CHECK: add.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r[[R0]];
+;CHECK: st.global.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr double* %p, i32 %q
store double %x, double* %i
ret void
@@ -203,9 +188,9 @@ entry:
define ptx_device void @t4_global_u16(i16 %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i16;
-;CHECK-NEXT: st.global.u16 [r[[R0]]], rh{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i16;
+;CHECK: st.global.u16 [%r[[R0]]], %rh{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x i16]* @array_i16, i16 0, i16 0
store i16 %x, i16* %i
ret void
@@ -213,9 +198,9 @@ entry:
define ptx_device void @t4_global_u32(i32 %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i32;
-;CHECK-NEXT: st.global.u32 [r[[R0]]], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i32;
+;CHECK: st.global.u32 [%r[[R0]]], %r{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x i32]* @array_i32, i32 0, i32 0
store i32 %x, i32* %i
ret void
@@ -223,9 +208,9 @@ entry:
define ptx_device void @t4_global_u64(i64 %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i64;
-;CHECK-NEXT: st.global.u64 [r[[R0]]], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i64;
+;CHECK: st.global.u64 [%r[[R0]]], %rd{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x i64]* @array_i64, i32 0, i32 0
store i64 %x, i64* %i
ret void
@@ -233,9 +218,9 @@ entry:
define ptx_device void @t4_global_f32(float %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_float;
-;CHECK-NEXT: st.global.f32 [r[[R0]]], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_float;
+;CHECK: st.global.f32 [%r[[R0]]], %f{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x float]* @array_float, i32 0, i32 0
store float %x, float* %i
ret void
@@ -243,69 +228,19 @@ entry:
define ptx_device void @t4_global_f64(double %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_double;
-;CHECK-NEXT: st.global.f64 [r[[R0]]], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_double;
+;CHECK: st.global.f64 [%r[[R0]]], %fd{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x double]* @array_double, i32 0, i32 0
store double %x, double* %i
ret void
}
-define ptx_device void @t4_local_u16(i16 %x) {
-entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_local_i16;
-;CHECK-NEXT: st.local.u16 [r[[R0]]], rh{{[0-9]+}};
-;CHECK-NEXT: ret;
- %i = getelementptr [10 x i16] addrspace(2)* @array_local_i16, i32 0, i32 0
- store i16 %x, i16 addrspace(2)* %i
- ret void
-}
-
-define ptx_device void @t4_local_u32(i32 %x) {
-entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_local_i32;
-;CHECK-NEXT: st.local.u32 [r[[R0]]], r{{[0-9]+}};
-;CHECK-NEXT: ret;
- %i = getelementptr [10 x i32] addrspace(2)* @array_local_i32, i32 0, i32 0
- store i32 %x, i32 addrspace(2)* %i
- ret void
-}
-
-define ptx_device void @t4_local_u64(i64 %x) {
-entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_local_i64;
-;CHECK-NEXT: st.local.u64 [r[[R0]]], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
- %i = getelementptr [10 x i64] addrspace(2)* @array_local_i64, i32 0, i32 0
- store i64 %x, i64 addrspace(2)* %i
- ret void
-}
-
-define ptx_device void @t4_local_f32(float %x) {
-entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_local_float;
-;CHECK-NEXT: st.local.f32 [r[[R0]]], r{{[0-9]+}};
-;CHECK-NEXT: ret;
- %i = getelementptr [10 x float] addrspace(2)* @array_local_float, i32 0, i32 0
- store float %x, float addrspace(2)* %i
- ret void
-}
-
-define ptx_device void @t4_local_f64(double %x) {
-entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_local_double;
-;CHECK-NEXT: st.local.f64 [r[[R0]]], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
- %i = getelementptr [10 x double] addrspace(2)* @array_local_double, i32 0, i32 0
- store double %x, double addrspace(2)* %i
- ret void
-}
-
define ptx_device void @t4_shared_u16(i16 %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_shared_i16;
-;CHECK-NEXT: st.shared.u16 [r[[R0]]], rh{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_shared_i16;
+;CHECK: st.shared.u16 [%r[[R0]]], %rh{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x i16] addrspace(4)* @array_shared_i16, i32 0, i32 0
store i16 %x, i16 addrspace(4)* %i
ret void
@@ -313,9 +248,9 @@ entry:
define ptx_device void @t4_shared_u32(i32 %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_shared_i32;
-;CHECK-NEXT: st.shared.u32 [r[[R0]]], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_shared_i32;
+;CHECK: st.shared.u32 [%r[[R0]]], %r{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x i32] addrspace(4)* @array_shared_i32, i32 0, i32 0
store i32 %x, i32 addrspace(4)* %i
ret void
@@ -323,9 +258,9 @@ entry:
define ptx_device void @t4_shared_u64(i64 %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_shared_i64;
-;CHECK-NEXT: st.shared.u64 [r[[R0]]], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_shared_i64;
+;CHECK: st.shared.u64 [%r[[R0]]], %rd{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x i64] addrspace(4)* @array_shared_i64, i32 0, i32 0
store i64 %x, i64 addrspace(4)* %i
ret void
@@ -333,9 +268,9 @@ entry:
define ptx_device void @t4_shared_f32(float %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_shared_float;
-;CHECK-NEXT: st.shared.f32 [r[[R0]]], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_shared_float;
+;CHECK: st.shared.f32 [%r[[R0]]], %f{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x float] addrspace(4)* @array_shared_float, i32 0, i32 0
store float %x, float addrspace(4)* %i
ret void
@@ -343,9 +278,9 @@ entry:
define ptx_device void @t4_shared_f64(double %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_shared_double;
-;CHECK-NEXT: st.shared.f64 [r[[R0]]], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_shared_double;
+;CHECK: st.shared.f64 [%r[[R0]]], %fd{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x double] addrspace(4)* @array_shared_double, i32 0, i32 0
store double %x, double addrspace(4)* %i
ret void
@@ -353,9 +288,9 @@ entry:
define ptx_device void @t5_u16(i16 %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i16;
-;CHECK-NEXT: st.global.u16 [r[[R0]]+2], rh{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i16;
+;CHECK: st.global.u16 [%r[[R0]]+2], %rh{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x i16]* @array_i16, i32 0, i32 1
store i16 %x, i16* %i
ret void
@@ -363,9 +298,9 @@ entry:
define ptx_device void @t5_u32(i32 %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i32;
-;CHECK-NEXT: st.global.u32 [r[[R0]]+4], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i32;
+;CHECK: st.global.u32 [%r[[R0]]+4], %r{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x i32]* @array_i32, i32 0, i32 1
store i32 %x, i32* %i
ret void
@@ -373,9 +308,9 @@ entry:
define ptx_device void @t5_u64(i64 %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_i64;
-;CHECK-NEXT: st.global.u64 [r[[R0]]+8], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_i64;
+;CHECK: st.global.u64 [%r[[R0]]+8], %rd{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x i64]* @array_i64, i32 0, i32 1
store i64 %x, i64* %i
ret void
@@ -383,9 +318,9 @@ entry:
define ptx_device void @t5_f32(float %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_float;
-;CHECK-NEXT: st.global.f32 [r[[R0]]+4], r{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_float;
+;CHECK: st.global.f32 [%r[[R0]]+4], %f{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x float]* @array_float, i32 0, i32 1
store float %x, float* %i
ret void
@@ -393,9 +328,9 @@ entry:
define ptx_device void @t5_f64(double %x) {
entry:
-;CHECK: mov.u32 r[[R0:[0-9]+]], array_double;
-;CHECK-NEXT: st.global.f64 [r[[R0]]+8], rd{{[0-9]+}};
-;CHECK-NEXT: ret;
+;CHECK: mov.u32 %r[[R0:[0-9]+]], array_double;
+;CHECK: st.global.f64 [%r[[R0]]+8], %fd{{[0-9]+}};
+;CHECK: ret;
%i = getelementptr [10 x double]* @array_double, i32 0, i32 1
store double %x, double* %i
ret void
diff --git a/test/CodeGen/PTX/stack-object.ll b/test/CodeGen/PTX/stack-object.ll
new file mode 100644
index 0000000..65f8ee2
--- /dev/null
+++ b/test/CodeGen/PTX/stack-object.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=ptx32 -mattr=sm20 | FileCheck %s
+
+define ptx_device float @stack1(float %a) {
+ ; CHECK: .local .align 4 .b8 __local0[4];
+ %a.2 = alloca float, align 4
+ ; CHECK: st.local.f32 [__local0], %f0
+ store float %a, float* %a.2
+ %a.3 = load float* %a.2
+ ret float %a.3
+}
+
+define ptx_device float @stack1_align8(float %a) {
+ ; CHECK: .local .align 8 .b8 __local0[4];
+ %a.2 = alloca float, align 8
+ ; CHECK: st.local.f32 [__local0], %f0
+ store float %a, float* %a.2
+ %a.3 = load float* %a.2
+ ret float %a.3
+}
diff --git a/test/CodeGen/PTX/sub.ll b/test/CodeGen/PTX/sub.ll
index acef396..7ac886a 100644
--- a/test/CodeGen/PTX/sub.ll
+++ b/test/CodeGen/PTX/sub.ll
@@ -1,71 +1,71 @@
; RUN: llc < %s -march=ptx32 | FileCheck %s
define ptx_device i16 @t1_u16(i16 %x, i16 %y) {
-; CHECK: sub.u16 rh{{[0-9]+}}, rh{{[0-9]+}}, rh{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: sub.u16 %ret{{[0-9]+}}, %rh{{[0-9]+}}, %rh{{[0-9]+}};
+; CHECK: ret;
%z = sub i16 %x, %y
ret i16 %z
}
define ptx_device i32 @t1_u32(i32 %x, i32 %y) {
-; CHECK: sub.u32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: sub.u32 %ret{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: ret;
%z = sub i32 %x, %y
ret i32 %z
}
define ptx_device i64 @t1_u64(i64 %x, i64 %y) {
-; CHECK: sub.u64 rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}};
-; CHECK-NEXT: ret;
+; CHECK: sub.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; CHECK: ret;
%z = sub i64 %x, %y
ret i64 %z
}
define ptx_device float @t1_f32(float %x, float %y) {
-; CHECK: sub.rn.f32 r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
-; CHECK-NEXT: ret;
+; CHECK: sub.rn.f32 %ret{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}
+; CHECK: ret;
%z = fsub float %x, %y
ret float %z
}
define ptx_device double @t1_f64(double %x, double %y) {
-; CHECK: sub.rn.f64 rd{{[0-9]+}}, rd{{[0-9]+}}, rd{{[0-9]+}}
-; CHECK-NEXT: ret;
+; CHECK: sub.rn.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
+; CHECK: ret;
%z = fsub double %x, %y
ret double %z
}
define ptx_device i16 @t2_u16(i16 %x) {
-; CHECK: add.u16 rh{{[0-9]+}}, rh{{[0-9]+}}, -1;
-; CHECK-NEXT: ret;
+; CHECK: add.u16 %ret{{[0-9]+}}, %rh{{[0-9]+}}, -1;
+; CHECK: ret;
%z = sub i16 %x, 1
ret i16 %z
}
define ptx_device i32 @t2_u32(i32 %x) {
-; CHECK: add.u32 r{{[0-9]+}}, r{{[0-9]+}}, -1;
-; CHECK-NEXT: ret;
+; CHECK: add.u32 %ret{{[0-9]+}}, %r{{[0-9]+}}, -1;
+; CHECK: ret;
%z = sub i32 %x, 1
ret i32 %z
}
define ptx_device i64 @t2_u64(i64 %x) {
-; CHECK: add.u64 rd{{[0-9]+}}, rd{{[0-9]+}}, -1;
-; CHECK-NEXT: ret;
+; CHECK: add.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}}, -1;
+; CHECK: ret;
%z = sub i64 %x, 1
ret i64 %z
}
define ptx_device float @t2_f32(float %x) {
-; CHECK: add.rn.f32 r{{[0-9]+}}, r{{[0-9]+}}, 0FBF800000;
-; CHECK-NEXT: ret;
+; CHECK: add.rn.f32 %ret{{[0-9]+}}, %f{{[0-9]+}}, 0DBFF0000000000000;
+; CHECK: ret;
%z = fsub float %x, 1.0
ret float %z
}
define ptx_device double @t2_f64(double %x) {
-; CHECK: add.rn.f64 rd{{[0-9]+}}, rd{{[0-9]+}}, 0DBFF0000000000000;
-; CHECK-NEXT: ret;
+; CHECK: add.rn.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}}, 0DBFF0000000000000;
+; CHECK: ret;
%z = fsub double %x, 1.0
ret double %z
}
diff --git a/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll b/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
index 0c11674..ecf45ef 100644
--- a/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
+++ b/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
@@ -18,11 +18,10 @@ bb30.preheader: ; preds = %entry
br label %bb30
unwind: ; preds = %cond_true, %entry
- %eh_ptr = call i8* @llvm.eh.exception() ; <i8*> [#uses=2]
- %eh_select = call i64 (i8*, i8*, ...)* @llvm.eh.selector.i64(i8* %eh_ptr, i8* bitcast (void ()* @__gxx_personality_v0 to i8*), i8* null) ; <i64> [#uses=0]
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
call void @llvm.stackrestore(i8* %tmp4)
- call void @_Unwind_Resume(i8* %eh_ptr)
- unreachable
+ resume { i8*, i32 } %exn
invcont23: ; preds = %cond_true
%tmp27 = load i64* %tmp26, align 8 ; <i64> [#uses=1]
@@ -46,14 +45,8 @@ declare i8* @llvm.stacksave() nounwind
declare void @Foo(i8**)
-declare i8* @llvm.eh.exception() nounwind
-
-declare i64 @llvm.eh.selector.i64(i8*, i8*, ...) nounwind
-
-declare void @__gxx_personality_v0()
-
-declare void @_Unwind_Resume(i8*)
-
declare void @Bar(i64, %struct.Range*)
declare void @llvm.stackrestore(i8*) nounwind
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll b/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll
index d49d58d..ce8e72d 100644
--- a/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll
+++ b/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll
@@ -1,13 +1,6 @@
; RUN: llc < %s -mtriple=powerpc-apple-darwin9.5
; rdar://6499616
- %llvm.dbg.anchor.type = type { i32, i32 }
- %llvm.dbg.compile_unit.type = type { i32, { }*, i32, i8*, i8*, i8* }
-@llvm.dbg.compile_units = linkonce constant %llvm.dbg.anchor.type { i32 458752, i32 17 } ; <%llvm.dbg.anchor.type*> [#uses=1]
-@.str = internal constant [11 x i8] c"testcase.c\00" ; <[11 x i8]*> [#uses=1]
-@.str1 = internal constant [30 x i8] c"/Volumes/SandBox/NightlyTest/\00" ; <[30 x i8]*> [#uses=1]
-@.str2 = internal constant [57 x i8] c"4.2.1 (Based on Apple Inc. build 5628) (LLVM build 9999)\00" ; <[57 x i8]*> [#uses=1]
-@llvm.dbg.compile_unit = internal constant %llvm.dbg.compile_unit.type { i32 458769, { }* bitcast (%llvm.dbg.anchor.type* @llvm.dbg.compile_units to { }*), i32 1, i8* getelementptr ([11 x i8]* @.str, i32 0, i32 0), i8* getelementptr ([30 x i8]* @.str1, i32 0, i32 0), i8* getelementptr ([57 x i8]* @.str2, i32 0, i32 0) } ; <%llvm.dbg.compile_unit.type*> [#uses=0]
@"\01LC" = internal constant [13 x i8] c"conftest.val\00" ; <[13 x i8]*> [#uses=1]
define i32 @main() nounwind {
diff --git a/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll b/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll
index 0bde2d5..ae2acd4 100644
--- a/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll
+++ b/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=ppc32 -mtriple=ppc-apple-darwin | FileCheck %s
+; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin | FileCheck %s
; ModuleID = '/Volumes/MacOS9/tests/WebKit/JavaScriptCore/profiler/ProfilerServer.mm'
diff --git a/test/CodeGen/PowerPC/Atomics-32.ll b/test/CodeGen/PowerPC/Atomics-32.ll
index 03905a3..64f1495 100644
--- a/test/CodeGen/PowerPC/Atomics-32.ll
+++ b/test/CodeGen/PowerPC/Atomics-32.ll
@@ -1,749 +1,699 @@
; RUN: llc < %s -march=ppc32
-; ModuleID = 'Atomics.c'
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc-apple-darwin9"
-@sc = common global i8 0 ; <i8*> [#uses=52]
-@uc = common global i8 0 ; <i8*> [#uses=100]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=23]
-@sl = common global i32 0 ; <i32*> [#uses=15]
-@ul = common global i32 0 ; <i32*> [#uses=15]
-@sll = common global i64 0, align 8 ; <i64*> [#uses=1]
-@ull = common global i64 0, align 8 ; <i64*> [#uses=1]
+
+@sc = common global i8 0
+@uc = common global i8 0
+@ss = common global i16 0
+@us = common global i16 0
+@si = common global i32 0
+@ui = common global i32 0
+@sl = common global i32 0
+@ul = common global i32 0
+@sll = common global i64 0, align 8
+@ull = common global i64 0, align 8
define void @test_op_ignore() nounwind {
entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:14 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:15 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 ) ; <i16>:17 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 ) ; <i16>:19 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 ) ; <i32>:21 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 ) ; <i32>:23 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; <i32>:25 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; <i32>:27 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:28 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:29 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 ) ; <i16>:31 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 ) ; <i16>:33 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 ) ; <i32>:35 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 ) ; <i32>:37 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 1 ) ; <i32>:39 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 1 ) ; <i32>:41 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:42 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:43 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 ) ; <i16>:45 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 ) ; <i16>:47 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 ) ; <i32>:49 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 ) ; <i32>:51 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 1 ) ; <i32>:53 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 1 ) ; <i32>:55 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:56 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:57 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 ) ; <i16>:61 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 ) ; <i32>:65 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 1 ) ; <i32>:67 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 1 ) ; <i32>:69 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:70 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:71 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 ) ; <i16>:73 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 ) ; <i32>:77 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 1 ) ; <i32>:81 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 1 ) ; <i32>:83 [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = atomicrmw add i8* @sc, i8 1 monotonic
+ %1 = atomicrmw add i8* @uc, i8 1 monotonic
+ %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %3 = atomicrmw add i16* %2, i16 1 monotonic
+ %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %5 = atomicrmw add i16* %4, i16 1 monotonic
+ %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %7 = atomicrmw add i32* %6, i32 1 monotonic
+ %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %9 = atomicrmw add i32* %8, i32 1 monotonic
+ %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %11 = atomicrmw add i32* %10, i32 1 monotonic
+ %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %13 = atomicrmw add i32* %12, i32 1 monotonic
+ %14 = atomicrmw sub i8* @sc, i8 1 monotonic
+ %15 = atomicrmw sub i8* @uc, i8 1 monotonic
+ %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %17 = atomicrmw sub i16* %16, i16 1 monotonic
+ %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %19 = atomicrmw sub i16* %18, i16 1 monotonic
+ %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %21 = atomicrmw sub i32* %20, i32 1 monotonic
+ %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %23 = atomicrmw sub i32* %22, i32 1 monotonic
+ %24 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %25 = atomicrmw sub i32* %24, i32 1 monotonic
+ %26 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %27 = atomicrmw sub i32* %26, i32 1 monotonic
+ %28 = atomicrmw or i8* @sc, i8 1 monotonic
+ %29 = atomicrmw or i8* @uc, i8 1 monotonic
+ %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %31 = atomicrmw or i16* %30, i16 1 monotonic
+ %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %33 = atomicrmw or i16* %32, i16 1 monotonic
+ %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %35 = atomicrmw or i32* %34, i32 1 monotonic
+ %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %37 = atomicrmw or i32* %36, i32 1 monotonic
+ %38 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %39 = atomicrmw or i32* %38, i32 1 monotonic
+ %40 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %41 = atomicrmw or i32* %40, i32 1 monotonic
+ %42 = atomicrmw xor i8* @sc, i8 1 monotonic
+ %43 = atomicrmw xor i8* @uc, i8 1 monotonic
+ %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %45 = atomicrmw xor i16* %44, i16 1 monotonic
+ %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %47 = atomicrmw xor i16* %46, i16 1 monotonic
+ %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %49 = atomicrmw xor i32* %48, i32 1 monotonic
+ %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %51 = atomicrmw xor i32* %50, i32 1 monotonic
+ %52 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %53 = atomicrmw xor i32* %52, i32 1 monotonic
+ %54 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %55 = atomicrmw xor i32* %54, i32 1 monotonic
+ %56 = atomicrmw and i8* @sc, i8 1 monotonic
+ %57 = atomicrmw and i8* @uc, i8 1 monotonic
+ %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %59 = atomicrmw and i16* %58, i16 1 monotonic
+ %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %61 = atomicrmw and i16* %60, i16 1 monotonic
+ %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %63 = atomicrmw and i32* %62, i32 1 monotonic
+ %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %65 = atomicrmw and i32* %64, i32 1 monotonic
+ %66 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %67 = atomicrmw and i32* %66, i32 1 monotonic
+ %68 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %69 = atomicrmw and i32* %68, i32 1 monotonic
+ %70 = atomicrmw nand i8* @sc, i8 1 monotonic
+ %71 = atomicrmw nand i8* @uc, i8 1 monotonic
+ %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %73 = atomicrmw nand i16* %72, i16 1 monotonic
+ %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %75 = atomicrmw nand i16* %74, i16 1 monotonic
+ %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %77 = atomicrmw nand i32* %76, i32 1 monotonic
+ %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %79 = atomicrmw nand i32* %78, i32 1 monotonic
+ %80 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %81 = atomicrmw nand i32* %80, i32 1 monotonic
+ %82 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %83 = atomicrmw nand i32* %82, i32 1 monotonic
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
-
define void @test_fetch_and_op() nounwind {
entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 11 ) ; <i32>:11 [#uses=1]
- store i32 %11, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 11 ) ; <i32>:13 [#uses=1]
- store i32 %13, i32* @ul, align 4
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:14 [#uses=1]
- store i8 %14, i8* @sc, align 1
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:15 [#uses=1]
- store i8 %15, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 ) ; <i16>:17 [#uses=1]
- store i16 %17, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 ) ; <i16>:19 [#uses=1]
- store i16 %19, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 ) ; <i32>:21 [#uses=1]
- store i32 %21, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 ) ; <i32>:23 [#uses=1]
- store i32 %23, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; <i32>:25 [#uses=1]
- store i32 %25, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; <i32>:27 [#uses=1]
- store i32 %27, i32* @ul, align 4
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:28 [#uses=1]
- store i8 %28, i8* @sc, align 1
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:29 [#uses=1]
- store i8 %29, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 ) ; <i16>:31 [#uses=1]
- store i16 %31, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 ) ; <i16>:33 [#uses=1]
- store i16 %33, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 ) ; <i32>:35 [#uses=1]
- store i32 %35, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 ) ; <i32>:37 [#uses=1]
- store i32 %37, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 11 ) ; <i32>:39 [#uses=1]
- store i32 %39, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 11 ) ; <i32>:41 [#uses=1]
- store i32 %41, i32* @ul, align 4
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:42 [#uses=1]
- store i8 %42, i8* @sc, align 1
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:43 [#uses=1]
- store i8 %43, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 ) ; <i16>:45 [#uses=1]
- store i16 %45, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 ) ; <i16>:47 [#uses=1]
- store i16 %47, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 ) ; <i32>:49 [#uses=1]
- store i32 %49, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 ) ; <i32>:51 [#uses=1]
- store i32 %51, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 11 ) ; <i32>:53 [#uses=1]
- store i32 %53, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 11 ) ; <i32>:55 [#uses=1]
- store i32 %55, i32* @ul, align 4
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:56 [#uses=1]
- store i8 %56, i8* @sc, align 1
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:57 [#uses=1]
- store i8 %57, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1]
- store i16 %59, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 ) ; <i16>:61 [#uses=1]
- store i16 %61, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1]
- store i32 %63, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 ) ; <i32>:65 [#uses=1]
- store i32 %65, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 11 ) ; <i32>:67 [#uses=1]
- store i32 %67, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 11 ) ; <i32>:69 [#uses=1]
- store i32 %69, i32* @ul, align 4
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:70 [#uses=1]
- store i8 %70, i8* @sc, align 1
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:71 [#uses=1]
- store i8 %71, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 ) ; <i16>:73 [#uses=1]
- store i16 %73, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1]
- store i16 %75, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 ) ; <i32>:77 [#uses=1]
- store i32 %77, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1]
- store i32 %79, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 11 ) ; <i32>:81 [#uses=1]
- store i32 %81, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 11 ) ; <i32>:83 [#uses=1]
- store i32 %83, i32* @ul, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = atomicrmw add i8* @sc, i8 11 monotonic
+ store i8 %0, i8* @sc, align 1
+ %1 = atomicrmw add i8* @uc, i8 11 monotonic
+ store i8 %1, i8* @uc, align 1
+ %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %3 = atomicrmw add i16* %2, i16 11 monotonic
+ store i16 %3, i16* @ss, align 2
+ %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %5 = atomicrmw add i16* %4, i16 11 monotonic
+ store i16 %5, i16* @us, align 2
+ %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %7 = atomicrmw add i32* %6, i32 11 monotonic
+ store i32 %7, i32* @si, align 4
+ %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %9 = atomicrmw add i32* %8, i32 11 monotonic
+ store i32 %9, i32* @ui, align 4
+ %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %11 = atomicrmw add i32* %10, i32 11 monotonic
+ store i32 %11, i32* @sl, align 4
+ %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %13 = atomicrmw add i32* %12, i32 11 monotonic
+ store i32 %13, i32* @ul, align 4
+ %14 = atomicrmw sub i8* @sc, i8 11 monotonic
+ store i8 %14, i8* @sc, align 1
+ %15 = atomicrmw sub i8* @uc, i8 11 monotonic
+ store i8 %15, i8* @uc, align 1
+ %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %17 = atomicrmw sub i16* %16, i16 11 monotonic
+ store i16 %17, i16* @ss, align 2
+ %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %19 = atomicrmw sub i16* %18, i16 11 monotonic
+ store i16 %19, i16* @us, align 2
+ %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %21 = atomicrmw sub i32* %20, i32 11 monotonic
+ store i32 %21, i32* @si, align 4
+ %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %23 = atomicrmw sub i32* %22, i32 11 monotonic
+ store i32 %23, i32* @ui, align 4
+ %24 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %25 = atomicrmw sub i32* %24, i32 11 monotonic
+ store i32 %25, i32* @sl, align 4
+ %26 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %27 = atomicrmw sub i32* %26, i32 11 monotonic
+ store i32 %27, i32* @ul, align 4
+ %28 = atomicrmw or i8* @sc, i8 11 monotonic
+ store i8 %28, i8* @sc, align 1
+ %29 = atomicrmw or i8* @uc, i8 11 monotonic
+ store i8 %29, i8* @uc, align 1
+ %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %31 = atomicrmw or i16* %30, i16 11 monotonic
+ store i16 %31, i16* @ss, align 2
+ %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %33 = atomicrmw or i16* %32, i16 11 monotonic
+ store i16 %33, i16* @us, align 2
+ %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %35 = atomicrmw or i32* %34, i32 11 monotonic
+ store i32 %35, i32* @si, align 4
+ %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %37 = atomicrmw or i32* %36, i32 11 monotonic
+ store i32 %37, i32* @ui, align 4
+ %38 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %39 = atomicrmw or i32* %38, i32 11 monotonic
+ store i32 %39, i32* @sl, align 4
+ %40 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %41 = atomicrmw or i32* %40, i32 11 monotonic
+ store i32 %41, i32* @ul, align 4
+ %42 = atomicrmw xor i8* @sc, i8 11 monotonic
+ store i8 %42, i8* @sc, align 1
+ %43 = atomicrmw xor i8* @uc, i8 11 monotonic
+ store i8 %43, i8* @uc, align 1
+ %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %45 = atomicrmw xor i16* %44, i16 11 monotonic
+ store i16 %45, i16* @ss, align 2
+ %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %47 = atomicrmw xor i16* %46, i16 11 monotonic
+ store i16 %47, i16* @us, align 2
+ %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %49 = atomicrmw xor i32* %48, i32 11 monotonic
+ store i32 %49, i32* @si, align 4
+ %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %51 = atomicrmw xor i32* %50, i32 11 monotonic
+ store i32 %51, i32* @ui, align 4
+ %52 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %53 = atomicrmw xor i32* %52, i32 11 monotonic
+ store i32 %53, i32* @sl, align 4
+ %54 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %55 = atomicrmw xor i32* %54, i32 11 monotonic
+ store i32 %55, i32* @ul, align 4
+ %56 = atomicrmw and i8* @sc, i8 11 monotonic
+ store i8 %56, i8* @sc, align 1
+ %57 = atomicrmw and i8* @uc, i8 11 monotonic
+ store i8 %57, i8* @uc, align 1
+ %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %59 = atomicrmw and i16* %58, i16 11 monotonic
+ store i16 %59, i16* @ss, align 2
+ %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %61 = atomicrmw and i16* %60, i16 11 monotonic
+ store i16 %61, i16* @us, align 2
+ %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %63 = atomicrmw and i32* %62, i32 11 monotonic
+ store i32 %63, i32* @si, align 4
+ %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %65 = atomicrmw and i32* %64, i32 11 monotonic
+ store i32 %65, i32* @ui, align 4
+ %66 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %67 = atomicrmw and i32* %66, i32 11 monotonic
+ store i32 %67, i32* @sl, align 4
+ %68 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %69 = atomicrmw and i32* %68, i32 11 monotonic
+ store i32 %69, i32* @ul, align 4
+ %70 = atomicrmw nand i8* @sc, i8 11 monotonic
+ store i8 %70, i8* @sc, align 1
+ %71 = atomicrmw nand i8* @uc, i8 11 monotonic
+ store i8 %71, i8* @uc, align 1
+ %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %73 = atomicrmw nand i16* %72, i16 11 monotonic
+ store i16 %73, i16* @ss, align 2
+ %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %75 = atomicrmw nand i16* %74, i16 11 monotonic
+ store i16 %75, i16* @us, align 2
+ %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %77 = atomicrmw nand i32* %76, i32 11 monotonic
+ store i32 %77, i32* @si, align 4
+ %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %79 = atomicrmw nand i32* %78, i32 11 monotonic
+ store i32 %79, i32* @ui, align 4
+ %80 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %81 = atomicrmw nand i32* %80, i32 11 monotonic
+ store i32 %81, i32* @sl, align 4
+ %82 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %83 = atomicrmw nand i32* %82, i32 11 monotonic
+ store i32 %83, i32* @ul, align 4
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
define void @test_op_and_fetch() nounwind {
entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %0 ) ; <i8>:1 [#uses=1]
- add i8 %1, %0 ; <i8>:2 [#uses=1]
- store i8 %2, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:3 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %3 ) ; <i8>:4 [#uses=1]
- add i8 %4, %3 ; <i8>:5 [#uses=1]
- store i8 %5, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:6 [#uses=1]
- zext i8 %6 to i16 ; <i16>:7 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:8 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %8, i16 %7 ) ; <i16>:9 [#uses=1]
- add i16 %9, %7 ; <i16>:10 [#uses=1]
- store i16 %10, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:11 [#uses=1]
- zext i8 %11 to i16 ; <i16>:12 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:13 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %13, i16 %12 ) ; <i16>:14 [#uses=1]
- add i16 %14, %12 ; <i16>:15 [#uses=1]
- store i16 %15, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:16 [#uses=1]
- zext i8 %16 to i32 ; <i32>:17 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:18 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %18, i32 %17 ) ; <i32>:19 [#uses=1]
- add i32 %19, %17 ; <i32>:20 [#uses=1]
- store i32 %20, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:21 [#uses=1]
- zext i8 %21 to i32 ; <i32>:22 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:23 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %23, i32 %22 ) ; <i32>:24 [#uses=1]
- add i32 %24, %22 ; <i32>:25 [#uses=1]
- store i32 %25, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:26 [#uses=1]
- zext i8 %26 to i32 ; <i32>:27 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:28 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %28, i32 %27 ) ; <i32>:29 [#uses=1]
- add i32 %29, %27 ; <i32>:30 [#uses=1]
- store i32 %30, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:31 [#uses=1]
- zext i8 %31 to i32 ; <i32>:32 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:33 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %33, i32 %32 ) ; <i32>:34 [#uses=1]
- add i32 %34, %32 ; <i32>:35 [#uses=1]
- store i32 %35, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:36 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %36 ) ; <i8>:37 [#uses=1]
- sub i8 %37, %36 ; <i8>:38 [#uses=1]
- store i8 %38, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:39 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %39 ) ; <i8>:40 [#uses=1]
- sub i8 %40, %39 ; <i8>:41 [#uses=1]
- store i8 %41, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:42 [#uses=1]
- zext i8 %42 to i16 ; <i16>:43 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %44, i16 %43 ) ; <i16>:45 [#uses=1]
- sub i16 %45, %43 ; <i16>:46 [#uses=1]
- store i16 %46, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:47 [#uses=1]
- zext i8 %47 to i16 ; <i16>:48 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:49 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %49, i16 %48 ) ; <i16>:50 [#uses=1]
- sub i16 %50, %48 ; <i16>:51 [#uses=1]
- store i16 %51, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %54, i32 %53 ) ; <i32>:55 [#uses=1]
- sub i32 %55, %53 ; <i32>:56 [#uses=1]
- store i32 %56, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:57 [#uses=1]
- zext i8 %57 to i32 ; <i32>:58 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:59 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %59, i32 %58 ) ; <i32>:60 [#uses=1]
- sub i32 %60, %58 ; <i32>:61 [#uses=1]
- store i32 %61, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:62 [#uses=1]
- zext i8 %62 to i32 ; <i32>:63 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %64, i32 %63 ) ; <i32>:65 [#uses=1]
- sub i32 %65, %63 ; <i32>:66 [#uses=1]
- store i32 %66, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:67 [#uses=1]
- zext i8 %67 to i32 ; <i32>:68 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:69 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %69, i32 %68 ) ; <i32>:70 [#uses=1]
- sub i32 %70, %68 ; <i32>:71 [#uses=1]
- store i32 %71, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:72 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %72 ) ; <i8>:73 [#uses=1]
- or i8 %73, %72 ; <i8>:74 [#uses=1]
- store i8 %74, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:75 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %75 ) ; <i8>:76 [#uses=1]
- or i8 %76, %75 ; <i8>:77 [#uses=1]
- store i8 %77, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:78 [#uses=1]
- zext i8 %78 to i16 ; <i16>:79 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:80 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %80, i16 %79 ) ; <i16>:81 [#uses=1]
- or i16 %81, %79 ; <i16>:82 [#uses=1]
- store i16 %82, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:83 [#uses=1]
- zext i8 %83 to i16 ; <i16>:84 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:85 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %85, i16 %84 ) ; <i16>:86 [#uses=1]
- or i16 %86, %84 ; <i16>:87 [#uses=1]
- store i16 %87, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:88 [#uses=1]
- zext i8 %88 to i32 ; <i32>:89 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:90 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %90, i32 %89 ) ; <i32>:91 [#uses=1]
- or i32 %91, %89 ; <i32>:92 [#uses=1]
- store i32 %92, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:93 [#uses=1]
- zext i8 %93 to i32 ; <i32>:94 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:95 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %95, i32 %94 ) ; <i32>:96 [#uses=1]
- or i32 %96, %94 ; <i32>:97 [#uses=1]
- store i32 %97, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:98 [#uses=1]
- zext i8 %98 to i32 ; <i32>:99 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:100 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %100, i32 %99 ) ; <i32>:101 [#uses=1]
- or i32 %101, %99 ; <i32>:102 [#uses=1]
- store i32 %102, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:103 [#uses=1]
- zext i8 %103 to i32 ; <i32>:104 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:105 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %105, i32 %104 ) ; <i32>:106 [#uses=1]
- or i32 %106, %104 ; <i32>:107 [#uses=1]
- store i32 %107, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:108 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %108 ) ; <i8>:109 [#uses=1]
- xor i8 %109, %108 ; <i8>:110 [#uses=1]
- store i8 %110, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:111 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %111 ) ; <i8>:112 [#uses=1]
- xor i8 %112, %111 ; <i8>:113 [#uses=1]
- store i8 %113, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:114 [#uses=1]
- zext i8 %114 to i16 ; <i16>:115 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:116 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %116, i16 %115 ) ; <i16>:117 [#uses=1]
- xor i16 %117, %115 ; <i16>:118 [#uses=1]
- store i16 %118, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:119 [#uses=1]
- zext i8 %119 to i16 ; <i16>:120 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:121 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %121, i16 %120 ) ; <i16>:122 [#uses=1]
- xor i16 %122, %120 ; <i16>:123 [#uses=1]
- store i16 %123, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:124 [#uses=1]
- zext i8 %124 to i32 ; <i32>:125 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:126 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %126, i32 %125 ) ; <i32>:127 [#uses=1]
- xor i32 %127, %125 ; <i32>:128 [#uses=1]
- store i32 %128, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:129 [#uses=1]
- zext i8 %129 to i32 ; <i32>:130 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:131 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %131, i32 %130 ) ; <i32>:132 [#uses=1]
- xor i32 %132, %130 ; <i32>:133 [#uses=1]
- store i32 %133, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:134 [#uses=1]
- zext i8 %134 to i32 ; <i32>:135 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:136 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %136, i32 %135 ) ; <i32>:137 [#uses=1]
- xor i32 %137, %135 ; <i32>:138 [#uses=1]
- store i32 %138, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:139 [#uses=1]
- zext i8 %139 to i32 ; <i32>:140 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:141 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %141, i32 %140 ) ; <i32>:142 [#uses=1]
- xor i32 %142, %140 ; <i32>:143 [#uses=1]
- store i32 %143, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:144 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %144 ) ; <i8>:145 [#uses=1]
- and i8 %145, %144 ; <i8>:146 [#uses=1]
- store i8 %146, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:147 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %147 ) ; <i8>:148 [#uses=1]
- and i8 %148, %147 ; <i8>:149 [#uses=1]
- store i8 %149, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:150 [#uses=1]
- zext i8 %150 to i16 ; <i16>:151 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:152 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %152, i16 %151 ) ; <i16>:153 [#uses=1]
- and i16 %153, %151 ; <i16>:154 [#uses=1]
- store i16 %154, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:155 [#uses=1]
- zext i8 %155 to i16 ; <i16>:156 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:157 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %157, i16 %156 ) ; <i16>:158 [#uses=1]
- and i16 %158, %156 ; <i16>:159 [#uses=1]
- store i16 %159, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:160 [#uses=1]
- zext i8 %160 to i32 ; <i32>:161 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:162 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %162, i32 %161 ) ; <i32>:163 [#uses=1]
- and i32 %163, %161 ; <i32>:164 [#uses=1]
- store i32 %164, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:165 [#uses=1]
- zext i8 %165 to i32 ; <i32>:166 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:167 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %167, i32 %166 ) ; <i32>:168 [#uses=1]
- and i32 %168, %166 ; <i32>:169 [#uses=1]
- store i32 %169, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:170 [#uses=1]
- zext i8 %170 to i32 ; <i32>:171 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:172 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %172, i32 %171 ) ; <i32>:173 [#uses=1]
- and i32 %173, %171 ; <i32>:174 [#uses=1]
- store i32 %174, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:175 [#uses=1]
- zext i8 %175 to i32 ; <i32>:176 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:177 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %177, i32 %176 ) ; <i32>:178 [#uses=1]
- and i32 %178, %176 ; <i32>:179 [#uses=1]
- store i32 %179, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:180 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %180 ) ; <i8>:181 [#uses=1]
- xor i8 %181, -1 ; <i8>:182 [#uses=1]
- and i8 %182, %180 ; <i8>:183 [#uses=1]
- store i8 %183, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:184 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %184 ) ; <i8>:185 [#uses=1]
- xor i8 %185, -1 ; <i8>:186 [#uses=1]
- and i8 %186, %184 ; <i8>:187 [#uses=1]
- store i8 %187, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:188 [#uses=1]
- zext i8 %188 to i16 ; <i16>:189 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:190 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %190, i16 %189 ) ; <i16>:191 [#uses=1]
- xor i16 %191, -1 ; <i16>:192 [#uses=1]
- and i16 %192, %189 ; <i16>:193 [#uses=1]
- store i16 %193, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:194 [#uses=1]
- zext i8 %194 to i16 ; <i16>:195 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:196 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %196, i16 %195 ) ; <i16>:197 [#uses=1]
- xor i16 %197, -1 ; <i16>:198 [#uses=1]
- and i16 %198, %195 ; <i16>:199 [#uses=1]
- store i16 %199, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:200 [#uses=1]
- zext i8 %200 to i32 ; <i32>:201 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:202 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %202, i32 %201 ) ; <i32>:203 [#uses=1]
- xor i32 %203, -1 ; <i32>:204 [#uses=1]
- and i32 %204, %201 ; <i32>:205 [#uses=1]
- store i32 %205, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:206 [#uses=1]
- zext i8 %206 to i32 ; <i32>:207 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:208 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %208, i32 %207 ) ; <i32>:209 [#uses=1]
- xor i32 %209, -1 ; <i32>:210 [#uses=1]
- and i32 %210, %207 ; <i32>:211 [#uses=1]
- store i32 %211, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:212 [#uses=1]
- zext i8 %212 to i32 ; <i32>:213 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:214 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %214, i32 %213 ) ; <i32>:215 [#uses=1]
- xor i32 %215, -1 ; <i32>:216 [#uses=1]
- and i32 %216, %213 ; <i32>:217 [#uses=1]
- store i32 %217, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:218 [#uses=1]
- zext i8 %218 to i32 ; <i32>:219 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:220 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %220, i32 %219 ) ; <i32>:221 [#uses=1]
- xor i32 %221, -1 ; <i32>:222 [#uses=1]
- and i32 %222, %219 ; <i32>:223 [#uses=1]
- store i32 %223, i32* @ul, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = load i8* @uc, align 1
+ %1 = atomicrmw add i8* @sc, i8 %0 monotonic
+ %2 = add i8 %1, %0
+ store i8 %2, i8* @sc, align 1
+ %3 = load i8* @uc, align 1
+ %4 = atomicrmw add i8* @uc, i8 %3 monotonic
+ %5 = add i8 %4, %3
+ store i8 %5, i8* @uc, align 1
+ %6 = load i8* @uc, align 1
+ %7 = zext i8 %6 to i16
+ %8 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %9 = atomicrmw add i16* %8, i16 %7 monotonic
+ %10 = add i16 %9, %7
+ store i16 %10, i16* @ss, align 2
+ %11 = load i8* @uc, align 1
+ %12 = zext i8 %11 to i16
+ %13 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %14 = atomicrmw add i16* %13, i16 %12 monotonic
+ %15 = add i16 %14, %12
+ store i16 %15, i16* @us, align 2
+ %16 = load i8* @uc, align 1
+ %17 = zext i8 %16 to i32
+ %18 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %19 = atomicrmw add i32* %18, i32 %17 monotonic
+ %20 = add i32 %19, %17
+ store i32 %20, i32* @si, align 4
+ %21 = load i8* @uc, align 1
+ %22 = zext i8 %21 to i32
+ %23 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %24 = atomicrmw add i32* %23, i32 %22 monotonic
+ %25 = add i32 %24, %22
+ store i32 %25, i32* @ui, align 4
+ %26 = load i8* @uc, align 1
+ %27 = zext i8 %26 to i32
+ %28 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %29 = atomicrmw add i32* %28, i32 %27 monotonic
+ %30 = add i32 %29, %27
+ store i32 %30, i32* @sl, align 4
+ %31 = load i8* @uc, align 1
+ %32 = zext i8 %31 to i32
+ %33 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %34 = atomicrmw add i32* %33, i32 %32 monotonic
+ %35 = add i32 %34, %32
+ store i32 %35, i32* @ul, align 4
+ %36 = load i8* @uc, align 1
+ %37 = atomicrmw sub i8* @sc, i8 %36 monotonic
+ %38 = sub i8 %37, %36
+ store i8 %38, i8* @sc, align 1
+ %39 = load i8* @uc, align 1
+ %40 = atomicrmw sub i8* @uc, i8 %39 monotonic
+ %41 = sub i8 %40, %39
+ store i8 %41, i8* @uc, align 1
+ %42 = load i8* @uc, align 1
+ %43 = zext i8 %42 to i16
+ %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %45 = atomicrmw sub i16* %44, i16 %43 monotonic
+ %46 = sub i16 %45, %43
+ store i16 %46, i16* @ss, align 2
+ %47 = load i8* @uc, align 1
+ %48 = zext i8 %47 to i16
+ %49 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %50 = atomicrmw sub i16* %49, i16 %48 monotonic
+ %51 = sub i16 %50, %48
+ store i16 %51, i16* @us, align 2
+ %52 = load i8* @uc, align 1
+ %53 = zext i8 %52 to i32
+ %54 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %55 = atomicrmw sub i32* %54, i32 %53 monotonic
+ %56 = sub i32 %55, %53
+ store i32 %56, i32* @si, align 4
+ %57 = load i8* @uc, align 1
+ %58 = zext i8 %57 to i32
+ %59 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %60 = atomicrmw sub i32* %59, i32 %58 monotonic
+ %61 = sub i32 %60, %58
+ store i32 %61, i32* @ui, align 4
+ %62 = load i8* @uc, align 1
+ %63 = zext i8 %62 to i32
+ %64 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %65 = atomicrmw sub i32* %64, i32 %63 monotonic
+ %66 = sub i32 %65, %63
+ store i32 %66, i32* @sl, align 4
+ %67 = load i8* @uc, align 1
+ %68 = zext i8 %67 to i32
+ %69 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %70 = atomicrmw sub i32* %69, i32 %68 monotonic
+ %71 = sub i32 %70, %68
+ store i32 %71, i32* @ul, align 4
+ %72 = load i8* @uc, align 1
+ %73 = atomicrmw or i8* @sc, i8 %72 monotonic
+ %74 = or i8 %73, %72
+ store i8 %74, i8* @sc, align 1
+ %75 = load i8* @uc, align 1
+ %76 = atomicrmw or i8* @uc, i8 %75 monotonic
+ %77 = or i8 %76, %75
+ store i8 %77, i8* @uc, align 1
+ %78 = load i8* @uc, align 1
+ %79 = zext i8 %78 to i16
+ %80 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %81 = atomicrmw or i16* %80, i16 %79 monotonic
+ %82 = or i16 %81, %79
+ store i16 %82, i16* @ss, align 2
+ %83 = load i8* @uc, align 1
+ %84 = zext i8 %83 to i16
+ %85 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %86 = atomicrmw or i16* %85, i16 %84 monotonic
+ %87 = or i16 %86, %84
+ store i16 %87, i16* @us, align 2
+ %88 = load i8* @uc, align 1
+ %89 = zext i8 %88 to i32
+ %90 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %91 = atomicrmw or i32* %90, i32 %89 monotonic
+ %92 = or i32 %91, %89
+ store i32 %92, i32* @si, align 4
+ %93 = load i8* @uc, align 1
+ %94 = zext i8 %93 to i32
+ %95 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %96 = atomicrmw or i32* %95, i32 %94 monotonic
+ %97 = or i32 %96, %94
+ store i32 %97, i32* @ui, align 4
+ %98 = load i8* @uc, align 1
+ %99 = zext i8 %98 to i32
+ %100 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %101 = atomicrmw or i32* %100, i32 %99 monotonic
+ %102 = or i32 %101, %99
+ store i32 %102, i32* @sl, align 4
+ %103 = load i8* @uc, align 1
+ %104 = zext i8 %103 to i32
+ %105 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %106 = atomicrmw or i32* %105, i32 %104 monotonic
+ %107 = or i32 %106, %104
+ store i32 %107, i32* @ul, align 4
+ %108 = load i8* @uc, align 1
+ %109 = atomicrmw xor i8* @sc, i8 %108 monotonic
+ %110 = xor i8 %109, %108
+ store i8 %110, i8* @sc, align 1
+ %111 = load i8* @uc, align 1
+ %112 = atomicrmw xor i8* @uc, i8 %111 monotonic
+ %113 = xor i8 %112, %111
+ store i8 %113, i8* @uc, align 1
+ %114 = load i8* @uc, align 1
+ %115 = zext i8 %114 to i16
+ %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %117 = atomicrmw xor i16* %116, i16 %115 monotonic
+ %118 = xor i16 %117, %115
+ store i16 %118, i16* @ss, align 2
+ %119 = load i8* @uc, align 1
+ %120 = zext i8 %119 to i16
+ %121 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %122 = atomicrmw xor i16* %121, i16 %120 monotonic
+ %123 = xor i16 %122, %120
+ store i16 %123, i16* @us, align 2
+ %124 = load i8* @uc, align 1
+ %125 = zext i8 %124 to i32
+ %126 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %127 = atomicrmw xor i32* %126, i32 %125 monotonic
+ %128 = xor i32 %127, %125
+ store i32 %128, i32* @si, align 4
+ %129 = load i8* @uc, align 1
+ %130 = zext i8 %129 to i32
+ %131 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %132 = atomicrmw xor i32* %131, i32 %130 monotonic
+ %133 = xor i32 %132, %130
+ store i32 %133, i32* @ui, align 4
+ %134 = load i8* @uc, align 1
+ %135 = zext i8 %134 to i32
+ %136 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %137 = atomicrmw xor i32* %136, i32 %135 monotonic
+ %138 = xor i32 %137, %135
+ store i32 %138, i32* @sl, align 4
+ %139 = load i8* @uc, align 1
+ %140 = zext i8 %139 to i32
+ %141 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %142 = atomicrmw xor i32* %141, i32 %140 monotonic
+ %143 = xor i32 %142, %140
+ store i32 %143, i32* @ul, align 4
+ %144 = load i8* @uc, align 1
+ %145 = atomicrmw and i8* @sc, i8 %144 monotonic
+ %146 = and i8 %145, %144
+ store i8 %146, i8* @sc, align 1
+ %147 = load i8* @uc, align 1
+ %148 = atomicrmw and i8* @uc, i8 %147 monotonic
+ %149 = and i8 %148, %147
+ store i8 %149, i8* @uc, align 1
+ %150 = load i8* @uc, align 1
+ %151 = zext i8 %150 to i16
+ %152 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %153 = atomicrmw and i16* %152, i16 %151 monotonic
+ %154 = and i16 %153, %151
+ store i16 %154, i16* @ss, align 2
+ %155 = load i8* @uc, align 1
+ %156 = zext i8 %155 to i16
+ %157 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %158 = atomicrmw and i16* %157, i16 %156 monotonic
+ %159 = and i16 %158, %156
+ store i16 %159, i16* @us, align 2
+ %160 = load i8* @uc, align 1
+ %161 = zext i8 %160 to i32
+ %162 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %163 = atomicrmw and i32* %162, i32 %161 monotonic
+ %164 = and i32 %163, %161
+ store i32 %164, i32* @si, align 4
+ %165 = load i8* @uc, align 1
+ %166 = zext i8 %165 to i32
+ %167 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %168 = atomicrmw and i32* %167, i32 %166 monotonic
+ %169 = and i32 %168, %166
+ store i32 %169, i32* @ui, align 4
+ %170 = load i8* @uc, align 1
+ %171 = zext i8 %170 to i32
+ %172 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %173 = atomicrmw and i32* %172, i32 %171 monotonic
+ %174 = and i32 %173, %171
+ store i32 %174, i32* @sl, align 4
+ %175 = load i8* @uc, align 1
+ %176 = zext i8 %175 to i32
+ %177 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %178 = atomicrmw and i32* %177, i32 %176 monotonic
+ %179 = and i32 %178, %176
+ store i32 %179, i32* @ul, align 4
+ %180 = load i8* @uc, align 1
+ %181 = atomicrmw nand i8* @sc, i8 %180 monotonic
+ %182 = xor i8 %181, -1
+ %183 = and i8 %182, %180
+ store i8 %183, i8* @sc, align 1
+ %184 = load i8* @uc, align 1
+ %185 = atomicrmw nand i8* @uc, i8 %184 monotonic
+ %186 = xor i8 %185, -1
+ %187 = and i8 %186, %184
+ store i8 %187, i8* @uc, align 1
+ %188 = load i8* @uc, align 1
+ %189 = zext i8 %188 to i16
+ %190 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %191 = atomicrmw nand i16* %190, i16 %189 monotonic
+ %192 = xor i16 %191, -1
+ %193 = and i16 %192, %189
+ store i16 %193, i16* @ss, align 2
+ %194 = load i8* @uc, align 1
+ %195 = zext i8 %194 to i16
+ %196 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %197 = atomicrmw nand i16* %196, i16 %195 monotonic
+ %198 = xor i16 %197, -1
+ %199 = and i16 %198, %195
+ store i16 %199, i16* @us, align 2
+ %200 = load i8* @uc, align 1
+ %201 = zext i8 %200 to i32
+ %202 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %203 = atomicrmw nand i32* %202, i32 %201 monotonic
+ %204 = xor i32 %203, -1
+ %205 = and i32 %204, %201
+ store i32 %205, i32* @si, align 4
+ %206 = load i8* @uc, align 1
+ %207 = zext i8 %206 to i32
+ %208 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %209 = atomicrmw nand i32* %208, i32 %207 monotonic
+ %210 = xor i32 %209, -1
+ %211 = and i32 %210, %207
+ store i32 %211, i32* @ui, align 4
+ %212 = load i8* @uc, align 1
+ %213 = zext i8 %212 to i32
+ %214 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %215 = atomicrmw nand i32* %214, i32 %213 monotonic
+ %216 = xor i32 %215, -1
+ %217 = and i32 %216, %213
+ store i32 %217, i32* @sl, align 4
+ %218 = load i8* @uc, align 1
+ %219 = zext i8 %218 to i32
+ %220 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %221 = atomicrmw nand i32* %220, i32 %219 monotonic
+ %222 = xor i32 %221, -1
+ %223 = and i32 %222, %219
+ store i32 %223, i32* @ul, align 4
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
define void @test_compare_and_swap() nounwind {
entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=1]
- load i8* @sc, align 1 ; <i8>:1 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %0, i8 %1 ) ; <i8>:2 [#uses=1]
- store i8 %2, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:3 [#uses=1]
- load i8* @sc, align 1 ; <i8>:4 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %3, i8 %4 ) ; <i8>:5 [#uses=1]
- store i8 %5, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:6 [#uses=1]
- zext i8 %6 to i16 ; <i16>:7 [#uses=1]
- load i8* @sc, align 1 ; <i8>:8 [#uses=1]
- sext i8 %8 to i16 ; <i16>:9 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:10 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %10, i16 %7, i16 %9 ) ; <i16>:11 [#uses=1]
- store i16 %11, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:12 [#uses=1]
- zext i8 %12 to i16 ; <i16>:13 [#uses=1]
- load i8* @sc, align 1 ; <i8>:14 [#uses=1]
- sext i8 %14 to i16 ; <i16>:15 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %16, i16 %13, i16 %15 ) ; <i16>:17 [#uses=1]
- store i16 %17, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:18 [#uses=1]
- zext i8 %18 to i32 ; <i32>:19 [#uses=1]
- load i8* @sc, align 1 ; <i8>:20 [#uses=1]
- sext i8 %20 to i32 ; <i32>:21 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %22, i32 %19, i32 %21 ) ; <i32>:23 [#uses=1]
- store i32 %23, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:24 [#uses=1]
- zext i8 %24 to i32 ; <i32>:25 [#uses=1]
- load i8* @sc, align 1 ; <i8>:26 [#uses=1]
- sext i8 %26 to i32 ; <i32>:27 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:28 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %28, i32 %25, i32 %27 ) ; <i32>:29 [#uses=1]
- store i32 %29, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:30 [#uses=1]
- zext i8 %30 to i32 ; <i32>:31 [#uses=1]
- load i8* @sc, align 1 ; <i8>:32 [#uses=1]
- sext i8 %32 to i32 ; <i32>:33 [#uses=1]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %34, i32 %31, i32 %33 ) ; <i32>:35 [#uses=1]
- store i32 %35, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:36 [#uses=1]
- zext i8 %36 to i32 ; <i32>:37 [#uses=1]
- load i8* @sc, align 1 ; <i8>:38 [#uses=1]
- sext i8 %38 to i32 ; <i32>:39 [#uses=1]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %40, i32 %37, i32 %39 ) ; <i32>:41 [#uses=1]
- store i32 %41, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:42 [#uses=2]
- load i8* @sc, align 1 ; <i8>:43 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %42, i8 %43 ) ; <i8>:44 [#uses=1]
- icmp eq i8 %44, %42 ; <i1>:45 [#uses=1]
- zext i1 %45 to i32 ; <i32>:46 [#uses=1]
- store i32 %46, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:47 [#uses=2]
- load i8* @sc, align 1 ; <i8>:48 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %47, i8 %48 ) ; <i8>:49 [#uses=1]
- icmp eq i8 %49, %47 ; <i1>:50 [#uses=1]
- zext i1 %50 to i32 ; <i32>:51 [#uses=1]
- store i32 %51, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i16 ; <i16>:53 [#uses=2]
- load i8* @sc, align 1 ; <i8>:54 [#uses=1]
- sext i8 %54 to i16 ; <i16>:55 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:56 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %56, i16 %53, i16 %55 ) ; <i16>:57 [#uses=1]
- icmp eq i16 %57, %53 ; <i1>:58 [#uses=1]
- zext i1 %58 to i32 ; <i32>:59 [#uses=1]
- store i32 %59, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:60 [#uses=1]
- zext i8 %60 to i16 ; <i16>:61 [#uses=2]
- load i8* @sc, align 1 ; <i8>:62 [#uses=1]
- sext i8 %62 to i16 ; <i16>:63 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:64 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %64, i16 %61, i16 %63 ) ; <i16>:65 [#uses=1]
- icmp eq i16 %65, %61 ; <i1>:66 [#uses=1]
- zext i1 %66 to i32 ; <i32>:67 [#uses=1]
- store i32 %67, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:68 [#uses=1]
- zext i8 %68 to i32 ; <i32>:69 [#uses=2]
- load i8* @sc, align 1 ; <i8>:70 [#uses=1]
- sext i8 %70 to i32 ; <i32>:71 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:72 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %72, i32 %69, i32 %71 ) ; <i32>:73 [#uses=1]
- icmp eq i32 %73, %69 ; <i1>:74 [#uses=1]
- zext i1 %74 to i32 ; <i32>:75 [#uses=1]
- store i32 %75, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:76 [#uses=1]
- zext i8 %76 to i32 ; <i32>:77 [#uses=2]
- load i8* @sc, align 1 ; <i8>:78 [#uses=1]
- sext i8 %78 to i32 ; <i32>:79 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %80, i32 %77, i32 %79 ) ; <i32>:81 [#uses=1]
- icmp eq i32 %81, %77 ; <i1>:82 [#uses=1]
- zext i1 %82 to i32 ; <i32>:83 [#uses=1]
- store i32 %83, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:84 [#uses=1]
- zext i8 %84 to i32 ; <i32>:85 [#uses=2]
- load i8* @sc, align 1 ; <i8>:86 [#uses=1]
- sext i8 %86 to i32 ; <i32>:87 [#uses=1]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:88 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %88, i32 %85, i32 %87 ) ; <i32>:89 [#uses=1]
- icmp eq i32 %89, %85 ; <i1>:90 [#uses=1]
- zext i1 %90 to i32 ; <i32>:91 [#uses=1]
- store i32 %91, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:92 [#uses=1]
- zext i8 %92 to i32 ; <i32>:93 [#uses=2]
- load i8* @sc, align 1 ; <i8>:94 [#uses=1]
- sext i8 %94 to i32 ; <i32>:95 [#uses=1]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:96 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %96, i32 %93, i32 %95 ) ; <i32>:97 [#uses=1]
- icmp eq i32 %97, %93 ; <i1>:98 [#uses=1]
- zext i1 %98 to i32 ; <i32>:99 [#uses=1]
- store i32 %99, i32* @ui, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = load i8* @uc, align 1
+ %1 = load i8* @sc, align 1
+ %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic
+ store i8 %2, i8* @sc, align 1
+ %3 = load i8* @uc, align 1
+ %4 = load i8* @sc, align 1
+ %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic
+ store i8 %5, i8* @uc, align 1
+ %6 = load i8* @uc, align 1
+ %7 = zext i8 %6 to i16
+ %8 = load i8* @sc, align 1
+ %9 = sext i8 %8 to i16
+ %10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic
+ store i16 %11, i16* @ss, align 2
+ %12 = load i8* @uc, align 1
+ %13 = zext i8 %12 to i16
+ %14 = load i8* @sc, align 1
+ %15 = sext i8 %14 to i16
+ %16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic
+ store i16 %17, i16* @us, align 2
+ %18 = load i8* @uc, align 1
+ %19 = zext i8 %18 to i32
+ %20 = load i8* @sc, align 1
+ %21 = sext i8 %20 to i32
+ %22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic
+ store i32 %23, i32* @si, align 4
+ %24 = load i8* @uc, align 1
+ %25 = zext i8 %24 to i32
+ %26 = load i8* @sc, align 1
+ %27 = sext i8 %26 to i32
+ %28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic
+ store i32 %29, i32* @ui, align 4
+ %30 = load i8* @uc, align 1
+ %31 = zext i8 %30 to i32
+ %32 = load i8* @sc, align 1
+ %33 = sext i8 %32 to i32
+ %34 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %35 = cmpxchg i32* %34, i32 %31, i32 %33 monotonic
+ store i32 %35, i32* @sl, align 4
+ %36 = load i8* @uc, align 1
+ %37 = zext i8 %36 to i32
+ %38 = load i8* @sc, align 1
+ %39 = sext i8 %38 to i32
+ %40 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %41 = cmpxchg i32* %40, i32 %37, i32 %39 monotonic
+ store i32 %41, i32* @ul, align 4
+ %42 = load i8* @uc, align 1
+ %43 = load i8* @sc, align 1
+ %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic
+ %45 = icmp eq i8 %44, %42
+ %46 = zext i1 %45 to i32
+ store i32 %46, i32* @ui, align 4
+ %47 = load i8* @uc, align 1
+ %48 = load i8* @sc, align 1
+ %49 = cmpxchg i8* @uc, i8 %47, i8 %48 monotonic
+ %50 = icmp eq i8 %49, %47
+ %51 = zext i1 %50 to i32
+ store i32 %51, i32* @ui, align 4
+ %52 = load i8* @uc, align 1
+ %53 = zext i8 %52 to i16
+ %54 = load i8* @sc, align 1
+ %55 = sext i8 %54 to i16
+ %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %57 = cmpxchg i16* %56, i16 %53, i16 %55 monotonic
+ %58 = icmp eq i16 %57, %53
+ %59 = zext i1 %58 to i32
+ store i32 %59, i32* @ui, align 4
+ %60 = load i8* @uc, align 1
+ %61 = zext i8 %60 to i16
+ %62 = load i8* @sc, align 1
+ %63 = sext i8 %62 to i16
+ %64 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %65 = cmpxchg i16* %64, i16 %61, i16 %63 monotonic
+ %66 = icmp eq i16 %65, %61
+ %67 = zext i1 %66 to i32
+ store i32 %67, i32* @ui, align 4
+ %68 = load i8* @uc, align 1
+ %69 = zext i8 %68 to i32
+ %70 = load i8* @sc, align 1
+ %71 = sext i8 %70 to i32
+ %72 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %73 = cmpxchg i32* %72, i32 %69, i32 %71 monotonic
+ %74 = icmp eq i32 %73, %69
+ %75 = zext i1 %74 to i32
+ store i32 %75, i32* @ui, align 4
+ %76 = load i8* @uc, align 1
+ %77 = zext i8 %76 to i32
+ %78 = load i8* @sc, align 1
+ %79 = sext i8 %78 to i32
+ %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %81 = cmpxchg i32* %80, i32 %77, i32 %79 monotonic
+ %82 = icmp eq i32 %81, %77
+ %83 = zext i1 %82 to i32
+ store i32 %83, i32* @ui, align 4
+ %84 = load i8* @uc, align 1
+ %85 = zext i8 %84 to i32
+ %86 = load i8* @sc, align 1
+ %87 = sext i8 %86 to i32
+ %88 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %89 = cmpxchg i32* %88, i32 %85, i32 %87 monotonic
+ %90 = icmp eq i32 %89, %85
+ %91 = zext i1 %90 to i32
+ store i32 %91, i32* @ui, align 4
+ %92 = load i8* @uc, align 1
+ %93 = zext i8 %92 to i32
+ %94 = load i8* @sc, align 1
+ %95 = sext i8 %94 to i32
+ %96 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %97 = cmpxchg i32* %96, i32 %93, i32 %95 monotonic
+ %98 = icmp eq i32 %97, %93
+ %99 = zext i1 %98 to i32
+ store i32 %99, i32* @ui, align 4
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
-
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
define void @test_lock() nounwind {
entry:
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=1]
- store i32 %11, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=1]
- store i32 %13, i32* @ul, align 4
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:14 [#uses=1]
- volatile store i16 0, i16* %14, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:15 [#uses=1]
- volatile store i16 0, i16* %15, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:16 [#uses=1]
- volatile store i32 0, i32* %16, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:17 [#uses=1]
- volatile store i32 0, i32* %17, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:18 [#uses=1]
- volatile store i32 0, i32* %18, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:19 [#uses=1]
- volatile store i32 0, i32* %19, align 4
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:20 [#uses=1]
- volatile store i64 0, i64* %20, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:21 [#uses=1]
- volatile store i64 0, i64* %21, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = atomicrmw xchg i8* @sc, i8 1 monotonic
+ store i8 %0, i8* @sc, align 1
+ %1 = atomicrmw xchg i8* @uc, i8 1 monotonic
+ store i8 %1, i8* @uc, align 1
+ %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %3 = atomicrmw xchg i16* %2, i16 1 monotonic
+ store i16 %3, i16* @ss, align 2
+ %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %5 = atomicrmw xchg i16* %4, i16 1 monotonic
+ store i16 %5, i16* @us, align 2
+ %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %7 = atomicrmw xchg i32* %6, i32 1 monotonic
+ store i32 %7, i32* @si, align 4
+ %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %9 = atomicrmw xchg i32* %8, i32 1 monotonic
+ store i32 %9, i32* @ui, align 4
+ %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ %11 = atomicrmw xchg i32* %10, i32 1 monotonic
+ store i32 %11, i32* @sl, align 4
+ %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ %13 = atomicrmw xchg i32* %12, i32 1 monotonic
+ store i32 %13, i32* @ul, align 4
+ fence seq_cst
+ store volatile i8 0, i8* @sc, align 1
+ store volatile i8 0, i8* @uc, align 1
+ %14 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ store volatile i16 0, i16* %14, align 2
+ %15 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ store volatile i16 0, i16* %15, align 2
+ %16 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ store volatile i32 0, i32* %16, align 4
+ %17 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ store volatile i32 0, i32* %17, align 4
+ %18 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+ store volatile i32 0, i32* %18, align 4
+ %19 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+ store volatile i32 0, i32* %19, align 4
+ %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ store volatile i64 0, i64* %20, align 8
+ %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ store volatile i64 0, i64* %21, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/PowerPC/Atomics-64.ll b/test/CodeGen/PowerPC/Atomics-64.ll
index cfc1eb9..d35b848 100644
--- a/test/CodeGen/PowerPC/Atomics-64.ll
+++ b/test/CodeGen/PowerPC/Atomics-64.ll
@@ -8,772 +8,707 @@
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc64-apple-darwin9"
-@sc = common global i8 0 ; <i8*> [#uses=52]
-@uc = common global i8 0 ; <i8*> [#uses=100]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=23]
-@sl = common global i64 0, align 8 ; <i64*> [#uses=15]
-@ul = common global i64 0, align 8 ; <i64*> [#uses=15]
-@sll = common global i64 0, align 8 ; <i64*> [#uses=1]
-@ull = common global i64 0, align 8 ; <i64*> [#uses=1]
+
+@sc = common global i8 0
+@uc = common global i8 0
+@ss = common global i16 0
+@us = common global i16 0
+@si = common global i32 0
+@ui = common global i32 0
+@sl = common global i64 0, align 8
+@ul = common global i64 0, align 8
+@sll = common global i64 0, align 8
+@ull = common global i64 0, align 8
define void @test_op_ignore() nounwind {
entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 1 ) ; <i64>:11 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 1 ) ; <i64>:13 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:14 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:15 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 ) ; <i16>:17 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 ) ; <i16>:19 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 ) ; <i32>:21 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 ) ; <i32>:23 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:24 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %24, i64 1 ) ; <i64>:25 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:26 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %26, i64 1 ) ; <i64>:27 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:28 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:29 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 ) ; <i16>:31 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 ) ; <i16>:33 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 ) ; <i32>:35 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 ) ; <i32>:37 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:38 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %38, i64 1 ) ; <i64>:39 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:40 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %40, i64 1 ) ; <i64>:41 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:42 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:43 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 ) ; <i16>:45 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 ) ; <i16>:47 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 ) ; <i32>:49 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 ) ; <i32>:51 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:52 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %52, i64 1 ) ; <i64>:53 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:54 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %54, i64 1 ) ; <i64>:55 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:56 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:57 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 ) ; <i16>:61 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 ) ; <i32>:65 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:66 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %66, i64 1 ) ; <i64>:67 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:68 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %68, i64 1 ) ; <i64>:69 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:70 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:71 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 ) ; <i16>:73 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 ) ; <i32>:77 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:80 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %80, i64 1 ) ; <i64>:81 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:82 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %82, i64 1 ) ; <i64>:83 [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = atomicrmw add i8* @sc, i8 1 monotonic
+ %1 = atomicrmw add i8* @uc, i8 1 monotonic
+ %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %3 = atomicrmw add i16* %2, i16 1 monotonic
+ %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %5 = atomicrmw add i16* %4, i16 1 monotonic
+ %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %7 = atomicrmw add i32* %6, i32 1 monotonic
+ %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %9 = atomicrmw add i32* %8, i32 1 monotonic
+ %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %11 = atomicrmw add i64* %10, i64 1 monotonic
+ %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %13 = atomicrmw add i64* %12, i64 1 monotonic
+ %14 = atomicrmw sub i8* @sc, i8 1 monotonic
+ %15 = atomicrmw sub i8* @uc, i8 1 monotonic
+ %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %17 = atomicrmw sub i16* %16, i16 1 monotonic
+ %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %19 = atomicrmw sub i16* %18, i16 1 monotonic
+ %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %21 = atomicrmw sub i32* %20, i32 1 monotonic
+ %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %23 = atomicrmw sub i32* %22, i32 1 monotonic
+ %24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %25 = atomicrmw sub i64* %24, i64 1 monotonic
+ %26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %27 = atomicrmw sub i64* %26, i64 1 monotonic
+ %28 = atomicrmw or i8* @sc, i8 1 monotonic
+ %29 = atomicrmw or i8* @uc, i8 1 monotonic
+ %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %31 = atomicrmw or i16* %30, i16 1 monotonic
+ %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %33 = atomicrmw or i16* %32, i16 1 monotonic
+ %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %35 = atomicrmw or i32* %34, i32 1 monotonic
+ %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %37 = atomicrmw or i32* %36, i32 1 monotonic
+ %38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %39 = atomicrmw or i64* %38, i64 1 monotonic
+ %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %41 = atomicrmw or i64* %40, i64 1 monotonic
+ %42 = atomicrmw xor i8* @sc, i8 1 monotonic
+ %43 = atomicrmw xor i8* @uc, i8 1 monotonic
+ %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %45 = atomicrmw xor i16* %44, i16 1 monotonic
+ %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %47 = atomicrmw xor i16* %46, i16 1 monotonic
+ %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %49 = atomicrmw xor i32* %48, i32 1 monotonic
+ %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %51 = atomicrmw xor i32* %50, i32 1 monotonic
+ %52 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %53 = atomicrmw xor i64* %52, i64 1 monotonic
+ %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %55 = atomicrmw xor i64* %54, i64 1 monotonic
+ %56 = atomicrmw and i8* @sc, i8 1 monotonic
+ %57 = atomicrmw and i8* @uc, i8 1 monotonic
+ %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %59 = atomicrmw and i16* %58, i16 1 monotonic
+ %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %61 = atomicrmw and i16* %60, i16 1 monotonic
+ %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %63 = atomicrmw and i32* %62, i32 1 monotonic
+ %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %65 = atomicrmw and i32* %64, i32 1 monotonic
+ %66 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %67 = atomicrmw and i64* %66, i64 1 monotonic
+ %68 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %69 = atomicrmw and i64* %68, i64 1 monotonic
+ %70 = atomicrmw nand i8* @sc, i8 1 monotonic
+ %71 = atomicrmw nand i8* @uc, i8 1 monotonic
+ %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %73 = atomicrmw nand i16* %72, i16 1 monotonic
+ %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %75 = atomicrmw nand i16* %74, i16 1 monotonic
+ %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %77 = atomicrmw nand i32* %76, i32 1 monotonic
+ %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %79 = atomicrmw nand i32* %78, i32 1 monotonic
+ %80 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %81 = atomicrmw nand i64* %80, i64 1 monotonic
+ %82 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %83 = atomicrmw nand i64* %82, i64 1 monotonic
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind
-
define void @test_fetch_and_op() nounwind {
entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 11 ) ; <i64>:11 [#uses=1]
- store i64 %11, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 11 ) ; <i64>:13 [#uses=1]
- store i64 %13, i64* @ul, align 8
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:14 [#uses=1]
- store i8 %14, i8* @sc, align 1
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:15 [#uses=1]
- store i8 %15, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 ) ; <i16>:17 [#uses=1]
- store i16 %17, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 ) ; <i16>:19 [#uses=1]
- store i16 %19, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 ) ; <i32>:21 [#uses=1]
- store i32 %21, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 ) ; <i32>:23 [#uses=1]
- store i32 %23, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:24 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %24, i64 11 ) ; <i64>:25 [#uses=1]
- store i64 %25, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:26 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %26, i64 11 ) ; <i64>:27 [#uses=1]
- store i64 %27, i64* @ul, align 8
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:28 [#uses=1]
- store i8 %28, i8* @sc, align 1
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:29 [#uses=1]
- store i8 %29, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 ) ; <i16>:31 [#uses=1]
- store i16 %31, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 ) ; <i16>:33 [#uses=1]
- store i16 %33, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 ) ; <i32>:35 [#uses=1]
- store i32 %35, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 ) ; <i32>:37 [#uses=1]
- store i32 %37, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:38 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %38, i64 11 ) ; <i64>:39 [#uses=1]
- store i64 %39, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:40 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %40, i64 11 ) ; <i64>:41 [#uses=1]
- store i64 %41, i64* @ul, align 8
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:42 [#uses=1]
- store i8 %42, i8* @sc, align 1
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:43 [#uses=1]
- store i8 %43, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 ) ; <i16>:45 [#uses=1]
- store i16 %45, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 ) ; <i16>:47 [#uses=1]
- store i16 %47, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 ) ; <i32>:49 [#uses=1]
- store i32 %49, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 ) ; <i32>:51 [#uses=1]
- store i32 %51, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:52 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %52, i64 11 ) ; <i64>:53 [#uses=1]
- store i64 %53, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:54 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %54, i64 11 ) ; <i64>:55 [#uses=1]
- store i64 %55, i64* @ul, align 8
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:56 [#uses=1]
- store i8 %56, i8* @sc, align 1
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:57 [#uses=1]
- store i8 %57, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1]
- store i16 %59, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 ) ; <i16>:61 [#uses=1]
- store i16 %61, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1]
- store i32 %63, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 ) ; <i32>:65 [#uses=1]
- store i32 %65, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:66 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %66, i64 11 ) ; <i64>:67 [#uses=1]
- store i64 %67, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:68 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %68, i64 11 ) ; <i64>:69 [#uses=1]
- store i64 %69, i64* @ul, align 8
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:70 [#uses=1]
- store i8 %70, i8* @sc, align 1
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:71 [#uses=1]
- store i8 %71, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 ) ; <i16>:73 [#uses=1]
- store i16 %73, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1]
- store i16 %75, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 ) ; <i32>:77 [#uses=1]
- store i32 %77, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1]
- store i32 %79, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:80 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %80, i64 11 ) ; <i64>:81 [#uses=1]
- store i64 %81, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:82 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %82, i64 11 ) ; <i64>:83 [#uses=1]
- store i64 %83, i64* @ul, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = atomicrmw add i8* @sc, i8 11 monotonic
+ store i8 %0, i8* @sc, align 1
+ %1 = atomicrmw add i8* @uc, i8 11 monotonic
+ store i8 %1, i8* @uc, align 1
+ %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %3 = atomicrmw add i16* %2, i16 11 monotonic
+ store i16 %3, i16* @ss, align 2
+ %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %5 = atomicrmw add i16* %4, i16 11 monotonic
+ store i16 %5, i16* @us, align 2
+ %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %7 = atomicrmw add i32* %6, i32 11 monotonic
+ store i32 %7, i32* @si, align 4
+ %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %9 = atomicrmw add i32* %8, i32 11 monotonic
+ store i32 %9, i32* @ui, align 4
+ %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %11 = atomicrmw add i64* %10, i64 11 monotonic
+ store i64 %11, i64* @sl, align 8
+ %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %13 = atomicrmw add i64* %12, i64 11 monotonic
+ store i64 %13, i64* @ul, align 8
+ %14 = atomicrmw sub i8* @sc, i8 11 monotonic
+ store i8 %14, i8* @sc, align 1
+ %15 = atomicrmw sub i8* @uc, i8 11 monotonic
+ store i8 %15, i8* @uc, align 1
+ %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %17 = atomicrmw sub i16* %16, i16 11 monotonic
+ store i16 %17, i16* @ss, align 2
+ %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %19 = atomicrmw sub i16* %18, i16 11 monotonic
+ store i16 %19, i16* @us, align 2
+ %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %21 = atomicrmw sub i32* %20, i32 11 monotonic
+ store i32 %21, i32* @si, align 4
+ %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %23 = atomicrmw sub i32* %22, i32 11 monotonic
+ store i32 %23, i32* @ui, align 4
+ %24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %25 = atomicrmw sub i64* %24, i64 11 monotonic
+ store i64 %25, i64* @sl, align 8
+ %26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %27 = atomicrmw sub i64* %26, i64 11 monotonic
+ store i64 %27, i64* @ul, align 8
+ %28 = atomicrmw or i8* @sc, i8 11 monotonic
+ store i8 %28, i8* @sc, align 1
+ %29 = atomicrmw or i8* @uc, i8 11 monotonic
+ store i8 %29, i8* @uc, align 1
+ %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %31 = atomicrmw or i16* %30, i16 11 monotonic
+ store i16 %31, i16* @ss, align 2
+ %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %33 = atomicrmw or i16* %32, i16 11 monotonic
+ store i16 %33, i16* @us, align 2
+ %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %35 = atomicrmw or i32* %34, i32 11 monotonic
+ store i32 %35, i32* @si, align 4
+ %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %37 = atomicrmw or i32* %36, i32 11 monotonic
+ store i32 %37, i32* @ui, align 4
+ %38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %39 = atomicrmw or i64* %38, i64 11 monotonic
+ store i64 %39, i64* @sl, align 8
+ %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %41 = atomicrmw or i64* %40, i64 11 monotonic
+ store i64 %41, i64* @ul, align 8
+ %42 = atomicrmw xor i8* @sc, i8 11 monotonic
+ store i8 %42, i8* @sc, align 1
+ %43 = atomicrmw xor i8* @uc, i8 11 monotonic
+ store i8 %43, i8* @uc, align 1
+ %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %45 = atomicrmw xor i16* %44, i16 11 monotonic
+ store i16 %45, i16* @ss, align 2
+ %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %47 = atomicrmw xor i16* %46, i16 11 monotonic
+ store i16 %47, i16* @us, align 2
+ %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %49 = atomicrmw xor i32* %48, i32 11 monotonic
+ store i32 %49, i32* @si, align 4
+ %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %51 = atomicrmw xor i32* %50, i32 11 monotonic
+ store i32 %51, i32* @ui, align 4
+ %52 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %53 = atomicrmw xor i64* %52, i64 11 monotonic
+ store i64 %53, i64* @sl, align 8
+ %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %55 = atomicrmw xor i64* %54, i64 11 monotonic
+ store i64 %55, i64* @ul, align 8
+ %56 = atomicrmw and i8* @sc, i8 11 monotonic
+ store i8 %56, i8* @sc, align 1
+ %57 = atomicrmw and i8* @uc, i8 11 monotonic
+ store i8 %57, i8* @uc, align 1
+ %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %59 = atomicrmw and i16* %58, i16 11 monotonic
+ store i16 %59, i16* @ss, align 2
+ %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %61 = atomicrmw and i16* %60, i16 11 monotonic
+ store i16 %61, i16* @us, align 2
+ %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %63 = atomicrmw and i32* %62, i32 11 monotonic
+ store i32 %63, i32* @si, align 4
+ %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %65 = atomicrmw and i32* %64, i32 11 monotonic
+ store i32 %65, i32* @ui, align 4
+ %66 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %67 = atomicrmw and i64* %66, i64 11 monotonic
+ store i64 %67, i64* @sl, align 8
+ %68 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %69 = atomicrmw and i64* %68, i64 11 monotonic
+ store i64 %69, i64* @ul, align 8
+ %70 = atomicrmw nand i8* @sc, i8 11 monotonic
+ store i8 %70, i8* @sc, align 1
+ %71 = atomicrmw nand i8* @uc, i8 11 monotonic
+ store i8 %71, i8* @uc, align 1
+ %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %73 = atomicrmw nand i16* %72, i16 11 monotonic
+ store i16 %73, i16* @ss, align 2
+ %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %75 = atomicrmw nand i16* %74, i16 11 monotonic
+ store i16 %75, i16* @us, align 2
+ %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %77 = atomicrmw nand i32* %76, i32 11 monotonic
+ store i32 %77, i32* @si, align 4
+ %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %79 = atomicrmw nand i32* %78, i32 11 monotonic
+ store i32 %79, i32* @ui, align 4
+ %80 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %81 = atomicrmw nand i64* %80, i64 11 monotonic
+ store i64 %81, i64* @sl, align 8
+ %82 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %83 = atomicrmw nand i64* %82, i64 11 monotonic
+ store i64 %83, i64* @ul, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
define void @test_op_and_fetch() nounwind {
entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %0 ) ; <i8>:1 [#uses=1]
- add i8 %1, %0 ; <i8>:2 [#uses=1]
- store i8 %2, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:3 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %3 ) ; <i8>:4 [#uses=1]
- add i8 %4, %3 ; <i8>:5 [#uses=1]
- store i8 %5, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:6 [#uses=1]
- zext i8 %6 to i16 ; <i16>:7 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:8 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %8, i16 %7 ) ; <i16>:9 [#uses=1]
- add i16 %9, %7 ; <i16>:10 [#uses=1]
- store i16 %10, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:11 [#uses=1]
- zext i8 %11 to i16 ; <i16>:12 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:13 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %13, i16 %12 ) ; <i16>:14 [#uses=1]
- add i16 %14, %12 ; <i16>:15 [#uses=1]
- store i16 %15, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:16 [#uses=1]
- zext i8 %16 to i32 ; <i32>:17 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:18 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %18, i32 %17 ) ; <i32>:19 [#uses=1]
- add i32 %19, %17 ; <i32>:20 [#uses=1]
- store i32 %20, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:21 [#uses=1]
- zext i8 %21 to i32 ; <i32>:22 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:23 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %23, i32 %22 ) ; <i32>:24 [#uses=1]
- add i32 %24, %22 ; <i32>:25 [#uses=1]
- store i32 %25, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:26 [#uses=1]
- zext i8 %26 to i64 ; <i64>:27 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:28 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %28, i64 %27 ) ; <i64>:29 [#uses=1]
- add i64 %29, %27 ; <i64>:30 [#uses=1]
- store i64 %30, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:31 [#uses=1]
- zext i8 %31 to i64 ; <i64>:32 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:33 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %33, i64 %32 ) ; <i64>:34 [#uses=1]
- add i64 %34, %32 ; <i64>:35 [#uses=1]
- store i64 %35, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:36 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %36 ) ; <i8>:37 [#uses=1]
- sub i8 %37, %36 ; <i8>:38 [#uses=1]
- store i8 %38, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:39 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %39 ) ; <i8>:40 [#uses=1]
- sub i8 %40, %39 ; <i8>:41 [#uses=1]
- store i8 %41, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:42 [#uses=1]
- zext i8 %42 to i16 ; <i16>:43 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %44, i16 %43 ) ; <i16>:45 [#uses=1]
- sub i16 %45, %43 ; <i16>:46 [#uses=1]
- store i16 %46, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:47 [#uses=1]
- zext i8 %47 to i16 ; <i16>:48 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:49 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %49, i16 %48 ) ; <i16>:50 [#uses=1]
- sub i16 %50, %48 ; <i16>:51 [#uses=1]
- store i16 %51, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %54, i32 %53 ) ; <i32>:55 [#uses=1]
- sub i32 %55, %53 ; <i32>:56 [#uses=1]
- store i32 %56, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:57 [#uses=1]
- zext i8 %57 to i32 ; <i32>:58 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:59 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %59, i32 %58 ) ; <i32>:60 [#uses=1]
- sub i32 %60, %58 ; <i32>:61 [#uses=1]
- store i32 %61, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:62 [#uses=1]
- zext i8 %62 to i64 ; <i64>:63 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:64 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %64, i64 %63 ) ; <i64>:65 [#uses=1]
- sub i64 %65, %63 ; <i64>:66 [#uses=1]
- store i64 %66, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:67 [#uses=1]
- zext i8 %67 to i64 ; <i64>:68 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:69 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %69, i64 %68 ) ; <i64>:70 [#uses=1]
- sub i64 %70, %68 ; <i64>:71 [#uses=1]
- store i64 %71, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:72 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %72 ) ; <i8>:73 [#uses=1]
- or i8 %73, %72 ; <i8>:74 [#uses=1]
- store i8 %74, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:75 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %75 ) ; <i8>:76 [#uses=1]
- or i8 %76, %75 ; <i8>:77 [#uses=1]
- store i8 %77, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:78 [#uses=1]
- zext i8 %78 to i16 ; <i16>:79 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:80 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %80, i16 %79 ) ; <i16>:81 [#uses=1]
- or i16 %81, %79 ; <i16>:82 [#uses=1]
- store i16 %82, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:83 [#uses=1]
- zext i8 %83 to i16 ; <i16>:84 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:85 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %85, i16 %84 ) ; <i16>:86 [#uses=1]
- or i16 %86, %84 ; <i16>:87 [#uses=1]
- store i16 %87, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:88 [#uses=1]
- zext i8 %88 to i32 ; <i32>:89 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:90 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %90, i32 %89 ) ; <i32>:91 [#uses=1]
- or i32 %91, %89 ; <i32>:92 [#uses=1]
- store i32 %92, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:93 [#uses=1]
- zext i8 %93 to i32 ; <i32>:94 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:95 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %95, i32 %94 ) ; <i32>:96 [#uses=1]
- or i32 %96, %94 ; <i32>:97 [#uses=1]
- store i32 %97, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:98 [#uses=1]
- zext i8 %98 to i64 ; <i64>:99 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:100 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %100, i64 %99 ) ; <i64>:101 [#uses=1]
- or i64 %101, %99 ; <i64>:102 [#uses=1]
- store i64 %102, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:103 [#uses=1]
- zext i8 %103 to i64 ; <i64>:104 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:105 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %105, i64 %104 ) ; <i64>:106 [#uses=1]
- or i64 %106, %104 ; <i64>:107 [#uses=1]
- store i64 %107, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:108 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %108 ) ; <i8>:109 [#uses=1]
- xor i8 %109, %108 ; <i8>:110 [#uses=1]
- store i8 %110, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:111 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %111 ) ; <i8>:112 [#uses=1]
- xor i8 %112, %111 ; <i8>:113 [#uses=1]
- store i8 %113, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:114 [#uses=1]
- zext i8 %114 to i16 ; <i16>:115 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:116 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %116, i16 %115 ) ; <i16>:117 [#uses=1]
- xor i16 %117, %115 ; <i16>:118 [#uses=1]
- store i16 %118, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:119 [#uses=1]
- zext i8 %119 to i16 ; <i16>:120 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:121 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %121, i16 %120 ) ; <i16>:122 [#uses=1]
- xor i16 %122, %120 ; <i16>:123 [#uses=1]
- store i16 %123, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:124 [#uses=1]
- zext i8 %124 to i32 ; <i32>:125 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:126 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %126, i32 %125 ) ; <i32>:127 [#uses=1]
- xor i32 %127, %125 ; <i32>:128 [#uses=1]
- store i32 %128, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:129 [#uses=1]
- zext i8 %129 to i32 ; <i32>:130 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:131 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %131, i32 %130 ) ; <i32>:132 [#uses=1]
- xor i32 %132, %130 ; <i32>:133 [#uses=1]
- store i32 %133, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:134 [#uses=1]
- zext i8 %134 to i64 ; <i64>:135 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:136 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %136, i64 %135 ) ; <i64>:137 [#uses=1]
- xor i64 %137, %135 ; <i64>:138 [#uses=1]
- store i64 %138, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:139 [#uses=1]
- zext i8 %139 to i64 ; <i64>:140 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:141 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %141, i64 %140 ) ; <i64>:142 [#uses=1]
- xor i64 %142, %140 ; <i64>:143 [#uses=1]
- store i64 %143, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:144 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %144 ) ; <i8>:145 [#uses=1]
- and i8 %145, %144 ; <i8>:146 [#uses=1]
- store i8 %146, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:147 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %147 ) ; <i8>:148 [#uses=1]
- and i8 %148, %147 ; <i8>:149 [#uses=1]
- store i8 %149, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:150 [#uses=1]
- zext i8 %150 to i16 ; <i16>:151 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:152 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %152, i16 %151 ) ; <i16>:153 [#uses=1]
- and i16 %153, %151 ; <i16>:154 [#uses=1]
- store i16 %154, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:155 [#uses=1]
- zext i8 %155 to i16 ; <i16>:156 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:157 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %157, i16 %156 ) ; <i16>:158 [#uses=1]
- and i16 %158, %156 ; <i16>:159 [#uses=1]
- store i16 %159, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:160 [#uses=1]
- zext i8 %160 to i32 ; <i32>:161 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:162 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %162, i32 %161 ) ; <i32>:163 [#uses=1]
- and i32 %163, %161 ; <i32>:164 [#uses=1]
- store i32 %164, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:165 [#uses=1]
- zext i8 %165 to i32 ; <i32>:166 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:167 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %167, i32 %166 ) ; <i32>:168 [#uses=1]
- and i32 %168, %166 ; <i32>:169 [#uses=1]
- store i32 %169, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:170 [#uses=1]
- zext i8 %170 to i64 ; <i64>:171 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:172 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %172, i64 %171 ) ; <i64>:173 [#uses=1]
- and i64 %173, %171 ; <i64>:174 [#uses=1]
- store i64 %174, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:175 [#uses=1]
- zext i8 %175 to i64 ; <i64>:176 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:177 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %177, i64 %176 ) ; <i64>:178 [#uses=1]
- and i64 %178, %176 ; <i64>:179 [#uses=1]
- store i64 %179, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:180 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %180 ) ; <i8>:181 [#uses=1]
- xor i8 %181, -1 ; <i8>:182 [#uses=1]
- and i8 %182, %180 ; <i8>:183 [#uses=1]
- store i8 %183, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:184 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %184 ) ; <i8>:185 [#uses=1]
- xor i8 %185, -1 ; <i8>:186 [#uses=1]
- and i8 %186, %184 ; <i8>:187 [#uses=1]
- store i8 %187, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:188 [#uses=1]
- zext i8 %188 to i16 ; <i16>:189 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:190 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %190, i16 %189 ) ; <i16>:191 [#uses=1]
- xor i16 %191, -1 ; <i16>:192 [#uses=1]
- and i16 %192, %189 ; <i16>:193 [#uses=1]
- store i16 %193, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:194 [#uses=1]
- zext i8 %194 to i16 ; <i16>:195 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:196 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %196, i16 %195 ) ; <i16>:197 [#uses=1]
- xor i16 %197, -1 ; <i16>:198 [#uses=1]
- and i16 %198, %195 ; <i16>:199 [#uses=1]
- store i16 %199, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:200 [#uses=1]
- zext i8 %200 to i32 ; <i32>:201 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:202 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %202, i32 %201 ) ; <i32>:203 [#uses=1]
- xor i32 %203, -1 ; <i32>:204 [#uses=1]
- and i32 %204, %201 ; <i32>:205 [#uses=1]
- store i32 %205, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:206 [#uses=1]
- zext i8 %206 to i32 ; <i32>:207 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:208 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %208, i32 %207 ) ; <i32>:209 [#uses=1]
- xor i32 %209, -1 ; <i32>:210 [#uses=1]
- and i32 %210, %207 ; <i32>:211 [#uses=1]
- store i32 %211, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:212 [#uses=1]
- zext i8 %212 to i64 ; <i64>:213 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:214 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %214, i64 %213 ) ; <i64>:215 [#uses=1]
- xor i64 %215, -1 ; <i64>:216 [#uses=1]
- and i64 %216, %213 ; <i64>:217 [#uses=1]
- store i64 %217, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:218 [#uses=1]
- zext i8 %218 to i64 ; <i64>:219 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:220 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %220, i64 %219 ) ; <i64>:221 [#uses=1]
- xor i64 %221, -1 ; <i64>:222 [#uses=1]
- and i64 %222, %219 ; <i64>:223 [#uses=1]
- store i64 %223, i64* @ul, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = load i8* @uc, align 1
+ %1 = atomicrmw add i8* @sc, i8 %0 monotonic
+ %2 = add i8 %1, %0
+ store i8 %2, i8* @sc, align 1
+ %3 = load i8* @uc, align 1
+ %4 = atomicrmw add i8* @uc, i8 %3 monotonic
+ %5 = add i8 %4, %3
+ store i8 %5, i8* @uc, align 1
+ %6 = load i8* @uc, align 1
+ %7 = zext i8 %6 to i16
+ %8 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %9 = atomicrmw add i16* %8, i16 %7 monotonic
+ %10 = add i16 %9, %7
+ store i16 %10, i16* @ss, align 2
+ %11 = load i8* @uc, align 1
+ %12 = zext i8 %11 to i16
+ %13 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %14 = atomicrmw add i16* %13, i16 %12 monotonic
+ %15 = add i16 %14, %12
+ store i16 %15, i16* @us, align 2
+ %16 = load i8* @uc, align 1
+ %17 = zext i8 %16 to i32
+ %18 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %19 = atomicrmw add i32* %18, i32 %17 monotonic
+ %20 = add i32 %19, %17
+ store i32 %20, i32* @si, align 4
+ %21 = load i8* @uc, align 1
+ %22 = zext i8 %21 to i32
+ %23 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %24 = atomicrmw add i32* %23, i32 %22 monotonic
+ %25 = add i32 %24, %22
+ store i32 %25, i32* @ui, align 4
+ %26 = load i8* @uc, align 1
+ %27 = zext i8 %26 to i64
+ %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %29 = atomicrmw add i64* %28, i64 %27 monotonic
+ %30 = add i64 %29, %27
+ store i64 %30, i64* @sl, align 8
+ %31 = load i8* @uc, align 1
+ %32 = zext i8 %31 to i64
+ %33 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %34 = atomicrmw add i64* %33, i64 %32 monotonic
+ %35 = add i64 %34, %32
+ store i64 %35, i64* @ul, align 8
+ %36 = load i8* @uc, align 1
+ %37 = atomicrmw sub i8* @sc, i8 %36 monotonic
+ %38 = sub i8 %37, %36
+ store i8 %38, i8* @sc, align 1
+ %39 = load i8* @uc, align 1
+ %40 = atomicrmw sub i8* @uc, i8 %39 monotonic
+ %41 = sub i8 %40, %39
+ store i8 %41, i8* @uc, align 1
+ %42 = load i8* @uc, align 1
+ %43 = zext i8 %42 to i16
+ %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %45 = atomicrmw sub i16* %44, i16 %43 monotonic
+ %46 = sub i16 %45, %43
+ store i16 %46, i16* @ss, align 2
+ %47 = load i8* @uc, align 1
+ %48 = zext i8 %47 to i16
+ %49 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %50 = atomicrmw sub i16* %49, i16 %48 monotonic
+ %51 = sub i16 %50, %48
+ store i16 %51, i16* @us, align 2
+ %52 = load i8* @uc, align 1
+ %53 = zext i8 %52 to i32
+ %54 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %55 = atomicrmw sub i32* %54, i32 %53 monotonic
+ %56 = sub i32 %55, %53
+ store i32 %56, i32* @si, align 4
+ %57 = load i8* @uc, align 1
+ %58 = zext i8 %57 to i32
+ %59 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %60 = atomicrmw sub i32* %59, i32 %58 monotonic
+ %61 = sub i32 %60, %58
+ store i32 %61, i32* @ui, align 4
+ %62 = load i8* @uc, align 1
+ %63 = zext i8 %62 to i64
+ %64 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %65 = atomicrmw sub i64* %64, i64 %63 monotonic
+ %66 = sub i64 %65, %63
+ store i64 %66, i64* @sl, align 8
+ %67 = load i8* @uc, align 1
+ %68 = zext i8 %67 to i64
+ %69 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %70 = atomicrmw sub i64* %69, i64 %68 monotonic
+ %71 = sub i64 %70, %68
+ store i64 %71, i64* @ul, align 8
+ %72 = load i8* @uc, align 1
+ %73 = atomicrmw or i8* @sc, i8 %72 monotonic
+ %74 = or i8 %73, %72
+ store i8 %74, i8* @sc, align 1
+ %75 = load i8* @uc, align 1
+ %76 = atomicrmw or i8* @uc, i8 %75 monotonic
+ %77 = or i8 %76, %75
+ store i8 %77, i8* @uc, align 1
+ %78 = load i8* @uc, align 1
+ %79 = zext i8 %78 to i16
+ %80 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %81 = atomicrmw or i16* %80, i16 %79 monotonic
+ %82 = or i16 %81, %79
+ store i16 %82, i16* @ss, align 2
+ %83 = load i8* @uc, align 1
+ %84 = zext i8 %83 to i16
+ %85 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %86 = atomicrmw or i16* %85, i16 %84 monotonic
+ %87 = or i16 %86, %84
+ store i16 %87, i16* @us, align 2
+ %88 = load i8* @uc, align 1
+ %89 = zext i8 %88 to i32
+ %90 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %91 = atomicrmw or i32* %90, i32 %89 monotonic
+ %92 = or i32 %91, %89
+ store i32 %92, i32* @si, align 4
+ %93 = load i8* @uc, align 1
+ %94 = zext i8 %93 to i32
+ %95 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %96 = atomicrmw or i32* %95, i32 %94 monotonic
+ %97 = or i32 %96, %94
+ store i32 %97, i32* @ui, align 4
+ %98 = load i8* @uc, align 1
+ %99 = zext i8 %98 to i64
+ %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %101 = atomicrmw or i64* %100, i64 %99 monotonic
+ %102 = or i64 %101, %99
+ store i64 %102, i64* @sl, align 8
+ %103 = load i8* @uc, align 1
+ %104 = zext i8 %103 to i64
+ %105 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %106 = atomicrmw or i64* %105, i64 %104 monotonic
+ %107 = or i64 %106, %104
+ store i64 %107, i64* @ul, align 8
+ %108 = load i8* @uc, align 1
+ %109 = atomicrmw xor i8* @sc, i8 %108 monotonic
+ %110 = xor i8 %109, %108
+ store i8 %110, i8* @sc, align 1
+ %111 = load i8* @uc, align 1
+ %112 = atomicrmw xor i8* @uc, i8 %111 monotonic
+ %113 = xor i8 %112, %111
+ store i8 %113, i8* @uc, align 1
+ %114 = load i8* @uc, align 1
+ %115 = zext i8 %114 to i16
+ %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %117 = atomicrmw xor i16* %116, i16 %115 monotonic
+ %118 = xor i16 %117, %115
+ store i16 %118, i16* @ss, align 2
+ %119 = load i8* @uc, align 1
+ %120 = zext i8 %119 to i16
+ %121 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %122 = atomicrmw xor i16* %121, i16 %120 monotonic
+ %123 = xor i16 %122, %120
+ store i16 %123, i16* @us, align 2
+ %124 = load i8* @uc, align 1
+ %125 = zext i8 %124 to i32
+ %126 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %127 = atomicrmw xor i32* %126, i32 %125 monotonic
+ %128 = xor i32 %127, %125
+ store i32 %128, i32* @si, align 4
+ %129 = load i8* @uc, align 1
+ %130 = zext i8 %129 to i32
+ %131 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %132 = atomicrmw xor i32* %131, i32 %130 monotonic
+ %133 = xor i32 %132, %130
+ store i32 %133, i32* @ui, align 4
+ %134 = load i8* @uc, align 1
+ %135 = zext i8 %134 to i64
+ %136 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %137 = atomicrmw xor i64* %136, i64 %135 monotonic
+ %138 = xor i64 %137, %135
+ store i64 %138, i64* @sl, align 8
+ %139 = load i8* @uc, align 1
+ %140 = zext i8 %139 to i64
+ %141 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %142 = atomicrmw xor i64* %141, i64 %140 monotonic
+ %143 = xor i64 %142, %140
+ store i64 %143, i64* @ul, align 8
+ %144 = load i8* @uc, align 1
+ %145 = atomicrmw and i8* @sc, i8 %144 monotonic
+ %146 = and i8 %145, %144
+ store i8 %146, i8* @sc, align 1
+ %147 = load i8* @uc, align 1
+ %148 = atomicrmw and i8* @uc, i8 %147 monotonic
+ %149 = and i8 %148, %147
+ store i8 %149, i8* @uc, align 1
+ %150 = load i8* @uc, align 1
+ %151 = zext i8 %150 to i16
+ %152 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %153 = atomicrmw and i16* %152, i16 %151 monotonic
+ %154 = and i16 %153, %151
+ store i16 %154, i16* @ss, align 2
+ %155 = load i8* @uc, align 1
+ %156 = zext i8 %155 to i16
+ %157 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %158 = atomicrmw and i16* %157, i16 %156 monotonic
+ %159 = and i16 %158, %156
+ store i16 %159, i16* @us, align 2
+ %160 = load i8* @uc, align 1
+ %161 = zext i8 %160 to i32
+ %162 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %163 = atomicrmw and i32* %162, i32 %161 monotonic
+ %164 = and i32 %163, %161
+ store i32 %164, i32* @si, align 4
+ %165 = load i8* @uc, align 1
+ %166 = zext i8 %165 to i32
+ %167 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %168 = atomicrmw and i32* %167, i32 %166 monotonic
+ %169 = and i32 %168, %166
+ store i32 %169, i32* @ui, align 4
+ %170 = load i8* @uc, align 1
+ %171 = zext i8 %170 to i64
+ %172 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %173 = atomicrmw and i64* %172, i64 %171 monotonic
+ %174 = and i64 %173, %171
+ store i64 %174, i64* @sl, align 8
+ %175 = load i8* @uc, align 1
+ %176 = zext i8 %175 to i64
+ %177 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %178 = atomicrmw and i64* %177, i64 %176 monotonic
+ %179 = and i64 %178, %176
+ store i64 %179, i64* @ul, align 8
+ %180 = load i8* @uc, align 1
+ %181 = atomicrmw nand i8* @sc, i8 %180 monotonic
+ %182 = xor i8 %181, -1
+ %183 = and i8 %182, %180
+ store i8 %183, i8* @sc, align 1
+ %184 = load i8* @uc, align 1
+ %185 = atomicrmw nand i8* @uc, i8 %184 monotonic
+ %186 = xor i8 %185, -1
+ %187 = and i8 %186, %184
+ store i8 %187, i8* @uc, align 1
+ %188 = load i8* @uc, align 1
+ %189 = zext i8 %188 to i16
+ %190 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %191 = atomicrmw nand i16* %190, i16 %189 monotonic
+ %192 = xor i16 %191, -1
+ %193 = and i16 %192, %189
+ store i16 %193, i16* @ss, align 2
+ %194 = load i8* @uc, align 1
+ %195 = zext i8 %194 to i16
+ %196 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %197 = atomicrmw nand i16* %196, i16 %195 monotonic
+ %198 = xor i16 %197, -1
+ %199 = and i16 %198, %195
+ store i16 %199, i16* @us, align 2
+ %200 = load i8* @uc, align 1
+ %201 = zext i8 %200 to i32
+ %202 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %203 = atomicrmw nand i32* %202, i32 %201 monotonic
+ %204 = xor i32 %203, -1
+ %205 = and i32 %204, %201
+ store i32 %205, i32* @si, align 4
+ %206 = load i8* @uc, align 1
+ %207 = zext i8 %206 to i32
+ %208 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %209 = atomicrmw nand i32* %208, i32 %207 monotonic
+ %210 = xor i32 %209, -1
+ %211 = and i32 %210, %207
+ store i32 %211, i32* @ui, align 4
+ %212 = load i8* @uc, align 1
+ %213 = zext i8 %212 to i64
+ %214 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %215 = atomicrmw nand i64* %214, i64 %213 monotonic
+ %216 = xor i64 %215, -1
+ %217 = and i64 %216, %213
+ store i64 %217, i64* @sl, align 8
+ %218 = load i8* @uc, align 1
+ %219 = zext i8 %218 to i64
+ %220 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %221 = atomicrmw nand i64* %220, i64 %219 monotonic
+ %222 = xor i64 %221, -1
+ %223 = and i64 %222, %219
+ store i64 %223, i64* @ul, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
define void @test_compare_and_swap() nounwind {
entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=1]
- load i8* @sc, align 1 ; <i8>:1 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %0, i8 %1 ) ; <i8>:2 [#uses=1]
- store i8 %2, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:3 [#uses=1]
- load i8* @sc, align 1 ; <i8>:4 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %3, i8 %4 ) ; <i8>:5 [#uses=1]
- store i8 %5, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:6 [#uses=1]
- zext i8 %6 to i16 ; <i16>:7 [#uses=1]
- load i8* @sc, align 1 ; <i8>:8 [#uses=1]
- sext i8 %8 to i16 ; <i16>:9 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:10 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %10, i16 %7, i16 %9 ) ; <i16>:11 [#uses=1]
- store i16 %11, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:12 [#uses=1]
- zext i8 %12 to i16 ; <i16>:13 [#uses=1]
- load i8* @sc, align 1 ; <i8>:14 [#uses=1]
- sext i8 %14 to i16 ; <i16>:15 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %16, i16 %13, i16 %15 ) ; <i16>:17 [#uses=1]
- store i16 %17, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:18 [#uses=1]
- zext i8 %18 to i32 ; <i32>:19 [#uses=1]
- load i8* @sc, align 1 ; <i8>:20 [#uses=1]
- sext i8 %20 to i32 ; <i32>:21 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %22, i32 %19, i32 %21 ) ; <i32>:23 [#uses=1]
- store i32 %23, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:24 [#uses=1]
- zext i8 %24 to i32 ; <i32>:25 [#uses=1]
- load i8* @sc, align 1 ; <i8>:26 [#uses=1]
- sext i8 %26 to i32 ; <i32>:27 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:28 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %28, i32 %25, i32 %27 ) ; <i32>:29 [#uses=1]
- store i32 %29, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:30 [#uses=1]
- zext i8 %30 to i64 ; <i64>:31 [#uses=1]
- load i8* @sc, align 1 ; <i8>:32 [#uses=1]
- sext i8 %32 to i64 ; <i64>:33 [#uses=1]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:34 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %34, i64 %31, i64 %33 ) ; <i64>:35 [#uses=1]
- store i64 %35, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:36 [#uses=1]
- zext i8 %36 to i64 ; <i64>:37 [#uses=1]
- load i8* @sc, align 1 ; <i8>:38 [#uses=1]
- sext i8 %38 to i64 ; <i64>:39 [#uses=1]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:40 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %40, i64 %37, i64 %39 ) ; <i64>:41 [#uses=1]
- store i64 %41, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:42 [#uses=2]
- load i8* @sc, align 1 ; <i8>:43 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %42, i8 %43 ) ; <i8>:44 [#uses=1]
- icmp eq i8 %44, %42 ; <i1>:45 [#uses=1]
- zext i1 %45 to i8 ; <i8>:46 [#uses=1]
- zext i8 %46 to i32 ; <i32>:47 [#uses=1]
- store i32 %47, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:48 [#uses=2]
- load i8* @sc, align 1 ; <i8>:49 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %48, i8 %49 ) ; <i8>:50 [#uses=1]
- icmp eq i8 %50, %48 ; <i1>:51 [#uses=1]
- zext i1 %51 to i8 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=1]
- store i32 %53, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:54 [#uses=1]
- zext i8 %54 to i16 ; <i16>:55 [#uses=2]
- load i8* @sc, align 1 ; <i8>:56 [#uses=1]
- sext i8 %56 to i16 ; <i16>:57 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %58, i16 %55, i16 %57 ) ; <i16>:59 [#uses=1]
- icmp eq i16 %59, %55 ; <i1>:60 [#uses=1]
- zext i1 %60 to i8 ; <i8>:61 [#uses=1]
- zext i8 %61 to i32 ; <i32>:62 [#uses=1]
- store i32 %62, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:63 [#uses=1]
- zext i8 %63 to i16 ; <i16>:64 [#uses=2]
- load i8* @sc, align 1 ; <i8>:65 [#uses=1]
- sext i8 %65 to i16 ; <i16>:66 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:67 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %67, i16 %64, i16 %66 ) ; <i16>:68 [#uses=1]
- icmp eq i16 %68, %64 ; <i1>:69 [#uses=1]
- zext i1 %69 to i8 ; <i8>:70 [#uses=1]
- zext i8 %70 to i32 ; <i32>:71 [#uses=1]
- store i32 %71, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:72 [#uses=1]
- zext i8 %72 to i32 ; <i32>:73 [#uses=2]
- load i8* @sc, align 1 ; <i8>:74 [#uses=1]
- sext i8 %74 to i32 ; <i32>:75 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %76, i32 %73, i32 %75 ) ; <i32>:77 [#uses=1]
- icmp eq i32 %77, %73 ; <i1>:78 [#uses=1]
- zext i1 %78 to i8 ; <i8>:79 [#uses=1]
- zext i8 %79 to i32 ; <i32>:80 [#uses=1]
- store i32 %80, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:81 [#uses=1]
- zext i8 %81 to i32 ; <i32>:82 [#uses=2]
- load i8* @sc, align 1 ; <i8>:83 [#uses=1]
- sext i8 %83 to i32 ; <i32>:84 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:85 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %85, i32 %82, i32 %84 ) ; <i32>:86 [#uses=1]
- icmp eq i32 %86, %82 ; <i1>:87 [#uses=1]
- zext i1 %87 to i8 ; <i8>:88 [#uses=1]
- zext i8 %88 to i32 ; <i32>:89 [#uses=1]
- store i32 %89, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:90 [#uses=1]
- zext i8 %90 to i64 ; <i64>:91 [#uses=2]
- load i8* @sc, align 1 ; <i8>:92 [#uses=1]
- sext i8 %92 to i64 ; <i64>:93 [#uses=1]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:94 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %94, i64 %91, i64 %93 ) ; <i64>:95 [#uses=1]
- icmp eq i64 %95, %91 ; <i1>:96 [#uses=1]
- zext i1 %96 to i8 ; <i8>:97 [#uses=1]
- zext i8 %97 to i32 ; <i32>:98 [#uses=1]
- store i32 %98, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:99 [#uses=1]
- zext i8 %99 to i64 ; <i64>:100 [#uses=2]
- load i8* @sc, align 1 ; <i8>:101 [#uses=1]
- sext i8 %101 to i64 ; <i64>:102 [#uses=1]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:103 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %103, i64 %100, i64 %102 ) ; <i64>:104 [#uses=1]
- icmp eq i64 %104, %100 ; <i1>:105 [#uses=1]
- zext i1 %105 to i8 ; <i8>:106 [#uses=1]
- zext i8 %106 to i32 ; <i32>:107 [#uses=1]
- store i32 %107, i32* @ui, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = load i8* @uc, align 1
+ %1 = load i8* @sc, align 1
+ %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic
+ store i8 %2, i8* @sc, align 1
+ %3 = load i8* @uc, align 1
+ %4 = load i8* @sc, align 1
+ %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic
+ store i8 %5, i8* @uc, align 1
+ %6 = load i8* @uc, align 1
+ %7 = zext i8 %6 to i16
+ %8 = load i8* @sc, align 1
+ %9 = sext i8 %8 to i16
+ %10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic
+ store i16 %11, i16* @ss, align 2
+ %12 = load i8* @uc, align 1
+ %13 = zext i8 %12 to i16
+ %14 = load i8* @sc, align 1
+ %15 = sext i8 %14 to i16
+ %16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic
+ store i16 %17, i16* @us, align 2
+ %18 = load i8* @uc, align 1
+ %19 = zext i8 %18 to i32
+ %20 = load i8* @sc, align 1
+ %21 = sext i8 %20 to i32
+ %22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic
+ store i32 %23, i32* @si, align 4
+ %24 = load i8* @uc, align 1
+ %25 = zext i8 %24 to i32
+ %26 = load i8* @sc, align 1
+ %27 = sext i8 %26 to i32
+ %28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic
+ store i32 %29, i32* @ui, align 4
+ %30 = load i8* @uc, align 1
+ %31 = zext i8 %30 to i64
+ %32 = load i8* @sc, align 1
+ %33 = sext i8 %32 to i64
+ %34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %35 = cmpxchg i64* %34, i64 %31, i64 %33 monotonic
+ store i64 %35, i64* @sl, align 8
+ %36 = load i8* @uc, align 1
+ %37 = zext i8 %36 to i64
+ %38 = load i8* @sc, align 1
+ %39 = sext i8 %38 to i64
+ %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %41 = cmpxchg i64* %40, i64 %37, i64 %39 monotonic
+ store i64 %41, i64* @ul, align 8
+ %42 = load i8* @uc, align 1
+ %43 = load i8* @sc, align 1
+ %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic
+ %45 = icmp eq i8 %44, %42
+ %46 = zext i1 %45 to i8
+ %47 = zext i8 %46 to i32
+ store i32 %47, i32* @ui, align 4
+ %48 = load i8* @uc, align 1
+ %49 = load i8* @sc, align 1
+ %50 = cmpxchg i8* @uc, i8 %48, i8 %49 monotonic
+ %51 = icmp eq i8 %50, %48
+ %52 = zext i1 %51 to i8
+ %53 = zext i8 %52 to i32
+ store i32 %53, i32* @ui, align 4
+ %54 = load i8* @uc, align 1
+ %55 = zext i8 %54 to i16
+ %56 = load i8* @sc, align 1
+ %57 = sext i8 %56 to i16
+ %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %59 = cmpxchg i16* %58, i16 %55, i16 %57 monotonic
+ %60 = icmp eq i16 %59, %55
+ %61 = zext i1 %60 to i8
+ %62 = zext i8 %61 to i32
+ store i32 %62, i32* @ui, align 4
+ %63 = load i8* @uc, align 1
+ %64 = zext i8 %63 to i16
+ %65 = load i8* @sc, align 1
+ %66 = sext i8 %65 to i16
+ %67 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %68 = cmpxchg i16* %67, i16 %64, i16 %66 monotonic
+ %69 = icmp eq i16 %68, %64
+ %70 = zext i1 %69 to i8
+ %71 = zext i8 %70 to i32
+ store i32 %71, i32* @ui, align 4
+ %72 = load i8* @uc, align 1
+ %73 = zext i8 %72 to i32
+ %74 = load i8* @sc, align 1
+ %75 = sext i8 %74 to i32
+ %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %77 = cmpxchg i32* %76, i32 %73, i32 %75 monotonic
+ %78 = icmp eq i32 %77, %73
+ %79 = zext i1 %78 to i8
+ %80 = zext i8 %79 to i32
+ store i32 %80, i32* @ui, align 4
+ %81 = load i8* @uc, align 1
+ %82 = zext i8 %81 to i32
+ %83 = load i8* @sc, align 1
+ %84 = sext i8 %83 to i32
+ %85 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %86 = cmpxchg i32* %85, i32 %82, i32 %84 monotonic
+ %87 = icmp eq i32 %86, %82
+ %88 = zext i1 %87 to i8
+ %89 = zext i8 %88 to i32
+ store i32 %89, i32* @ui, align 4
+ %90 = load i8* @uc, align 1
+ %91 = zext i8 %90 to i64
+ %92 = load i8* @sc, align 1
+ %93 = sext i8 %92 to i64
+ %94 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %95 = cmpxchg i64* %94, i64 %91, i64 %93 monotonic
+ %96 = icmp eq i64 %95, %91
+ %97 = zext i1 %96 to i8
+ %98 = zext i8 %97 to i32
+ store i32 %98, i32* @ui, align 4
+ %99 = load i8* @uc, align 1
+ %100 = zext i8 %99 to i64
+ %101 = load i8* @sc, align 1
+ %102 = sext i8 %101 to i64
+ %103 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %104 = cmpxchg i64* %103, i64 %100, i64 %102 monotonic
+ %105 = icmp eq i64 %104, %100
+ %106 = zext i1 %105 to i8
+ %107 = zext i8 %106 to i32
+ store i32 %107, i32* @ui, align 4
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
-
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
-declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind
-
define void @test_lock() nounwind {
entry:
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %10, i64 1 ) ; <i64>:11 [#uses=1]
- store i64 %11, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %12, i64 1 ) ; <i64>:13 [#uses=1]
- store i64 %13, i64* @ul, align 8
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:14 [#uses=1]
- volatile store i16 0, i16* %14, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:15 [#uses=1]
- volatile store i16 0, i16* %15, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:16 [#uses=1]
- volatile store i32 0, i32* %16, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:17 [#uses=1]
- volatile store i32 0, i32* %17, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:18 [#uses=1]
- volatile store i64 0, i64* %18, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:19 [#uses=1]
- volatile store i64 0, i64* %19, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:20 [#uses=1]
- volatile store i64 0, i64* %20, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:21 [#uses=1]
- volatile store i64 0, i64* %21, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = atomicrmw xchg i8* @sc, i8 1 monotonic
+ store i8 %0, i8* @sc, align 1
+ %1 = atomicrmw xchg i8* @uc, i8 1 monotonic
+ store i8 %1, i8* @uc, align 1
+ %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %3 = atomicrmw xchg i16* %2, i16 1 monotonic
+ store i16 %3, i16* @ss, align 2
+ %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %5 = atomicrmw xchg i16* %4, i16 1 monotonic
+ store i16 %5, i16* @us, align 2
+ %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %7 = atomicrmw xchg i32* %6, i32 1 monotonic
+ store i32 %7, i32* @si, align 4
+ %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %9 = atomicrmw xchg i32* %8, i32 1 monotonic
+ store i32 %9, i32* @ui, align 4
+ %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %11 = atomicrmw xchg i64* %10, i64 1 monotonic
+ store i64 %11, i64* @sl, align 8
+ %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %13 = atomicrmw xchg i64* %12, i64 1 monotonic
+ store i64 %13, i64* @ul, align 8
+ fence seq_cst
+ store volatile i8 0, i8* @sc, align 1
+ store volatile i8 0, i8* @uc, align 1
+ %14 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ store volatile i16 0, i16* %14, align 2
+ %15 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ store volatile i16 0, i16* %15, align 2
+ %16 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ store volatile i32 0, i32* %16, align 4
+ %17 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ store volatile i32 0, i32* %17, align 4
+ %18 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ store volatile i64 0, i64* %18, align 8
+ %19 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ store volatile i64 0, i64* %19, align 8
+ %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ store volatile i64 0, i64* %20, align 8
+ %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ store volatile i64 0, i64* %21, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.swap.i64.p0i64(i64*, i64) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/PowerPC/atomic-1.ll b/test/CodeGen/PowerPC/atomic-1.ll
index a2cf170..cbfa4094 100644
--- a/test/CodeGen/PowerPC/atomic-1.ll
+++ b/test/CodeGen/PowerPC/atomic-1.ll
@@ -1,23 +1,26 @@
-; RUN: llc < %s -march=ppc32 | grep lwarx | count 3
-; RUN: llc < %s -march=ppc32 | grep stwcx. | count 4
+; RUN: llc < %s -march=ppc32 | FileCheck %s
define i32 @exchange_and_add(i32* %mem, i32 %val) nounwind {
- %tmp = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %mem, i32 %val)
+; CHECK: exchange_and_add:
+; CHECK: lwarx
+ %tmp = atomicrmw add i32* %mem, i32 %val monotonic
+; CHECK: stwcx.
ret i32 %tmp
}
define i32 @exchange_and_cmp(i32* %mem) nounwind {
- %tmp = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %mem, i32 0, i32 1)
+; CHECK: exchange_and_cmp:
+; CHECK: lwarx
+ %tmp = cmpxchg i32* %mem, i32 0, i32 1 monotonic
+; CHECK: stwcx.
+; CHECK: stwcx.
ret i32 %tmp
}
define i32 @exchange(i32* %mem, i32 %val) nounwind {
- %tmp = call i32 @llvm.atomic.swap.i32.p0i32(i32* %mem, i32 1)
+; CHECK: exchange:
+; CHECK: lwarx
+ %tmp = atomicrmw xchg i32* %mem, i32 1 monotonic
+; CHECK: stwcx.
ret i32 %tmp
}
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* nocapture, i32, i32) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32* nocapture, i32) nounwind
diff --git a/test/CodeGen/PowerPC/atomic-2.ll b/test/CodeGen/PowerPC/atomic-2.ll
index 0fa2a29..a427379 100644
--- a/test/CodeGen/PowerPC/atomic-2.ll
+++ b/test/CodeGen/PowerPC/atomic-2.ll
@@ -1,23 +1,26 @@
-; RUN: llc < %s -march=ppc64 | grep ldarx | count 3
-; RUN: llc < %s -march=ppc64 | grep stdcx. | count 4
+; RUN: llc < %s -march=ppc64 | FileCheck %s
define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
- %tmp = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %mem, i64 %val)
+; CHECK: exchange_and_add:
+; CHECK: ldarx
+ %tmp = atomicrmw add i64* %mem, i64 %val monotonic
+; CHECK: stdcx.
ret i64 %tmp
}
define i64 @exchange_and_cmp(i64* %mem) nounwind {
- %tmp = call i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* %mem, i64 0, i64 1)
+; CHECK: exchange_and_cmp:
+; CHECK: ldarx
+ %tmp = cmpxchg i64* %mem, i64 0, i64 1 monotonic
+; CHECK: stdcx.
+; CHECK: stdcx.
ret i64 %tmp
}
define i64 @exchange(i64* %mem, i64 %val) nounwind {
- %tmp = call i64 @llvm.atomic.swap.i64.p0i64(i64* %mem, i64 1)
+; CHECK: exchange:
+; CHECK: ldarx
+ %tmp = atomicrmw xchg i64* %mem, i64 1 monotonic
+; CHECK: stdcx.
ret i64 %tmp
}
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
-
-declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* nocapture, i64, i64) nounwind
-
-declare i64 @llvm.atomic.swap.i64.p0i64(i64* nocapture, i64) nounwind
diff --git a/test/CodeGen/PowerPC/cr1eq.ll b/test/CodeGen/PowerPC/cr1eq.ll
new file mode 100644
index 0000000..fb9c969
--- /dev/null
+++ b/test/CodeGen/PowerPC/cr1eq.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s | FileCheck %s
+; ModuleID = 'test.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32"
+target triple = "powerpc-unknown-freebsd"
+
+@.str = private unnamed_addr constant [4 x i8] c"%i\0A\00", align 1
+@.str1 = private unnamed_addr constant [4 x i8] c"%f\0A\00", align 1
+
+define void @foo() nounwind {
+entry:
+; CHECK: crxor 6, 6, 6
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 1)
+; CHECK: creqv 6, 6, 6
+ %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str1, i32 0, i32 0), double 1.100000e+00)
+ ret void
+}
+
+declare i32 @printf(i8*, ...)
diff --git a/test/CodeGen/PowerPC/trampoline.ll b/test/CodeGen/PowerPC/trampoline.ll
index bc05bb1..91b2011 100644
--- a/test/CodeGen/PowerPC/trampoline.ll
+++ b/test/CodeGen/PowerPC/trampoline.ll
@@ -67,7 +67,8 @@ entry:
store %struct.NSBitmapImageRep* %4, %struct.NSBitmapImageRep** %3, align 4
%TRAMP.91 = bitcast %struct.__builtin_trampoline* %TRAMP.9 to i8* ; <i8*> [#uses=1]
%FRAME.72 = bitcast %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7 to i8* ; <i8*> [#uses=1]
- %tramp = call i8* @llvm.init.trampoline(i8* %TRAMP.91, i8* bitcast (void (%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %struct.__block_1*, %struct.CGImage*)* @__helper_1.1632 to i8*), i8* %FRAME.72) ; <i8*> [#uses=1]
+ call void @llvm.init.trampoline(i8* %TRAMP.91, i8* bitcast (void (%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %struct.__block_1*, %struct.CGImage*)* @__helper_1.1632 to i8*), i8* %FRAME.72) ; <i8*> [#uses=1]
+ %tramp = call i8* @llvm.adjust.trampoline(i8* %TRAMP.91)
store i8* %tramp, i8** %0, align 4
%5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
%6 = load i8** %0, align 4 ; <i8*> [#uses=1]
@@ -113,7 +114,8 @@ return: ; preds = %entry
ret %struct.objc_object* %retval5
}
-declare i8* @llvm.init.trampoline(i8*, i8*, i8*) nounwind
+declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind
+declare i8* @llvm.adjust.trampoline(i8*) nounwind
define internal void @__helper_1.1632(%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* nest %CHAIN.8, %struct.__block_1* %_self, %struct.CGImage* %cgImage) nounwind {
entry:
diff --git a/test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll b/test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll
index ed55bb5..2890c22 100644
--- a/test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll
+++ b/test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll
@@ -1,7 +1,11 @@
-; RUN: llc -mtriple=thumbv6-apple-darwin < %s
+; DISABLED: llc -mtriple=thumbv6-apple-darwin < %s
+; RUN: false
; rdar://problem/9416774
; ModuleID = 'reduced.ll'
+; byval is currently unsupported.
+; XFAIL: *
+
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-ios"
diff --git a/test/CodeGen/Thumb/barrier.ll b/test/CodeGen/Thumb/barrier.ll
index d39b50f..50d138f 100644
--- a/test/CodeGen/Thumb/barrier.ll
+++ b/test/CodeGen/Thumb/barrier.ll
@@ -2,24 +2,12 @@
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=-db | FileCheck %s -check-prefix=V6
; RUN: llc < %s -march=thumb -mcpu=cortex-m0 | FileCheck %s -check-prefix=V6M
-declare void @llvm.memory.barrier(i1 , i1 , i1 , i1 , i1)
-
define void @t1() {
; V6: t1:
; V6: blx {{_*}}sync_synchronize
; V6M: t1:
-; V6M: dmb st
- call void @llvm.memory.barrier(i1 false, i1 false, i1 false, i1 true, i1 true)
- ret void
-}
-
-define void @t2() {
-; V6: t2:
-; V6: blx {{_*}}sync_synchronize
-
-; V6M: t2:
; V6M: dmb ish
- call void @llvm.memory.barrier(i1 true, i1 false, i1 false, i1 true, i1 false)
+ fence seq_cst
ret void
}
diff --git a/test/CodeGen/Thumb/iabs.ll b/test/CodeGen/Thumb/iabs.ll
index d7cdcd8..d03b5b2 100644
--- a/test/CodeGen/Thumb/iabs.ll
+++ b/test/CodeGen/Thumb/iabs.ll
@@ -3,9 +3,9 @@
;; Integer absolute value, should produce something as good as:
;; Thumb:
-;; asr r2, r0, #31
-;; add r0, r0, r2
-;; eor r0, r2
+;; movs r0, r0
+;; bpl
+;; rsb r0, r0, #0 (with optimization, bpl + rsb is if-converted into rsbmi)
;; bx lr
define i32 @test(i32 %a) {
@@ -13,5 +13,10 @@ define i32 @test(i32 %a) {
%b = icmp sgt i32 %a, -1
%abs = select i1 %b, i32 %a, i32 %tmp1neg
ret i32 %abs
+; CHECK: movs r0, r0
+; CHECK: bpl
+; CHECK: rsb r0, r0, #0
+; CHECK: bx lr
}
+
diff --git a/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll b/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
index 8ca001c..034a28f 100644
--- a/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
+++ b/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
@@ -6,8 +6,8 @@ entry:
; -- The loop following the load should only use a single add-literation
; instruction.
; CHECK: ldr.64
-; CHECK: adds r{{[0-9]+}}, #1
-; CHECK-NOT: adds r{{[0-9]+}}, #1
+; CHECK: adds r{{[0-9]+.*}}#1
+; CHECK-NOT: adds
; CHECK: subsections_via_symbols
diff --git a/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll b/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
index f91e1c9..244d0bb 100644
--- a/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
+++ b/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
@@ -32,15 +32,15 @@
define fastcc i32 @parse_percent_token() nounwind {
entry:
-; CHECK: ittt eq
-; CHECK: ittt eq
-; CHECK: ittt eq
-; CHECK: ittt eq
-; CHECK: ittt eq
-; CHECK: moveq r0
-; CHECK-NOT: LBB0_
-; CHECK: ldreq
-; CHECK: popeq
+; CHECK: pop
+; CHECK: pop
+; CHECK: pop
+; CHECK: pop
+; CHECK: pop
+; CHECK: pop
+; CHECK: pop
+; Do not convert into single stream code. BranchProbability Analysis assumes
+; that branches which go to a "ret" instruction have lower probabilities.
switch i32 undef, label %bb7 [
i32 37, label %bb43
i32 48, label %bb5
diff --git a/test/CodeGen/Thumb2/machine-licm.ll b/test/CodeGen/Thumb2/machine-licm.ll
index b199d69..46937fc 100644
--- a/test/CodeGen/Thumb2/machine-licm.ll
+++ b/test/CodeGen/Thumb2/machine-licm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=dynamic-no-pic -disable-fp-elim | FileCheck %s
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s --check-prefix=PIC
; rdar://7353541
; rdar://7354376
diff --git a/test/CodeGen/Thumb2/thumb2-barrier.ll b/test/CodeGen/Thumb2/thumb2-barrier.ll
deleted file mode 100644
index 93ae7c4..0000000
--- a/test/CodeGen/Thumb2/thumb2-barrier.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=thumb -mcpu=cortex-a8 | FileCheck %s
-
-declare void @llvm.memory.barrier(i1 , i1 , i1 , i1 , i1)
-
-define void @t_st() {
-; CHECK: t_st:
-; CHECK: dmb st
- call void @llvm.memory.barrier(i1 false, i1 false, i1 false, i1 true, i1 true)
- ret void
-}
-
-define void @t_sy() {
-; CHECK: t_sy:
-; CHECK: dmb sy
- call void @llvm.memory.barrier(i1 true, i1 false, i1 false, i1 true, i1 true)
- ret void
-}
-
-define void @t_ishst() {
-; CHECK: t_ishst:
-; CHECK: dmb ishst
- call void @llvm.memory.barrier(i1 false, i1 false, i1 false, i1 true, i1 false)
- ret void
-}
-
-define void @t_ish() {
-; CHECK: t_ish:
-; CHECK: dmb ish
- call void @llvm.memory.barrier(i1 true, i1 false, i1 false, i1 true, i1 false)
- ret void
-}
diff --git a/test/CodeGen/Thumb2/thumb2-bcc.ll b/test/CodeGen/Thumb2/thumb2-bcc.ll
index 70febc06..4a2d600 100644
--- a/test/CodeGen/Thumb2/thumb2-bcc.ll
+++ b/test/CodeGen/Thumb2/thumb2-bcc.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | not grep it
; If-conversion defeats the purpose of this test, which is to check CBZ
; generation, so use a memory barrier instruction to make sure it doesn't
; happen and we get actual branches.
@@ -7,20 +6,18 @@
define i32 @t1(i32 %a, i32 %b, i32 %c) {
; CHECK: t1:
; CHECK: cbz
- %tmp2 = icmp eq i32 %a, 0
- br i1 %tmp2, label %cond_false, label %cond_true
+ %tmp2 = icmp eq i32 %a, 0
+ br i1 %tmp2, label %cond_false, label %cond_true
cond_true:
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
- %tmp5 = add i32 %b, 1
- %tmp6 = and i32 %tmp5, %c
- ret i32 %tmp6
+ fence seq_cst
+ %tmp5 = add i32 %b, 1
+ %tmp6 = and i32 %tmp5, %c
+ ret i32 %tmp6
cond_false:
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
- %tmp7 = add i32 %b, -1
- %tmp8 = xor i32 %tmp7, %c
- ret i32 %tmp8
+ fence seq_cst
+ %tmp7 = add i32 %b, -1
+ %tmp8 = xor i32 %tmp7, %c
+ ret i32 %tmp8
}
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/Thumb2/thumb2-branch.ll b/test/CodeGen/Thumb2/thumb2-branch.ll
index 4d9eda0..27d8e8f 100644
--- a/test/CodeGen/Thumb2/thumb2-branch.ll
+++ b/test/CodeGen/Thumb2/thumb2-branch.ll
@@ -11,12 +11,12 @@ entry:
br i1 %tmp, label %cond_true, label %return
cond_true: ; preds = %entry
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
+ fence seq_cst
store i32 0, i32* %v
ret i32 0
return: ; preds = %entry
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
+ fence seq_cst
ret i32 1
}
@@ -28,12 +28,12 @@ entry:
br i1 %tmp, label %cond_true, label %return
cond_true: ; preds = %entry
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
+ fence seq_cst
store i32 0, i32* %v
ret i32 0
return: ; preds = %entry
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
+ fence seq_cst
ret i32 1
}
@@ -45,12 +45,12 @@ entry:
br i1 %tmp, label %cond_true, label %return
cond_true: ; preds = %entry
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
+ fence seq_cst
store i32 0, i32* %v
ret i32 0
return: ; preds = %entry
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
+ fence seq_cst
ret i32 1
}
@@ -62,13 +62,11 @@ entry:
br i1 %tmp, label %return, label %cond_true
cond_true: ; preds = %entry
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
+ fence seq_cst
store i32 0, i32* %v
ret i32 0
return: ; preds = %entry
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
+ fence seq_cst
ret i32 1
}
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/Thumb2/thumb2-ifcvt1.ll b/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
index a4035bb..af8fcc6 100644
--- a/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
+++ b/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
@@ -21,13 +21,13 @@ cond_next:
ret i32 %tmp15
}
-; FIXME: Check for # of unconditional branch after adding branch folding post ifcvt.
define i32 @t2(i32 %a, i32 %b) nounwind {
entry:
+; Do not if-convert when the branches go to different loops.
; CHECK: t2:
-; CHECK: ite gt
-; CHECK: subgt
-; CHECK: suble
+; CHECK-NOT: ite gt
+; CHECK-NOT: subgt
+; CHECK-NOT: suble
%tmp1434 = icmp eq i32 %a, %b ; <i1> [#uses=1]
br i1 %tmp1434, label %bb17, label %bb.outer
diff --git a/test/CodeGen/Thumb2/thumb2-ldm.ll b/test/CodeGen/Thumb2/thumb2-ldm.ll
index c5f7e84..4f2b7c1 100644
--- a/test/CodeGen/Thumb2/thumb2-ldm.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldm.ll
@@ -15,7 +15,7 @@ define i32 @t1() {
define i32 @t2() {
; CHECK: t2:
; CHECK: push {r7, lr}
-; CHECK: ldmia
+; CHECK: ldm
; CHECK: pop {r7, pc}
%tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
%tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1]
diff --git a/test/CodeGen/Thumb2/thumb2-mls.ll b/test/CodeGen/Thumb2/thumb2-mls.ll
index fc9e6ba..24c45c5 100644
--- a/test/CodeGen/Thumb2/thumb2-mls.ll
+++ b/test/CodeGen/Thumb2/thumb2-mls.ll
@@ -15,5 +15,5 @@ define i32 @f2(i32 %a, i32 %b, i32 %c) {
ret i32 %tmp2
}
; CHECK: f2:
-; CHECK: muls r0, r1
+; CHECK: muls r0, r0, r1
diff --git a/test/CodeGen/Thumb2/thumb2-mul.ll b/test/CodeGen/Thumb2/thumb2-mul.ll
index 8d1de55..bb97d97 100644
--- a/test/CodeGen/Thumb2/thumb2-mul.ll
+++ b/test/CodeGen/Thumb2/thumb2-mul.ll
@@ -2,7 +2,7 @@
define i32 @f1(i32 %a, i32 %b, i32 %c) {
; CHECK: f1:
-; CHECK: muls r0, r1
+; CHECK: muls r0, r0, r1
%tmp = mul i32 %a, %b
ret i32 %tmp
}
diff --git a/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll b/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
new file mode 100644
index 0000000..ab888e6
--- /dev/null
+++ b/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -march=thumb -mcpu=cortex-m3 | FileCheck %s
+
+define i32 @test1(i16 zeroext %z) nounwind {
+; CHECK: test1:
+; CHECK: sxth
+ %r = sext i16 %z to i32
+ ret i32 %r
+}
+
+define i32 @test2(i8 zeroext %z) nounwind {
+; CHECK: test2:
+; CHECK: sxtb
+ %r = sext i8 %z to i32
+ ret i32 %r
+}
+
+define i32 @test3(i16 signext %z) nounwind {
+; CHECK: test3:
+; CHECK: uxth
+ %r = zext i16 %z to i32
+ ret i32 %r
+}
+
+define i32 @test4(i8 signext %z) nounwind {
+; CHECK: test4:
+; CHECK: uxtb
+ %r = zext i8 %z to i32
+ ret i32 %r
+}
diff --git a/test/CodeGen/X86/2006-05-11-InstrSched.ll b/test/CodeGen/X86/2006-05-11-InstrSched.ll
index 56d6aa9..a871ea1 100644
--- a/test/CodeGen/X86/2006-05-11-InstrSched.ll
+++ b/test/CodeGen/X86/2006-05-11-InstrSched.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats -realign-stack=0 |&\
+; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+sse2 -stats -realign-stack=0 |&\
; RUN: grep {asm-printer} | grep 34
target datalayout = "e-p:32:32"
diff --git a/test/CodeGen/X86/2006-07-19-ATTAsm.ll b/test/CodeGen/X86/2006-07-19-ATTAsm.ll
deleted file mode 100644
index c8fd10f..0000000
--- a/test/CodeGen/X86/2006-07-19-ATTAsm.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=att
-; PR834
-; END.
-
-target datalayout = "e-p:32:32"
-target triple = "i386-unknown-freebsd6.1"
- %llvm.dbg.anchor.type = type { i32, i32 }
- %llvm.dbg.basictype.type = type { i32, { }*, i8*, { }*, i32, i64, i64, i64, i32, i32 }
- %llvm.dbg.compile_unit.type = type { i32, { }*, i32, i8*, i8*, i8* }
- %llvm.dbg.global_variable.type = type { i32, { }*, { }*, i8*, i8 *, i8*, { }*, i32, { }*, i1, i1, { }* }
-@x = global i32 0 ; <i32*> [#uses=1]
-@llvm.dbg.global_variable = internal constant %llvm.dbg.global_variable.type {
- i32 327732,
- { }* bitcast (%llvm.dbg.anchor.type* @llvm.dbg.global_variables to { }*),
- { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
- i8* getelementptr ([2 x i8]* @str, i64 0, i64 0),
- i8* getelementptr ([2 x i8]* @str, i64 0, i64 0),
- i8* null,
- { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
- i32 1,
- { }* bitcast (%llvm.dbg.basictype.type* @llvm.dbg.basictype to { }*),
- i1 false,
- i1 true,
- { }* bitcast (i32* @x to { }*) }, section "llvm.metadata" ; <%llvm.dbg.global_variable.type*> [#uses=0]
-@llvm.dbg.global_variables = linkonce constant %llvm.dbg.anchor.type { i32 327680, i32 52 }, section "llvm.metadata" ; <%llvm.dbg.anchor.type*> [#uses=1]
-@llvm.dbg.compile_unit = internal constant %llvm.dbg.compile_unit.type {
- i32 327697,
- { }* bitcast (%llvm.dbg.anchor.type* @llvm.dbg.compile_units to { }*),
- i32 4,
- i8* getelementptr ([10 x i8]* @str1, i64 0, i64 0),
- i8* getelementptr ([32 x i8]* @str2, i64 0, i64 0),
- i8* getelementptr ([45 x i8]* @str3, i64 0, i64 0) }, section "llvm.metadata" ; <%llvm.dbg.compile_unit.type*> [#uses=1]
-@llvm.dbg.compile_units = linkonce constant %llvm.dbg.anchor.type { i32 327680, i32 17 }, section "llvm.metadata" ; <%llvm.dbg.anchor.type*> [#uses=1]
-@str1 = internal constant [10 x i8] c"testb.cpp\00", section "llvm.metadata" ; <[10 x i8]*> [#uses=1]
-@str2 = internal constant [32 x i8] c"/Sources/Projects/DwarfTesting/\00", section "llvm.metadata" ; <[32 x i8]*> [#uses=1]
-@str3 = internal constant [45 x i8] c"4.0.1 LLVM (Apple Computer, Inc. build 5400)\00", section "llvm.metadata" ; <[45 x i8]*> [#uses=1]
-@str = internal constant [2 x i8] c"x\00", section "llvm.metadata" ; <[2 x i8]*> [#uses=1]
-@llvm.dbg.basictype = internal constant %llvm.dbg.basictype.type {
- i32 327716,
- { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
- i8* getelementptr ([4 x i8]* @str4, i64 0, i64 0),
- { }* null,
- i32 0,
- i64 32,
- i64 32,
- i64 0,
- i32 0,
- i32 5 }, section "llvm.metadata" ; <%llvm.dbg.basictype.type*> [#uses=1]
-@str4 = internal constant [4 x i8] c"int\00", section "llvm.metadata" ; <[4 x i8]*> [#uses=1]
diff --git a/test/CodeGen/X86/2007-05-07-InvokeSRet.ll b/test/CodeGen/X86/2007-05-07-InvokeSRet.ll
index 22e2750..deb3999 100644
--- a/test/CodeGen/X86/2007-05-07-InvokeSRet.ll
+++ b/test/CodeGen/X86/2007-05-07-InvokeSRet.ll
@@ -11,5 +11,9 @@ entry:
to label %return unwind label %return
return: ; preds = %entry, %entry
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
ret void
}
+
+declare i32 @__gxx_personality_v0(...)
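
This and the remaining X86 EH fixes in this patch add the same boilerplate required by the new exception-handling model: every block that an invoke unwinds to must begin with a landingpad instruction naming a personality function, and that personality must be declared. A minimal sketch of the shape being added, assuming the C++ personality used by these tests (callee and label names are illustrative):

declare void @may_throw()
declare i32 @__gxx_personality_v0(...)

define void @invoke_sketch() {
entry:
  invoke void @may_throw()
          to label %cont unwind label %lpad

cont:
  ret void

lpad:
  ; The landing pad must be the first non-PHI instruction of the unwind
  ; destination; { i8*, i32 } carries the exception pointer and selector.
  %exn = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
          cleanup
  ret void
}
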
diff --git a/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll b/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
index b040095..266fd7b 100644
--- a/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
+++ b/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
@@ -1,4 +1,12 @@
-; RUN: llc < %s -march=x86 | not grep pushf
+; RUN: llc < %s -march=x86 -mattr=+cmov | FileCheck %s
+;
+; Test scheduling a multi-use compare. We should neither spill flags
+; nor clone the compare.
+; CHECK: cmp
+; CHECK-NOT: pushf
+; CHECK: cmov
+; CHECK-NOT: cmp
+; CHECK: cmov
%struct.indexentry = type { i32, i8*, i8*, i8*, i8*, i8* }
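
The rewritten checks spell out the property under test: one compare whose flags feed two conditional moves must keep EFLAGS live across both uses, with neither a pushf/popf spill (the old "not grep pushf") nor a cloned second cmp. An illustrative IR shape with such a multi-use compare (not taken from the test itself):

define i32 @multiuse_cmp(i32 %x, i32 %y, i32 %a, i32 %b) nounwind {
entry:
  ; A single icmp feeding two selects should lower, with +cmov, to one cmp
  ; followed by two cmov instructions and no flag spill in between.
  %c  = icmp slt i32 %x, %y
  %s1 = select i1 %c, i32 %a, i32 %b
  %s2 = select i1 %c, i32 %b, i32 %a
  %r  = add i32 %s1, %s2
  ret i32 %r
}
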
diff --git a/test/CodeGen/X86/2008-04-17-CoalescerBug.ll b/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
index 77720aa..859041e 100644
--- a/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
+++ b/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
@@ -151,8 +151,12 @@ bb7819: ; preds = %bb3314
bb7834: ; preds = %bb7806, %invcont5831
br label %bb3261
lpad: ; preds = %bb7806, %bb5968, %invcont5814, %bb440.i8663, %bb155.i8541, %bb5657, %bb3306
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
ret void
lpad8185: ; preds = %invcont5831
+ %exn8185 = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
ret void
}
@@ -169,3 +173,5 @@ declare %struct.wxStringBase* @_ZN12wxStringBase6appendEmw(%struct.wxStringBase*
declare %struct.wxStringBase* @_ZN12wxStringBaseaSEPKw(%struct.wxStringBase*, i32*)
declare void @_ZN8wxString6FormatEPKwz(%struct.wxString* noalias sret , i32*, ...)
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll b/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll
index 94c95d4..0d11546 100644
--- a/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll
+++ b/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll
@@ -23,8 +23,12 @@ tmp12.i.i.i.i.i.noexc65: ; preds = %bb37
unreachable
lpad243: ; preds = %bb37
- %eh_ptr244 = call i8* @llvm.eh.exception( ) ; <i8*> [#uses=1]
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ %eh_ptr244 = extractvalue { i8*, i32 } %exn, 0
store i32 (...)** getelementptr ([5 x i32 (...)*]* @_ZTVN10Evaluation10GridOutputILi3EEE, i32 0, i32 2), i32 (...)*** null, align 8
%tmp133 = call i8* @__cxa_begin_catch( i8* %eh_ptr244 ) nounwind ; <i8*> [#uses=0]
unreachable
}
+
+declare i32 @__gxx_personality_v0(...)
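
Here, in addition to the landing pad itself, the pointer that the removed llvm.eh.exception call used to produce is now read out of the landingpad's aggregate result. Extending the earlier invoke sketch, the mapping looks like this (lpad block fragment only, identifiers illustrative):

lpad:
  %lp = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
          cleanup
  ; Element 0 replaces the old "call i8* @llvm.eh.exception()"; element 1 is
  ; the selector value that llvm.eh.selector used to compute.
  %eh.ptr = extractvalue { i8*, i32 } %lp, 0
  %tmp = call i8* @__cxa_begin_catch(i8* %eh.ptr) nounwind
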
diff --git a/test/CodeGen/X86/2008-08-19-SubAndFetch.ll b/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
index 8475e8d..360ec73 100644
--- a/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
+++ b/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
@@ -7,8 +7,6 @@ entry:
; CHECK: main:
; CHECK: lock
; CHECK: decq
- tail call i64 @llvm.atomic.load.sub.i64.p0i64( i64* @var, i64 1 ) ; <i64>:0 [#uses=0]
+ atomicrmw sub i64* @var, i64 1 monotonic
unreachable
}
-
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
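
The SubAndFetch change shows the general recipe: each llvm.atomic.load.OP intrinsic becomes an atomicrmw instruction with an explicit ordering, llvm.atomic.swap becomes atomicrmw xchg, and llvm.atomic.cmp.swap becomes cmpxchg. That is presumably also why the old Atomics32 test below, written entirely against those intrinsics, is deleted outright rather than ported. A few before/after pairs as a sketch, using the monotonic ordering from the hunk above (globals and names illustrative):

@var = global i64 0
@cnt = global i32 0

define void @rmw_sketch() nounwind {
entry:
  ; old: call i64 @llvm.atomic.load.sub.i64.p0i64(i64* @var, i64 1)
  %0 = atomicrmw sub i64* @var, i64 1 monotonic
  ; old: call i32 @llvm.atomic.load.add.i32.p0i32(i32* @cnt, i32 11)
  %1 = atomicrmw add i32* @cnt, i32 11 monotonic
  ; old: call i32 @llvm.atomic.swap.i32.p0i32(i32* @cnt, i32 1)
  %2 = atomicrmw xchg i32* @cnt, i32 1 monotonic
  ; old: call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* @cnt, i32 0, i32 1)
  %3 = cmpxchg i32* @cnt, i32 0, i32 1 monotonic
  ret void
}
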
diff --git a/test/CodeGen/X86/2008-09-18-inline-asm-2.ll b/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
index dfd165c..511c7b5 100644
--- a/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
+++ b/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
@@ -18,7 +18,7 @@
; CHECK-NOT: [[A3]]
; CHECK: 5th=[[A5:%...]]
; CHECK-NOT: [[A1]]
-; CHECK-NOT; [[A5]]
+; CHECK-NOT: [[A5]]
; CHECK: =4th
; The 6th operand is an 8-bit register, and it mustn't alias the 1st and 5th.
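
The one-character fix above is significant: FileCheck only recognizes a directive when the prefix is followed by a colon, so the old "CHECK-NOT;" line was almost certainly skipped as a plain comment and the no-alias constraint on the 5th operand was never enforced. The corrected pair of lines, for reference (variable capture with ':', reuse without):

; CHECK: 5th=[[A5:%...]]
; CHECK-NOT: [[A5]]
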
diff --git a/test/CodeGen/X86/2008-10-02-Atomics32-2.ll b/test/CodeGen/X86/2008-10-02-Atomics32-2.ll
deleted file mode 100644
index b48c4ad..0000000
--- a/test/CodeGen/X86/2008-10-02-Atomics32-2.ll
+++ /dev/null
@@ -1,969 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-;; This version includes 64-bit version of binary operators (in 32-bit mode).
-;; Swap, cmp-and-swap not supported yet in this mode.
-; ModuleID = 'Atomics.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-@sc = common global i8 0 ; <i8*> [#uses=52]
-@uc = common global i8 0 ; <i8*> [#uses=112]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=23]
-@sl = common global i32 0 ; <i32*> [#uses=15]
-@ul = common global i32 0 ; <i32*> [#uses=15]
-@sll = common global i64 0, align 8 ; <i64*> [#uses=13]
-@ull = common global i64 0, align 8 ; <i64*> [#uses=13]
-
-define void @test_op_ignore() nounwind {
-entry:
- %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %1 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %3 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %2, i16 1) ; <i16> [#uses=0]
- %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %5 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %4, i16 1) ; <i16> [#uses=0]
- %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %7 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %6, i32 1) ; <i32> [#uses=0]
- %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %9 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %8, i32 1) ; <i32> [#uses=0]
- %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %11 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %10, i32 1) ; <i32> [#uses=0]
- %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %13 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %12, i32 1) ; <i32> [#uses=0]
- %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %15 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %14, i64 1) ; <i64> [#uses=0]
- %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %17 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %16, i64 1) ; <i64> [#uses=0]
- %18 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %19 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %21 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %20, i16 1) ; <i16> [#uses=0]
- %22 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %23 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %22, i16 1) ; <i16> [#uses=0]
- %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %25 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %24, i32 1) ; <i32> [#uses=0]
- %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %27 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %26, i32 1) ; <i32> [#uses=0]
- %28 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %29 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %28, i32 1) ; <i32> [#uses=0]
- %30 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %31 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %30, i32 1) ; <i32> [#uses=0]
- %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %33 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %32, i64 1) ; <i64> [#uses=0]
- %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %35 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %34, i64 1) ; <i64> [#uses=0]
- %36 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %37 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %39 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %38, i16 1) ; <i16> [#uses=0]
- %40 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %41 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %40, i16 1) ; <i16> [#uses=0]
- %42 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %43 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %42, i32 1) ; <i32> [#uses=0]
- %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %45 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %44, i32 1) ; <i32> [#uses=0]
- %46 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %47 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %46, i32 1) ; <i32> [#uses=0]
- %48 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %49 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %48, i32 1) ; <i32> [#uses=0]
- %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %51 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %50, i64 1) ; <i64> [#uses=0]
- %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %53 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %52, i64 1) ; <i64> [#uses=0]
- %54 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %55 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %57 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %56, i16 1) ; <i16> [#uses=0]
- %58 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %59 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %58, i16 1) ; <i16> [#uses=0]
- %60 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %61 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %60, i32 1) ; <i32> [#uses=0]
- %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %63 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %62, i32 1) ; <i32> [#uses=0]
- %64 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %65 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %64, i32 1) ; <i32> [#uses=0]
- %66 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %67 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %66, i32 1) ; <i32> [#uses=0]
- %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %69 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %68, i64 1) ; <i64> [#uses=0]
- %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %71 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %70, i64 1) ; <i64> [#uses=0]
- %72 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %73 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %75 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %74, i16 1) ; <i16> [#uses=0]
- %76 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %77 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %76, i16 1) ; <i16> [#uses=0]
- %78 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %79 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %78, i32 1) ; <i32> [#uses=0]
- %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %81 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %80, i32 1) ; <i32> [#uses=0]
- %82 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %83 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %82, i32 1) ; <i32> [#uses=0]
- %84 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %85 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %84, i32 1) ; <i32> [#uses=0]
- %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %87 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %86, i64 1) ; <i64> [#uses=0]
- %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %89 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %88, i64 1) ; <i64> [#uses=0]
- %90 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %91 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %93 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %92, i16 1) ; <i16> [#uses=0]
- %94 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %95 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %94, i16 1) ; <i16> [#uses=0]
- %96 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %97 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %96, i32 1) ; <i32> [#uses=0]
- %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %99 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %98, i32 1) ; <i32> [#uses=0]
- %100 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %101 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %100, i32 1) ; <i32> [#uses=0]
- %102 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %103 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %102, i32 1) ; <i32> [#uses=0]
- %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %105 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %104, i64 1) ; <i64> [#uses=0]
- %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %107 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %106, i64 1) ; <i64> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind
-
-define void @test_fetch_and_op() nounwind {
-entry:
- %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %0, i8* @sc, align 1
- %1 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %1, i8* @uc, align 1
- %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %3 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %2, i16 11) ; <i16> [#uses=1]
- store i16 %3, i16* @ss, align 2
- %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %5 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %4, i16 11) ; <i16> [#uses=1]
- store i16 %5, i16* @us, align 2
- %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %7 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %6, i32 11) ; <i32> [#uses=1]
- store i32 %7, i32* @si, align 4
- %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %9 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %8, i32 11) ; <i32> [#uses=1]
- store i32 %9, i32* @ui, align 4
- %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %11 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %10, i32 11) ; <i32> [#uses=1]
- store i32 %11, i32* @sl, align 4
- %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %13 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %12, i32 11) ; <i32> [#uses=1]
- store i32 %13, i32* @ul, align 4
- %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %15 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %14, i64 11) ; <i64> [#uses=1]
- store i64 %15, i64* @sll, align 8
- %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %17 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %16, i64 11) ; <i64> [#uses=1]
- store i64 %17, i64* @ull, align 8
- %18 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %18, i8* @sc, align 1
- %19 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %19, i8* @uc, align 1
- %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %21 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %20, i16 11) ; <i16> [#uses=1]
- store i16 %21, i16* @ss, align 2
- %22 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %23 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %22, i16 11) ; <i16> [#uses=1]
- store i16 %23, i16* @us, align 2
- %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %25 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %24, i32 11) ; <i32> [#uses=1]
- store i32 %25, i32* @si, align 4
- %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %27 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %26, i32 11) ; <i32> [#uses=1]
- store i32 %27, i32* @ui, align 4
- %28 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %29 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %28, i32 11) ; <i32> [#uses=1]
- store i32 %29, i32* @sl, align 4
- %30 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %31 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %30, i32 11) ; <i32> [#uses=1]
- store i32 %31, i32* @ul, align 4
- %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %33 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %32, i64 11) ; <i64> [#uses=1]
- store i64 %33, i64* @sll, align 8
- %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %35 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %34, i64 11) ; <i64> [#uses=1]
- store i64 %35, i64* @ull, align 8
- %36 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %36, i8* @sc, align 1
- %37 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %37, i8* @uc, align 1
- %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %39 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %38, i16 11) ; <i16> [#uses=1]
- store i16 %39, i16* @ss, align 2
- %40 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %41 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %40, i16 11) ; <i16> [#uses=1]
- store i16 %41, i16* @us, align 2
- %42 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %43 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %42, i32 11) ; <i32> [#uses=1]
- store i32 %43, i32* @si, align 4
- %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %45 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %44, i32 11) ; <i32> [#uses=1]
- store i32 %45, i32* @ui, align 4
- %46 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %47 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %46, i32 11) ; <i32> [#uses=1]
- store i32 %47, i32* @sl, align 4
- %48 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %49 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %48, i32 11) ; <i32> [#uses=1]
- store i32 %49, i32* @ul, align 4
- %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %51 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %50, i64 11) ; <i64> [#uses=1]
- store i64 %51, i64* @sll, align 8
- %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %53 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %52, i64 11) ; <i64> [#uses=1]
- store i64 %53, i64* @ull, align 8
- %54 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %54, i8* @sc, align 1
- %55 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %55, i8* @uc, align 1
- %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %57 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %56, i16 11) ; <i16> [#uses=1]
- store i16 %57, i16* @ss, align 2
- %58 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %59 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %58, i16 11) ; <i16> [#uses=1]
- store i16 %59, i16* @us, align 2
- %60 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %61 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %60, i32 11) ; <i32> [#uses=1]
- store i32 %61, i32* @si, align 4
- %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %63 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %62, i32 11) ; <i32> [#uses=1]
- store i32 %63, i32* @ui, align 4
- %64 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %65 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %64, i32 11) ; <i32> [#uses=1]
- store i32 %65, i32* @sl, align 4
- %66 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %67 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %66, i32 11) ; <i32> [#uses=1]
- store i32 %67, i32* @ul, align 4
- %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %69 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %68, i64 11) ; <i64> [#uses=1]
- store i64 %69, i64* @sll, align 8
- %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %71 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %70, i64 11) ; <i64> [#uses=1]
- store i64 %71, i64* @ull, align 8
- %72 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %72, i8* @sc, align 1
- %73 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %73, i8* @uc, align 1
- %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %75 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %74, i16 11) ; <i16> [#uses=1]
- store i16 %75, i16* @ss, align 2
- %76 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %77 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %76, i16 11) ; <i16> [#uses=1]
- store i16 %77, i16* @us, align 2
- %78 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %79 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %78, i32 11) ; <i32> [#uses=1]
- store i32 %79, i32* @si, align 4
- %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %81 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %80, i32 11) ; <i32> [#uses=1]
- store i32 %81, i32* @ui, align 4
- %82 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %83 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %82, i32 11) ; <i32> [#uses=1]
- store i32 %83, i32* @sl, align 4
- %84 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %85 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %84, i32 11) ; <i32> [#uses=1]
- store i32 %85, i32* @ul, align 4
- %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %87 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %86, i64 11) ; <i64> [#uses=1]
- store i64 %87, i64* @sll, align 8
- %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %89 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %88, i64 11) ; <i64> [#uses=1]
- store i64 %89, i64* @ull, align 8
- %90 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %90, i8* @sc, align 1
- %91 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %91, i8* @uc, align 1
- %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %93 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %92, i16 11) ; <i16> [#uses=1]
- store i16 %93, i16* @ss, align 2
- %94 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %95 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %94, i16 11) ; <i16> [#uses=1]
- store i16 %95, i16* @us, align 2
- %96 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %97 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %96, i32 11) ; <i32> [#uses=1]
- store i32 %97, i32* @si, align 4
- %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %99 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %98, i32 11) ; <i32> [#uses=1]
- store i32 %99, i32* @ui, align 4
- %100 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %101 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %100, i32 11) ; <i32> [#uses=1]
- store i32 %101, i32* @sl, align 4
- %102 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %103 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %102, i32 11) ; <i32> [#uses=1]
- store i32 %103, i32* @ul, align 4
- %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %105 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %104, i64 11) ; <i64> [#uses=1]
- store i64 %105, i64* @sll, align 8
- %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %107 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %106, i64 11) ; <i64> [#uses=1]
- store i64 %107, i64* @ull, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_op_and_fetch() nounwind {
-entry:
- %0 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %1 = zext i8 %0 to i32 ; <i32> [#uses=1]
- %2 = trunc i32 %1 to i8 ; <i8> [#uses=2]
- %3 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 %2) ; <i8> [#uses=1]
- %4 = add i8 %3, %2 ; <i8> [#uses=1]
- store i8 %4, i8* @sc, align 1
- %5 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %6 = zext i8 %5 to i32 ; <i32> [#uses=1]
- %7 = trunc i32 %6 to i8 ; <i8> [#uses=2]
- %8 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 %7) ; <i8> [#uses=1]
- %9 = add i8 %8, %7 ; <i8> [#uses=1]
- store i8 %9, i8* @uc, align 1
- %10 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %11 = zext i8 %10 to i32 ; <i32> [#uses=1]
- %12 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %13 = trunc i32 %11 to i16 ; <i16> [#uses=2]
- %14 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %12, i16 %13) ; <i16> [#uses=1]
- %15 = add i16 %14, %13 ; <i16> [#uses=1]
- store i16 %15, i16* @ss, align 2
- %16 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %17 = zext i8 %16 to i32 ; <i32> [#uses=1]
- %18 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %19 = trunc i32 %17 to i16 ; <i16> [#uses=2]
- %20 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %18, i16 %19) ; <i16> [#uses=1]
- %21 = add i16 %20, %19 ; <i16> [#uses=1]
- store i16 %21, i16* @us, align 2
- %22 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %23 = zext i8 %22 to i32 ; <i32> [#uses=2]
- %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %25 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %24, i32 %23) ; <i32> [#uses=1]
- %26 = add i32 %25, %23 ; <i32> [#uses=1]
- store i32 %26, i32* @si, align 4
- %27 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %28 = zext i8 %27 to i32 ; <i32> [#uses=2]
- %29 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %30 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %29, i32 %28) ; <i32> [#uses=1]
- %31 = add i32 %30, %28 ; <i32> [#uses=1]
- store i32 %31, i32* @ui, align 4
- %32 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %33 = zext i8 %32 to i32 ; <i32> [#uses=2]
- %34 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %35 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %34, i32 %33) ; <i32> [#uses=1]
- %36 = add i32 %35, %33 ; <i32> [#uses=1]
- store i32 %36, i32* @sl, align 4
- %37 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %38 = zext i8 %37 to i32 ; <i32> [#uses=2]
- %39 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %40 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %39, i32 %38) ; <i32> [#uses=1]
- %41 = add i32 %40, %38 ; <i32> [#uses=1]
- store i32 %41, i32* @ul, align 4
- %42 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %43 = zext i8 %42 to i64 ; <i64> [#uses=2]
- %44 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %45 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %44, i64 %43) ; <i64> [#uses=1]
- %46 = add i64 %45, %43 ; <i64> [#uses=1]
- store i64 %46, i64* @sll, align 8
- %47 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %48 = zext i8 %47 to i64 ; <i64> [#uses=2]
- %49 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %50 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %49, i64 %48) ; <i64> [#uses=1]
- %51 = add i64 %50, %48 ; <i64> [#uses=1]
- store i64 %51, i64* @ull, align 8
- %52 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %53 = zext i8 %52 to i32 ; <i32> [#uses=1]
- %54 = trunc i32 %53 to i8 ; <i8> [#uses=2]
- %55 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 %54) ; <i8> [#uses=1]
- %56 = sub i8 %55, %54 ; <i8> [#uses=1]
- store i8 %56, i8* @sc, align 1
- %57 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %58 = zext i8 %57 to i32 ; <i32> [#uses=1]
- %59 = trunc i32 %58 to i8 ; <i8> [#uses=2]
- %60 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 %59) ; <i8> [#uses=1]
- %61 = sub i8 %60, %59 ; <i8> [#uses=1]
- store i8 %61, i8* @uc, align 1
- %62 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %63 = zext i8 %62 to i32 ; <i32> [#uses=1]
- %64 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %65 = trunc i32 %63 to i16 ; <i16> [#uses=2]
- %66 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %64, i16 %65) ; <i16> [#uses=1]
- %67 = sub i16 %66, %65 ; <i16> [#uses=1]
- store i16 %67, i16* @ss, align 2
- %68 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %69 = zext i8 %68 to i32 ; <i32> [#uses=1]
- %70 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %71 = trunc i32 %69 to i16 ; <i16> [#uses=2]
- %72 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %70, i16 %71) ; <i16> [#uses=1]
- %73 = sub i16 %72, %71 ; <i16> [#uses=1]
- store i16 %73, i16* @us, align 2
- %74 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %75 = zext i8 %74 to i32 ; <i32> [#uses=2]
- %76 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %77 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %76, i32 %75) ; <i32> [#uses=1]
- %78 = sub i32 %77, %75 ; <i32> [#uses=1]
- store i32 %78, i32* @si, align 4
- %79 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %80 = zext i8 %79 to i32 ; <i32> [#uses=2]
- %81 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %82 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %81, i32 %80) ; <i32> [#uses=1]
- %83 = sub i32 %82, %80 ; <i32> [#uses=1]
- store i32 %83, i32* @ui, align 4
- %84 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %85 = zext i8 %84 to i32 ; <i32> [#uses=2]
- %86 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %87 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %86, i32 %85) ; <i32> [#uses=1]
- %88 = sub i32 %87, %85 ; <i32> [#uses=1]
- store i32 %88, i32* @sl, align 4
- %89 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %90 = zext i8 %89 to i32 ; <i32> [#uses=2]
- %91 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %92 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %91, i32 %90) ; <i32> [#uses=1]
- %93 = sub i32 %92, %90 ; <i32> [#uses=1]
- store i32 %93, i32* @ul, align 4
- %94 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %95 = zext i8 %94 to i64 ; <i64> [#uses=2]
- %96 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %97 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %96, i64 %95) ; <i64> [#uses=1]
- %98 = sub i64 %97, %95 ; <i64> [#uses=1]
- store i64 %98, i64* @sll, align 8
- %99 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %100 = zext i8 %99 to i64 ; <i64> [#uses=2]
- %101 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %102 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %101, i64 %100) ; <i64> [#uses=1]
- %103 = sub i64 %102, %100 ; <i64> [#uses=1]
- store i64 %103, i64* @ull, align 8
- %104 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %105 = zext i8 %104 to i32 ; <i32> [#uses=1]
- %106 = trunc i32 %105 to i8 ; <i8> [#uses=2]
- %107 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 %106) ; <i8> [#uses=1]
- %108 = or i8 %107, %106 ; <i8> [#uses=1]
- store i8 %108, i8* @sc, align 1
- %109 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %110 = zext i8 %109 to i32 ; <i32> [#uses=1]
- %111 = trunc i32 %110 to i8 ; <i8> [#uses=2]
- %112 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 %111) ; <i8> [#uses=1]
- %113 = or i8 %112, %111 ; <i8> [#uses=1]
- store i8 %113, i8* @uc, align 1
- %114 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %115 = zext i8 %114 to i32 ; <i32> [#uses=1]
- %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %117 = trunc i32 %115 to i16 ; <i16> [#uses=2]
- %118 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %116, i16 %117) ; <i16> [#uses=1]
- %119 = or i16 %118, %117 ; <i16> [#uses=1]
- store i16 %119, i16* @ss, align 2
- %120 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %121 = zext i8 %120 to i32 ; <i32> [#uses=1]
- %122 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %123 = trunc i32 %121 to i16 ; <i16> [#uses=2]
- %124 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %122, i16 %123) ; <i16> [#uses=1]
- %125 = or i16 %124, %123 ; <i16> [#uses=1]
- store i16 %125, i16* @us, align 2
- %126 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %127 = zext i8 %126 to i32 ; <i32> [#uses=2]
- %128 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %129 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %128, i32 %127) ; <i32> [#uses=1]
- %130 = or i32 %129, %127 ; <i32> [#uses=1]
- store i32 %130, i32* @si, align 4
- %131 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %132 = zext i8 %131 to i32 ; <i32> [#uses=2]
- %133 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %134 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %133, i32 %132) ; <i32> [#uses=1]
- %135 = or i32 %134, %132 ; <i32> [#uses=1]
- store i32 %135, i32* @ui, align 4
- %136 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %137 = zext i8 %136 to i32 ; <i32> [#uses=2]
- %138 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %139 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %138, i32 %137) ; <i32> [#uses=1]
- %140 = or i32 %139, %137 ; <i32> [#uses=1]
- store i32 %140, i32* @sl, align 4
- %141 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %142 = zext i8 %141 to i32 ; <i32> [#uses=2]
- %143 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %144 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %143, i32 %142) ; <i32> [#uses=1]
- %145 = or i32 %144, %142 ; <i32> [#uses=1]
- store i32 %145, i32* @ul, align 4
- %146 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %147 = zext i8 %146 to i64 ; <i64> [#uses=2]
- %148 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %149 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %148, i64 %147) ; <i64> [#uses=1]
- %150 = or i64 %149, %147 ; <i64> [#uses=1]
- store i64 %150, i64* @sll, align 8
- %151 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %152 = zext i8 %151 to i64 ; <i64> [#uses=2]
- %153 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %154 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %153, i64 %152) ; <i64> [#uses=1]
- %155 = or i64 %154, %152 ; <i64> [#uses=1]
- store i64 %155, i64* @ull, align 8
- %156 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %157 = zext i8 %156 to i32 ; <i32> [#uses=1]
- %158 = trunc i32 %157 to i8 ; <i8> [#uses=2]
- %159 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 %158) ; <i8> [#uses=1]
- %160 = xor i8 %159, %158 ; <i8> [#uses=1]
- store i8 %160, i8* @sc, align 1
- %161 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %162 = zext i8 %161 to i32 ; <i32> [#uses=1]
- %163 = trunc i32 %162 to i8 ; <i8> [#uses=2]
- %164 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 %163) ; <i8> [#uses=1]
- %165 = xor i8 %164, %163 ; <i8> [#uses=1]
- store i8 %165, i8* @uc, align 1
- %166 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %167 = zext i8 %166 to i32 ; <i32> [#uses=1]
- %168 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %169 = trunc i32 %167 to i16 ; <i16> [#uses=2]
- %170 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %168, i16 %169) ; <i16> [#uses=1]
- %171 = xor i16 %170, %169 ; <i16> [#uses=1]
- store i16 %171, i16* @ss, align 2
- %172 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %173 = zext i8 %172 to i32 ; <i32> [#uses=1]
- %174 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %175 = trunc i32 %173 to i16 ; <i16> [#uses=2]
- %176 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %174, i16 %175) ; <i16> [#uses=1]
- %177 = xor i16 %176, %175 ; <i16> [#uses=1]
- store i16 %177, i16* @us, align 2
- %178 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %179 = zext i8 %178 to i32 ; <i32> [#uses=2]
- %180 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %181 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %180, i32 %179) ; <i32> [#uses=1]
- %182 = xor i32 %181, %179 ; <i32> [#uses=1]
- store i32 %182, i32* @si, align 4
- %183 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %184 = zext i8 %183 to i32 ; <i32> [#uses=2]
- %185 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %186 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %185, i32 %184) ; <i32> [#uses=1]
- %187 = xor i32 %186, %184 ; <i32> [#uses=1]
- store i32 %187, i32* @ui, align 4
- %188 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %189 = zext i8 %188 to i32 ; <i32> [#uses=2]
- %190 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %191 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %190, i32 %189) ; <i32> [#uses=1]
- %192 = xor i32 %191, %189 ; <i32> [#uses=1]
- store i32 %192, i32* @sl, align 4
- %193 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %194 = zext i8 %193 to i32 ; <i32> [#uses=2]
- %195 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %196 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %195, i32 %194) ; <i32> [#uses=1]
- %197 = xor i32 %196, %194 ; <i32> [#uses=1]
- store i32 %197, i32* @ul, align 4
- %198 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %199 = zext i8 %198 to i64 ; <i64> [#uses=2]
- %200 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %201 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %200, i64 %199) ; <i64> [#uses=1]
- %202 = xor i64 %201, %199 ; <i64> [#uses=1]
- store i64 %202, i64* @sll, align 8
- %203 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %204 = zext i8 %203 to i64 ; <i64> [#uses=2]
- %205 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %206 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %205, i64 %204) ; <i64> [#uses=1]
- %207 = xor i64 %206, %204 ; <i64> [#uses=1]
- store i64 %207, i64* @ull, align 8
- %208 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %209 = zext i8 %208 to i32 ; <i32> [#uses=1]
- %210 = trunc i32 %209 to i8 ; <i8> [#uses=2]
- %211 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 %210) ; <i8> [#uses=1]
- %212 = and i8 %211, %210 ; <i8> [#uses=1]
- store i8 %212, i8* @sc, align 1
- %213 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %214 = zext i8 %213 to i32 ; <i32> [#uses=1]
- %215 = trunc i32 %214 to i8 ; <i8> [#uses=2]
- %216 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 %215) ; <i8> [#uses=1]
- %217 = and i8 %216, %215 ; <i8> [#uses=1]
- store i8 %217, i8* @uc, align 1
- %218 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %219 = zext i8 %218 to i32 ; <i32> [#uses=1]
- %220 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %221 = trunc i32 %219 to i16 ; <i16> [#uses=2]
- %222 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %220, i16 %221) ; <i16> [#uses=1]
- %223 = and i16 %222, %221 ; <i16> [#uses=1]
- store i16 %223, i16* @ss, align 2
- %224 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %225 = zext i8 %224 to i32 ; <i32> [#uses=1]
- %226 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %227 = trunc i32 %225 to i16 ; <i16> [#uses=2]
- %228 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %226, i16 %227) ; <i16> [#uses=1]
- %229 = and i16 %228, %227 ; <i16> [#uses=1]
- store i16 %229, i16* @us, align 2
- %230 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %231 = zext i8 %230 to i32 ; <i32> [#uses=2]
- %232 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %233 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %232, i32 %231) ; <i32> [#uses=1]
- %234 = and i32 %233, %231 ; <i32> [#uses=1]
- store i32 %234, i32* @si, align 4
- %235 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %236 = zext i8 %235 to i32 ; <i32> [#uses=2]
- %237 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %238 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %237, i32 %236) ; <i32> [#uses=1]
- %239 = and i32 %238, %236 ; <i32> [#uses=1]
- store i32 %239, i32* @ui, align 4
- %240 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %241 = zext i8 %240 to i32 ; <i32> [#uses=2]
- %242 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %243 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %242, i32 %241) ; <i32> [#uses=1]
- %244 = and i32 %243, %241 ; <i32> [#uses=1]
- store i32 %244, i32* @sl, align 4
- %245 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %246 = zext i8 %245 to i32 ; <i32> [#uses=2]
- %247 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %248 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %247, i32 %246) ; <i32> [#uses=1]
- %249 = and i32 %248, %246 ; <i32> [#uses=1]
- store i32 %249, i32* @ul, align 4
- %250 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %251 = zext i8 %250 to i64 ; <i64> [#uses=2]
- %252 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %253 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %252, i64 %251) ; <i64> [#uses=1]
- %254 = and i64 %253, %251 ; <i64> [#uses=1]
- store i64 %254, i64* @sll, align 8
- %255 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %256 = zext i8 %255 to i64 ; <i64> [#uses=2]
- %257 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %258 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %257, i64 %256) ; <i64> [#uses=1]
- %259 = and i64 %258, %256 ; <i64> [#uses=1]
- store i64 %259, i64* @ull, align 8
- %260 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %261 = zext i8 %260 to i32 ; <i32> [#uses=1]
- %262 = trunc i32 %261 to i8 ; <i8> [#uses=2]
- %263 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 %262) ; <i8> [#uses=1]
- %264 = xor i8 %263, -1 ; <i8> [#uses=1]
- %265 = and i8 %264, %262 ; <i8> [#uses=1]
- store i8 %265, i8* @sc, align 1
- %266 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %267 = zext i8 %266 to i32 ; <i32> [#uses=1]
- %268 = trunc i32 %267 to i8 ; <i8> [#uses=2]
- %269 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 %268) ; <i8> [#uses=1]
- %270 = xor i8 %269, -1 ; <i8> [#uses=1]
- %271 = and i8 %270, %268 ; <i8> [#uses=1]
- store i8 %271, i8* @uc, align 1
- %272 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %273 = zext i8 %272 to i32 ; <i32> [#uses=1]
- %274 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %275 = trunc i32 %273 to i16 ; <i16> [#uses=2]
- %276 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %274, i16 %275) ; <i16> [#uses=1]
- %277 = xor i16 %276, -1 ; <i16> [#uses=1]
- %278 = and i16 %277, %275 ; <i16> [#uses=1]
- store i16 %278, i16* @ss, align 2
- %279 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %280 = zext i8 %279 to i32 ; <i32> [#uses=1]
- %281 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %282 = trunc i32 %280 to i16 ; <i16> [#uses=2]
- %283 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %281, i16 %282) ; <i16> [#uses=1]
- %284 = xor i16 %283, -1 ; <i16> [#uses=1]
- %285 = and i16 %284, %282 ; <i16> [#uses=1]
- store i16 %285, i16* @us, align 2
- %286 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %287 = zext i8 %286 to i32 ; <i32> [#uses=2]
- %288 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %289 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %288, i32 %287) ; <i32> [#uses=1]
- %290 = xor i32 %289, -1 ; <i32> [#uses=1]
- %291 = and i32 %290, %287 ; <i32> [#uses=1]
- store i32 %291, i32* @si, align 4
- %292 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %293 = zext i8 %292 to i32 ; <i32> [#uses=2]
- %294 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %295 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %294, i32 %293) ; <i32> [#uses=1]
- %296 = xor i32 %295, -1 ; <i32> [#uses=1]
- %297 = and i32 %296, %293 ; <i32> [#uses=1]
- store i32 %297, i32* @ui, align 4
- %298 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %299 = zext i8 %298 to i32 ; <i32> [#uses=2]
- %300 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %301 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %300, i32 %299) ; <i32> [#uses=1]
- %302 = xor i32 %301, -1 ; <i32> [#uses=1]
- %303 = and i32 %302, %299 ; <i32> [#uses=1]
- store i32 %303, i32* @sl, align 4
- %304 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %305 = zext i8 %304 to i32 ; <i32> [#uses=2]
- %306 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %307 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %306, i32 %305) ; <i32> [#uses=1]
- %308 = xor i32 %307, -1 ; <i32> [#uses=1]
- %309 = and i32 %308, %305 ; <i32> [#uses=1]
- store i32 %309, i32* @ul, align 4
- %310 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %311 = zext i8 %310 to i64 ; <i64> [#uses=2]
- %312 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %313 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %312, i64 %311) ; <i64> [#uses=1]
- %314 = xor i64 %313, -1 ; <i64> [#uses=1]
- %315 = and i64 %314, %311 ; <i64> [#uses=1]
- store i64 %315, i64* @sll, align 8
- %316 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %317 = zext i8 %316 to i64 ; <i64> [#uses=2]
- %318 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %319 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %318, i64 %317) ; <i64> [#uses=1]
- %320 = xor i64 %319, -1 ; <i64> [#uses=1]
- %321 = and i64 %320, %317 ; <i64> [#uses=1]
- store i64 %321, i64* @ull, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_compare_and_swap() nounwind {
-entry:
- %0 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %1 = zext i8 %0 to i32 ; <i32> [#uses=1]
- %2 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %3 = zext i8 %2 to i32 ; <i32> [#uses=1]
- %4 = trunc i32 %3 to i8 ; <i8> [#uses=1]
- %5 = trunc i32 %1 to i8 ; <i8> [#uses=1]
- %6 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @sc, i8 %4, i8 %5) ; <i8> [#uses=1]
- store i8 %6, i8* @sc, align 1
- %7 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %8 = zext i8 %7 to i32 ; <i32> [#uses=1]
- %9 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %10 = zext i8 %9 to i32 ; <i32> [#uses=1]
- %11 = trunc i32 %10 to i8 ; <i8> [#uses=1]
- %12 = trunc i32 %8 to i8 ; <i8> [#uses=1]
- %13 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @uc, i8 %11, i8 %12) ; <i8> [#uses=1]
- store i8 %13, i8* @uc, align 1
- %14 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %15 = sext i8 %14 to i16 ; <i16> [#uses=1]
- %16 = zext i16 %15 to i32 ; <i32> [#uses=1]
- %17 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %18 = zext i8 %17 to i32 ; <i32> [#uses=1]
- %19 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %20 = trunc i32 %18 to i16 ; <i16> [#uses=1]
- %21 = trunc i32 %16 to i16 ; <i16> [#uses=1]
- %22 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %19, i16 %20, i16 %21) ; <i16> [#uses=1]
- store i16 %22, i16* @ss, align 2
- %23 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %24 = sext i8 %23 to i16 ; <i16> [#uses=1]
- %25 = zext i16 %24 to i32 ; <i32> [#uses=1]
- %26 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %27 = zext i8 %26 to i32 ; <i32> [#uses=1]
- %28 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %29 = trunc i32 %27 to i16 ; <i16> [#uses=1]
- %30 = trunc i32 %25 to i16 ; <i16> [#uses=1]
- %31 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %28, i16 %29, i16 %30) ; <i16> [#uses=1]
- store i16 %31, i16* @us, align 2
- %32 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %33 = sext i8 %32 to i32 ; <i32> [#uses=1]
- %34 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %35 = zext i8 %34 to i32 ; <i32> [#uses=1]
- %36 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %37 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %36, i32 %35, i32 %33) ; <i32> [#uses=1]
- store i32 %37, i32* @si, align 4
- %38 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %39 = sext i8 %38 to i32 ; <i32> [#uses=1]
- %40 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %41 = zext i8 %40 to i32 ; <i32> [#uses=1]
- %42 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %43 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %42, i32 %41, i32 %39) ; <i32> [#uses=1]
- store i32 %43, i32* @ui, align 4
- %44 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %45 = sext i8 %44 to i32 ; <i32> [#uses=1]
- %46 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %47 = zext i8 %46 to i32 ; <i32> [#uses=1]
- %48 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %49 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %48, i32 %47, i32 %45) ; <i32> [#uses=1]
- store i32 %49, i32* @sl, align 4
- %50 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %51 = sext i8 %50 to i32 ; <i32> [#uses=1]
- %52 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %53 = zext i8 %52 to i32 ; <i32> [#uses=1]
- %54 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %55 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %54, i32 %53, i32 %51) ; <i32> [#uses=1]
- store i32 %55, i32* @ul, align 4
- %56 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %57 = zext i8 %56 to i32 ; <i32> [#uses=1]
- %58 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %59 = zext i8 %58 to i32 ; <i32> [#uses=1]
- %60 = trunc i32 %59 to i8 ; <i8> [#uses=2]
- %61 = trunc i32 %57 to i8 ; <i8> [#uses=1]
- %62 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @sc, i8 %60, i8 %61) ; <i8> [#uses=1]
- %63 = icmp eq i8 %62, %60 ; <i1> [#uses=1]
- %64 = zext i1 %63 to i8 ; <i8> [#uses=1]
- %65 = zext i8 %64 to i32 ; <i32> [#uses=1]
- store i32 %65, i32* @ui, align 4
- %66 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %67 = zext i8 %66 to i32 ; <i32> [#uses=1]
- %68 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %69 = zext i8 %68 to i32 ; <i32> [#uses=1]
- %70 = trunc i32 %69 to i8 ; <i8> [#uses=2]
- %71 = trunc i32 %67 to i8 ; <i8> [#uses=1]
- %72 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @uc, i8 %70, i8 %71) ; <i8> [#uses=1]
- %73 = icmp eq i8 %72, %70 ; <i1> [#uses=1]
- %74 = zext i1 %73 to i8 ; <i8> [#uses=1]
- %75 = zext i8 %74 to i32 ; <i32> [#uses=1]
- store i32 %75, i32* @ui, align 4
- %76 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %77 = sext i8 %76 to i16 ; <i16> [#uses=1]
- %78 = zext i16 %77 to i32 ; <i32> [#uses=1]
- %79 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %80 = zext i8 %79 to i32 ; <i32> [#uses=1]
- %81 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %82 = trunc i32 %80 to i16 ; <i16> [#uses=2]
- %83 = trunc i32 %78 to i16 ; <i16> [#uses=1]
- %84 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %81, i16 %82, i16 %83) ; <i16> [#uses=1]
- %85 = icmp eq i16 %84, %82 ; <i1> [#uses=1]
- %86 = zext i1 %85 to i8 ; <i8> [#uses=1]
- %87 = zext i8 %86 to i32 ; <i32> [#uses=1]
- store i32 %87, i32* @ui, align 4
- %88 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %89 = sext i8 %88 to i16 ; <i16> [#uses=1]
- %90 = zext i16 %89 to i32 ; <i32> [#uses=1]
- %91 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %92 = zext i8 %91 to i32 ; <i32> [#uses=1]
- %93 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %94 = trunc i32 %92 to i16 ; <i16> [#uses=2]
- %95 = trunc i32 %90 to i16 ; <i16> [#uses=1]
- %96 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %93, i16 %94, i16 %95) ; <i16> [#uses=1]
- %97 = icmp eq i16 %96, %94 ; <i1> [#uses=1]
- %98 = zext i1 %97 to i8 ; <i8> [#uses=1]
- %99 = zext i8 %98 to i32 ; <i32> [#uses=1]
- store i32 %99, i32* @ui, align 4
- %100 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %101 = sext i8 %100 to i32 ; <i32> [#uses=1]
- %102 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %103 = zext i8 %102 to i32 ; <i32> [#uses=2]
- %104 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %105 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %104, i32 %103, i32 %101) ; <i32> [#uses=1]
- %106 = icmp eq i32 %105, %103 ; <i1> [#uses=1]
- %107 = zext i1 %106 to i8 ; <i8> [#uses=1]
- %108 = zext i8 %107 to i32 ; <i32> [#uses=1]
- store i32 %108, i32* @ui, align 4
- %109 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %110 = sext i8 %109 to i32 ; <i32> [#uses=1]
- %111 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %112 = zext i8 %111 to i32 ; <i32> [#uses=2]
- %113 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %114 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %113, i32 %112, i32 %110) ; <i32> [#uses=1]
- %115 = icmp eq i32 %114, %112 ; <i1> [#uses=1]
- %116 = zext i1 %115 to i8 ; <i8> [#uses=1]
- %117 = zext i8 %116 to i32 ; <i32> [#uses=1]
- store i32 %117, i32* @ui, align 4
- %118 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %119 = sext i8 %118 to i32 ; <i32> [#uses=1]
- %120 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %121 = zext i8 %120 to i32 ; <i32> [#uses=2]
- %122 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %123 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %122, i32 %121, i32 %119) ; <i32> [#uses=1]
- %124 = icmp eq i32 %123, %121 ; <i1> [#uses=1]
- %125 = zext i1 %124 to i8 ; <i8> [#uses=1]
- %126 = zext i8 %125 to i32 ; <i32> [#uses=1]
- store i32 %126, i32* @ui, align 4
- %127 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %128 = sext i8 %127 to i32 ; <i32> [#uses=1]
- %129 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %130 = zext i8 %129 to i32 ; <i32> [#uses=2]
- %131 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %132 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %131, i32 %130, i32 %128) ; <i32> [#uses=1]
- %133 = icmp eq i32 %132, %130 ; <i1> [#uses=1]
- %134 = zext i1 %133 to i8 ; <i8> [#uses=1]
- %135 = zext i8 %134 to i32 ; <i32> [#uses=1]
- store i32 %135, i32* @ui, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
-
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
-define void @test_lock() nounwind {
-entry:
- %0 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=1]
- store i8 %0, i8* @sc, align 1
- %1 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=1]
- store i8 %1, i8* @uc, align 1
- %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %3 = call i16 @llvm.atomic.swap.i16.p0i16(i16* %2, i16 1) ; <i16> [#uses=1]
- store i16 %3, i16* @ss, align 2
- %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %5 = call i16 @llvm.atomic.swap.i16.p0i16(i16* %4, i16 1) ; <i16> [#uses=1]
- store i16 %5, i16* @us, align 2
- %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %7 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %6, i32 1) ; <i32> [#uses=1]
- store i32 %7, i32* @si, align 4
- %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %9 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %8, i32 1) ; <i32> [#uses=1]
- store i32 %9, i32* @ui, align 4
- %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %11 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %10, i32 1) ; <i32> [#uses=1]
- store i32 %11, i32* @sl, align 4
- %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %13 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %12, i32 1) ; <i32> [#uses=1]
- store i32 %13, i32* @ul, align 4
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- %14 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- volatile store i16 0, i16* %14, align 2
- %15 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- volatile store i16 0, i16* %15, align 2
- %16 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- volatile store i32 0, i32* %16, align 4
- %17 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- volatile store i32 0, i32* %17, align 4
- %18 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- volatile store i32 0, i32* %18, align 4
- %19 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- volatile store i32 0, i32* %19, align 4
- %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- volatile store i64 0, i64* %20, align 8
- %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- volatile store i64 0, i64* %21, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/X86/2009-03-13-PHIElimBug.ll b/test/CodeGen/X86/2009-03-13-PHIElimBug.ll
index 45fc269..e14c30a 100644
--- a/test/CodeGen/X86/2009-03-13-PHIElimBug.ll
+++ b/test/CodeGen/X86/2009-03-13-PHIElimBug.ll
@@ -24,9 +24,13 @@ cont2: ; preds = %cont
lpad: ; preds = %cont, %entry
%y = phi i32 [ %a, %entry ], [ %aa, %cont ] ; <i32> [#uses=1]
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
ret i32 %y
}
; CHECK: call{{.*}}f
; CHECK: movl %eax, %esi
; CHECK: call{{.*}}g
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll b/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll
index b13d33e..f8c7a15 100644
--- a/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll
+++ b/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll
@@ -17,8 +17,12 @@ cont2: ; preds = %cont
lpad: ; preds = %cont, %entry
%v = phi i32 [ %x, %entry ], [ %a, %cont ] ; <i32> [#uses=1]
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
ret i32 %v
}
; CHECK: lpad
; CHECK-NEXT: Ltmp
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll b/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
index 01852a6..3076322 100644
--- a/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
+++ b/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | not grep movl
+; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+mmx,+sse2 | not grep movl
define <8 x i8> @a(i8 zeroext %x) nounwind {
%r = insertelement <8 x i8> undef, i8 %x, i32 0
diff --git a/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll b/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll
index 228cd48..8ea70b4 100644
--- a/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll
+++ b/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll
@@ -1,8 +1,9 @@
-; RUN: llc < %s -march=x86 -mattr=+sse,-sse2
+; RUN: llc < %s -march=x86 -mattr=+sse,-sse2 | FileCheck %s
; PR2484
define <4 x float> @f4523(<4 x float> %a,<4 x float> %b) nounwind {
entry:
+; CHECK: shufps $-28, %xmm
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4,i32
5,i32 2,i32 3>
ret <4 x float> %shuffle
diff --git a/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll b/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
index 7b5e871..5483b73 100644
--- a/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
+++ b/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
@@ -34,6 +34,8 @@ invcont2: ; preds = %invcont1
ret i32 0
lpad: ; preds = %invcont1, %invcont, %entry
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
%8 = call i32 @vm_deallocate(i32 undef, i64 0, i64 %0) ; <i32> [#uses=0]
unreachable
}
@@ -45,3 +47,5 @@ declare i8* @pluginInstance(i8*, i32)
declare zeroext i8 @invoke(i8*, i32, i8*, i64, i32, i64*, i32*)
declare void @booleanAndDataReply(i32, i32, i32, i32, i64, i32)
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/2009-10-16-Scope.ll b/test/CodeGen/X86/2009-10-16-Scope.ll
index 86c2024..e41038d 100644
--- a/test/CodeGen/X86/2009-10-16-Scope.ll
+++ b/test/CodeGen/X86/2009-10-16-Scope.ll
@@ -23,10 +23,10 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
declare i32 @foo(i32) ssp
!0 = metadata !{i32 5, i32 2, metadata !1, null}
-!1 = metadata !{i32 458763, metadata !2}; [DW_TAG_lexical_block ]
+!1 = metadata !{i32 458763, metadata !2, i32 1, i32 1}; [DW_TAG_lexical_block ]
!2 = metadata !{i32 458798, i32 0, metadata !3, metadata !"bar", metadata !"bar", metadata !"bar", metadata !3, i32 4, null, i1 false, i1 true}; [DW_TAG_subprogram ]
!3 = metadata !{i32 458769, i32 0, i32 12, metadata !"genmodes.i", metadata !"/Users/yash/Downloads", metadata !"clang 1.1", i1 true, i1 false, metadata !"", i32 0}; [DW_TAG_compile_unit ]
!4 = metadata !{i32 459008, metadata !5, metadata !"count_", metadata !3, i32 5, metadata !6}; [ DW_TAG_auto_variable ]
-!5 = metadata !{i32 458763, metadata !1}; [DW_TAG_lexical_block ]
+!5 = metadata !{i32 458763, metadata !1, i32 1, i32 1}; [DW_TAG_lexical_block ]
!6 = metadata !{i32 458788, metadata !3, metadata !"int", metadata !3, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}; [DW_TAG_base_type ]
!7 = metadata !{i32 6, i32 1, metadata !2, null}
diff --git a/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll b/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll
index d7f0c1a..006a02a 100644
--- a/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll
+++ b/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll
@@ -36,7 +36,7 @@ lt_init.exit: ; preds = %if.end.i, %if.then.
call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind
%4 = call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1]
%5 = sub i64 %4, %2 ; <i64> [#uses=1]
- %6 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* getelementptr inbounds ([1216 x i64]* @__profiling_callsite_timestamps_live, i32 0, i32 51), i64 %5) nounwind ; <i64> [#uses=0]
+ %6 = atomicrmw add i64* getelementptr inbounds ([1216 x i64]* @__profiling_callsite_timestamps_live, i32 0, i32 51), i64 %5 monotonic
;CHECK: lock
;CHECK-NEXT: {{xadd|addq}} %rdx, __profiling_callsite_timestamps_live
;CHECK-NEXT: cmpl $0,
@@ -54,7 +54,7 @@ if.end: ; preds = %if.then, %lt_init.e
tail call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind
%8 = tail call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1]
%9 = sub i64 %8, %0 ; <i64> [#uses=1]
- %10 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* getelementptr inbounds ([1216 x i64]* @__profiling_callsite_timestamps_live, i32 0, i32 50), i64 %9) ; <i64> [#uses=0]
+ %10 = atomicrmw add i64* getelementptr inbounds ([1216 x i64]* @__profiling_callsite_timestamps_live, i32 0, i32 50), i64 %9 monotonic
ret i32 %7
}
@@ -64,6 +64,4 @@ declare i32 @lt_dlinit()
declare i32 @warn_dlerror(i8*) nounwind
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
-
declare i64 @llvm.readcyclecounter() nounwind
diff --git a/test/CodeGen/X86/2009-11-25-ImpDefBug.ll b/test/CodeGen/X86/2009-11-25-ImpDefBug.ll
index 7606c0e..396638f 100644
--- a/test/CodeGen/X86/2009-11-25-ImpDefBug.ll
+++ b/test/CodeGen/X86/2009-11-25-ImpDefBug.ll
@@ -46,6 +46,8 @@ bb1.i5: ; preds = %bb.i1
lpad: ; preds = %bb1.i.fragment.cl, %bb1.i.fragment, %bb5
%.SV10.phi807 = phi i8* [ undef, %bb1.i.fragment.cl ], [ undef, %bb1.i.fragment ], [ undef, %bb5 ] ; <i8*> [#uses=1]
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
%1 = load i8* %.SV10.phi807, align 8 ; <i8> [#uses=0]
br i1 undef, label %meshBB81.bbcl.disp, label %bb13.fragment.bbcl.disp
@@ -114,3 +116,5 @@ meshBB81.bbcl.disp: ; preds = %meshBB81.cl141, %me
i8 51, label %meshBB81.cl141
]
}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
index c693636..8b55bd7 100644
--- a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
+++ b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
@@ -18,12 +18,6 @@ entry:
; CHECK: lock
; CHECK: cmpxchg8b
; CHECK: jne
- tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
- tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ %0 = atomicrmw add i64* %p, i64 1 seq_cst
ret void
}
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
diff --git a/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll b/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
index ef1798d..864ebf1 100644
--- a/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
+++ b/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
@@ -60,5 +60,9 @@ bb92: ; preds = %entry
unreachable
lpad159: ; preds = %bb58
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll b/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
index 3738f80..7af58dc 100644
--- a/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
+++ b/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
@@ -84,8 +84,6 @@ cleanup.switch: ; preds = %invoke.cont5
]
cleanup.end: ; preds = %cleanup.switch
- %exc6 = call i8* @llvm.eh.exception() ; <i8*> [#uses=1]
- store i8* %exc6, i8** %_rethrow
store i32 2, i32* %cleanup.dst7
br label %finally
diff --git a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
index 40e7f01..0e4118a 100644
--- a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
+++ b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
@@ -18,11 +18,9 @@ entry:
loop:
; CHECK: lock
; CHECK-NEXT: cmpxchg8b
- %r = call i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* %ptr, i64 0, i64 1)
+ %r = cmpxchg i64* %ptr, i64 0, i64 1 monotonic
%stored1 = icmp eq i64 %r, 0
br i1 %stored1, label %loop, label %continue
continue:
ret void
}
-
-declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* nocapture, i64, i64) nounwind
diff --git a/test/CodeGen/X86/2011-08-23-PerformSubCombine128.ll b/test/CodeGen/X86/2011-08-23-PerformSubCombine128.ll
new file mode 100644
index 0000000..12171ac
--- /dev/null
+++ b/test/CodeGen/X86/2011-08-23-PerformSubCombine128.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=x86-64 -O2 < %s
+
+define void @test(i64 %add127.tr.i2686) {
+entry:
+ %conv143.i2687 = and i64 %add127.tr.i2686, 72057594037927935
+ %conv76.i2623 = zext i64 %conv143.i2687 to i128
+ %mul148.i2338 = mul i128 0, %conv76.i2623
+ %add149.i2339 = add i128 %mul148.i2338, 0
+ %add.i2303 = add i128 0, 170141183460469229370468033484042534912
+ %add6.i2270 = add i128 %add.i2303, 0
+ %sub58.i2271 = sub i128 %add6.i2270, %add149.i2339
+ %add71.i2272 = add i128 %sub58.i2271, 0
+ %add105.i2273 = add i128 %add71.i2272, 0
+ %add116.i2274 = add i128 %add105.i2273, 0
+ %shr124.i2277 = lshr i128 %add116.i2274, 56
+ %add116.tr.i2280 = trunc i128 %add116.i2274 to i64
+ ret void
+}
diff --git a/test/CodeGen/X86/2011-08-23-Trampoline.ll b/test/CodeGen/X86/2011-08-23-Trampoline.ll
new file mode 100644
index 0000000..7a5a0f8
--- /dev/null
+++ b/test/CodeGen/X86/2011-08-23-Trampoline.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -march=x86
+; RUN: llc < %s -march=x86-64
+
+ %struct.FRAME.gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets = type { i32, i32, void (i32, i32)*, i8 (i32, i32)* }
+
+define fastcc i32 @gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets.5146(i64 %table.0.0, i64 %table.0.1, i32 %last, i32 %pos) {
+entry:
+ call void @llvm.init.trampoline( i8* null, i8* bitcast (void (%struct.FRAME.gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets*, i32, i32)* @gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets__move.5177 to i8*), i8* null ) ; <i8*> [#uses=0]
+ %tramp22 = call i8* @llvm.adjust.trampoline( i8* null)
+ unreachable
+}
+
+declare void @gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets__move.5177(%struct.FRAME.gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets* nest , i32, i32) nounwind
+
+declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind
+declare i8* @llvm.adjust.trampoline(i8*) nounwind
diff --git a/test/CodeGen/X86/2011-08-29-BlockConstant.ll b/test/CodeGen/X86/2011-08-29-BlockConstant.ll
new file mode 100644
index 0000000..83e4bcc
--- /dev/null
+++ b/test/CodeGen/X86/2011-08-29-BlockConstant.ll
@@ -0,0 +1,34 @@
+; RUN: llc -march=x86-64 < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+@x = global [500 x i64] zeroinitializer, align 64 ; <[500 x i64]*>
+; CHECK: x:
+; CHECK: .zero 4000
+
+@y = global [63 x i64] [
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262,
+ i64 6799976246779207262, i64 6799976246779207262, i64 6799976246779207262],
+ align 64 ; <[63 x i64]*> 0x5e5e5e5e
+; CHECK: y:
+; CHECK: .zero 504,94
diff --git a/test/CodeGen/X86/2011-08-29-InitOrder.ll b/test/CodeGen/X86/2011-08-29-InitOrder.ll
new file mode 100644
index 0000000..72c79d2
--- /dev/null
+++ b/test/CodeGen/X86/2011-08-29-InitOrder.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefix=CHECK-DEFAULT
+; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s --check-prefix=CHECK-DARWIN
+; PR5329
+
+@llvm.global_ctors = appending global [3 x { i32, void ()* }] [{ i32, void ()* } { i32 2000, void ()* @construct_2 }, { i32, void ()* } { i32 3000, void ()* @construct_3 }, { i32, void ()* } { i32 1000, void ()* @construct_1 }]
+; CHECK-DEFAULT: construct_3
+; CHECK-DEFAULT: construct_2
+; CHECK-DEFAULT: construct_1
+
+; CHECK-DARWIN: construct_1
+; CHECK-DARWIN: construct_2
+; CHECK-DARWIN: construct_3
+
+@llvm.global_dtors = appending global [3 x { i32, void ()* }] [{ i32, void ()* } { i32 2000, void ()* @destruct_2 }, { i32, void ()* } { i32 1000, void ()* @destruct_1 }, { i32, void ()* } { i32 3000, void ()* @destruct_3 }]
+; CHECK-DEFAULT: destruct_3
+; CHECK-DEFAULT: destruct_2
+; CHECK-DEFAULT: destruct_1
+
+; CHECK-DARWIN: destruct_1
+; CHECK-DARWIN: destruct_2
+; CHECK-DARWIN: destruct_3
+
+declare void @construct_1()
+declare void @construct_2()
+declare void @construct_3()
+declare void @destruct_1()
+declare void @destruct_2()
+declare void @destruct_3()
diff --git a/test/CodeGen/X86/2011-09-14-valcoalesce.ll b/test/CodeGen/X86/2011-09-14-valcoalesce.ll
new file mode 100644
index 0000000..1068d1b
--- /dev/null
+++ b/test/CodeGen/X86/2011-09-14-valcoalesce.ll
@@ -0,0 +1,174 @@
+; RUN: llc < %s -march=x86 | FileCheck %s
+;
+; Test RegistersDefinedFromSameValue. We have multiple copies of the same vreg:
+; while.body85.i:
+;   vreg1 = copy vreg2
+;   vreg2 = add
+; critical edge from land.lhs.true.i -> if.end117.i:
+;   vreg27 = vreg2
+; critical edge from land.lhs.true103.i -> if.end117.i:
+;   vreg27 = vreg2
+; if.then108.i:
+;   vreg27 = vreg1
+;
+; Prior to fixing the PR10920 401.bzip2 miscompile, the coalescer would
+; consider vreg1 and vreg27 to be copies of the same value. It would
+; then remove one of the critical edge copies, which cannot safely be removed.
+;
+; CHECK: # %while.body85.i
+; CHECK-NOT: # %
+; CHECK-NOT: add
+; CHECK: movl %[[POSTR:e[abcdxi]+]], %[[PRER:e[abcdxi]+]]
+; CHECK: addl %{{.*}}, %[[POSTR]]
+; CHECK: # %while.end.i
+; CHECK: movl %[[POSTR]], %[[USER:e[abcdxi]+]]
+; CHECK: # %land.lhs.true.i
+; CHECK: movl %[[POSTR]], %[[USER]]
+; CHECK: # %land.lhs.true103.i
+; CHECK: movl %[[POSTR]], %[[USER]]
+; CHECK: # %if.then108.i
+; [[PRER]] is live out, so nothing on this path should define it.
+; CHECK-NOT: , %[[PRER]]
+; CHECK: # %if.end117.i
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
+
+@.str3 = external unnamed_addr constant [59 x i8], align 1
+
+define void @BZ2_compressBlock() nounwind ssp {
+entry:
+ br i1 undef, label %if.then68, label %if.end85
+
+if.then68: ; preds = %entry
+ br label %for.body.i.i
+
+for.body.i.i: ; preds = %for.inc.i.i, %if.then68
+ br i1 undef, label %for.inc.i.i, label %if.then.i.i
+
+if.then.i.i: ; preds = %for.body.i.i
+ br label %for.inc.i.i
+
+for.inc.i.i: ; preds = %if.then.i.i, %for.body.i.i
+ br i1 undef, label %makeMaps_e.exit.i, label %for.body.i.i
+
+makeMaps_e.exit.i: ; preds = %for.inc.i.i
+ br i1 undef, label %for.cond19.preheader.i, label %for.cond.for.cond19.preheader_crit_edge.i
+
+for.cond.for.cond19.preheader_crit_edge.i: ; preds = %makeMaps_e.exit.i
+ unreachable
+
+for.cond19.preheader.i: ; preds = %makeMaps_e.exit.i
+ br i1 undef, label %for.body25.lr.ph.i, label %for.cond33.preheader.i
+
+for.body25.lr.ph.i: ; preds = %for.cond19.preheader.i
+ br label %for.body25.i
+
+for.cond33.preheader.i: ; preds = %for.body25.i, %for.cond19.preheader.i
+ br i1 undef, label %if.then.i, label %if.end.i
+
+for.body25.i: ; preds = %for.body25.i, %for.body25.lr.ph.i
+ br i1 undef, label %for.body25.i, label %for.cond33.preheader.i
+
+if.then.i: ; preds = %for.cond33.preheader.i
+ br label %if.end.i
+
+if.end.i: ; preds = %if.then.i, %for.cond33.preheader.i
+ br i1 undef, label %for.inc27.us.5.i, label %for.end30.i
+
+for.end30.i: ; preds = %for.inc27.us.5.i, %if.end.i
+ br i1 undef, label %if.end36.i, label %if.then35.i
+
+if.then35.i: ; preds = %for.end30.i
+ unreachable
+
+if.end36.i: ; preds = %for.end30.i
+ %sub83.i = add nsw i32 undef, 1
+ br label %while.body.i188
+
+for.cond182.preheader.i: ; preds = %for.end173.i
+ br i1 undef, label %for.inc220.us.i, label %while.body300.preheader.i
+
+while.body.i188: ; preds = %for.end173.i, %if.end36.i
+ %gs.0526.i = phi i32 [ 0, %if.end36.i ], [ %add177.i, %for.end173.i ]
+ %or.cond514517.i = and i1 false, undef
+ br i1 %or.cond514517.i, label %while.body85.i, label %if.end117.i
+
+while.body85.i: ; preds = %while.body85.i, %while.body.i188
+ %aFreq.0518.i = phi i32 [ %add93.i, %while.body85.i ], [ 0, %while.body.i188 ]
+ %inc87.i = add nsw i32 0, 1
+ %tmp91.i = load i32* undef, align 4, !tbaa !0
+ %add93.i = add nsw i32 %tmp91.i, %aFreq.0518.i
+ %or.cond514.i = and i1 undef, false
+ br i1 %or.cond514.i, label %while.body85.i, label %while.end.i
+
+while.end.i: ; preds = %while.body85.i
+ br i1 undef, label %land.lhs.true.i, label %if.end117.i
+
+land.lhs.true.i: ; preds = %while.end.i
+ br i1 undef, label %land.lhs.true103.i, label %if.end117.i
+
+land.lhs.true103.i: ; preds = %land.lhs.true.i
+ br i1 undef, label %if.then108.i, label %if.end117.i
+
+if.then108.i: ; preds = %land.lhs.true103.i
+ br label %if.end117.i
+
+if.end117.i: ; preds = %if.then108.i, %land.lhs.true103.i, %land.lhs.true.i, %while.end.i, %while.body.i188
+ %aFreq.1.i = phi i32 [ %aFreq.0518.i, %if.then108.i ], [ %add93.i, %land.lhs.true103.i ], [ %add93.i, %land.lhs.true.i ], [ %add93.i, %while.end.i ], [ 0, %while.body.i188 ]
+ %ge.1.i = phi i32 [ 0, %if.then108.i ], [ %inc87.i, %land.lhs.true103.i ], [ %inc87.i, %land.lhs.true.i ], [ %inc87.i, %while.end.i ], [ 0, %while.body.i188 ]
+ br i1 undef, label %if.then122.i, label %for.cond138.preheader.i
+
+if.then122.i: ; preds = %if.end117.i
+ call void (...)* @fprintf(i32 undef, i32 %gs.0526.i, i32 %ge.1.i, i32 %aFreq.1.i, double undef) nounwind
+ br label %for.cond138.preheader.i
+
+for.cond138.preheader.i: ; preds = %if.then122.i, %if.end117.i
+ br i1 undef, label %for.body143.lr.ph.i, label %for.end173.i
+
+for.body143.lr.ph.i: ; preds = %for.cond138.preheader.i
+ br label %for.body143.i
+
+for.body143.i: ; preds = %for.body143.i, %for.body143.lr.ph.i
+ br i1 undef, label %for.end173.i, label %for.body143.i
+
+for.end173.i: ; preds = %for.body143.i, %for.cond138.preheader.i
+ %add177.i = add nsw i32 %ge.1.i, 1
+ %cmp73.i = icmp sgt i32 undef, 0
+ br i1 %cmp73.i, label %while.body.i188, label %for.cond182.preheader.i
+
+for.inc220.us.i: ; preds = %for.cond182.preheader.i
+ unreachable
+
+while.body300.preheader.i: ; preds = %for.cond182.preheader.i
+ br i1 undef, label %for.end335.i, label %while.end2742.i
+
+for.end335.i: ; preds = %for.end2039.i, %while.body300.preheader.i
+ br label %for.body2021.i
+
+for.body2021.i: ; preds = %for.body2021.i, %for.end335.i
+ br i1 undef, label %for.body2021.i, label %for.end2039.i
+
+for.end2039.i: ; preds = %for.body2021.i
+ br label %for.end335.i
+
+while.end2742.i: ; preds = %while.body300.preheader.i
+ br i1 undef, label %if.then2748.i, label %for.body2778.i
+
+if.then2748.i: ; preds = %while.end2742.i
+ unreachable
+
+for.body2778.i: ; preds = %while.end2742.i
+ unreachable
+
+for.inc27.us.5.i: ; preds = %if.end.i
+ br label %for.end30.i
+
+if.end85: ; preds = %entry
+ ret void
+}
+
+declare void @fprintf(...) nounwind
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/X86/2011-09-18-sse2cmp.ll b/test/CodeGen/X86/2011-09-18-sse2cmp.ll
new file mode 100644
index 0000000..844d674
--- /dev/null
+++ b/test/CodeGen/X86/2011-09-18-sse2cmp.ll
@@ -0,0 +1,12 @@
+;RUN: llc < %s -march=x86 -mcpu=yonah -promote-elements -mattr=+sse2,-sse41 | FileCheck %s
+
+;CHECK: @max
+;CHECK: cmplepd
+;CHECK: ret
+
+define <2 x double> @max(<2 x double> %x, <2 x double> %y) {
+ %max_is_x = fcmp oge <2 x double> %x, %y
+ %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %max
+}
+
diff --git a/test/CodeGen/X86/2011-09-21-setcc-bug.ll b/test/CodeGen/X86/2011-09-21-setcc-bug.ll
new file mode 100644
index 0000000..ed5649c
--- /dev/null
+++ b/test/CodeGen/X86/2011-09-21-setcc-bug.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -promote-elements -mattr=+sse41
+
+; Make sure we are not crashing on this code.
+
+define void @load_4_i8(<4 x i8>* %k, <4 x i8>* %y, <4 x double>* %A1, <4 x double>* %A0) {
+ %A = load <4 x i8>* %k
+ %B = load <4 x i8>* %y
+ %C = load <4 x double>* %A0
+ %D= load <4 x double>* %A1
+ %M = icmp uge <4 x i8> %A, %B
+ %T = select <4 x i1> %M, <4 x double> %C, <4 x double> %D
+ store <4 x double> %T, <4 x double>* undef
+ ret void
+}
+
+
+define void @load_256_i8(<256 x i8>* %k, <256 x i8>* %y, <256 x double>* %A1, <256 x double>* %A0) {
+ %A = load <256 x i8>* %k
+ %B = load <256 x i8>* %y
+ %C = load <256 x double>* %A0
+ %D= load <256 x double>* %A1
+ %M = icmp uge <256 x i8> %A, %B
+ %T = select <256 x i1> %M, <256 x double> %C, <256 x double> %D
+ store <256 x double> %T, <256 x double>* undef
+ ret void
+}
+
diff --git a/test/CodeGen/X86/2011-10-11-SpillDead.ll b/test/CodeGen/X86/2011-10-11-SpillDead.ll
new file mode 100644
index 0000000..8e70d65
--- /dev/null
+++ b/test/CodeGen/X86/2011-10-11-SpillDead.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -verify-regalloc
+; PR11125
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7"
+
+; The inline asm takes %x as a GR32_ABCD virtual register.
+; The call to @g forces a spill of that register.
+;
+; The asm has a dead output tied to %x.
+; Verify that the spiller creates a value number for that dead def.
+;
+define void @f(i32 %x) nounwind uwtable ssp {
+entry:
+ tail call void @g() nounwind
+ %0 = tail call i32 asm sideeffect "foo $0", "=Q,0,~{ebx},~{dirflag},~{fpsr},~{flags}"(i32 %x) nounwind
+ ret void
+}
+
+declare void @g()
diff --git a/test/CodeGen/X86/2011-10-11-srl.ll b/test/CodeGen/X86/2011-10-11-srl.ll
new file mode 100644
index 0000000..cf9d36f
--- /dev/null
+++ b/test/CodeGen/X86/2011-10-11-srl.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -promote-elements -mattr=-sse41
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @m387(<2 x i8>* %p, <2 x i16>* %q) {
+ %t = load <2 x i8>* %p
+ %r = sext <2 x i8> %t to <2 x i16>
+ store <2 x i16> %r, <2 x i16>* %q
+ ret void
+}
+
diff --git a/test/CodeGen/X86/2011-10-12-MachineCSE.ll b/test/CodeGen/X86/2011-10-12-MachineCSE.ll
new file mode 100644
index 0000000..cd15f84
--- /dev/null
+++ b/test/CodeGen/X86/2011-10-12-MachineCSE.ll
@@ -0,0 +1,116 @@
+; RUN: llc -verify-machineinstrs < %s
+; <rdar://problem/10270968>
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7.2"
+
+%struct.optab = type { i32, [59 x %struct.anon.3] }
+%struct.anon.3 = type { i32, %struct.rtx_def* }
+%struct.rtx_def = type { [2 x i8], i8, i8, [1 x %union.rtunion_def] }
+%union.rtunion_def = type { i64 }
+%struct.insn_data = type { i8*, i8*, %struct.rtx_def* (%struct.rtx_def*, ...)*, %struct.insn_operand_data*, i8, i8, i8, i8 }
+%struct.insn_operand_data = type { i32 (%struct.rtx_def*, i32)*, i8*, [2 x i8], i8, i8 }
+
+@optab_table = external global [49 x %struct.optab*], align 16
+@insn_data = external constant [0 x %struct.insn_data]
+
+define %struct.rtx_def* @gen_add3_insn(%struct.rtx_def* %r0, %struct.rtx_def* %r1, %struct.rtx_def* %c) nounwind uwtable ssp {
+entry:
+ %0 = bitcast %struct.rtx_def* %r0 to i32*
+ %1 = load i32* %0, align 8
+ %2 = lshr i32 %1, 16
+ %bf.clear = and i32 %2, 255
+ %idxprom = sext i32 %bf.clear to i64
+ %3 = load %struct.optab** getelementptr inbounds ([49 x %struct.optab*]* @optab_table, i32 0, i64 0), align 8, !tbaa !0
+ %handlers = getelementptr inbounds %struct.optab* %3, i32 0, i32 1
+ %arrayidx = getelementptr inbounds [59 x %struct.anon.3]* %handlers, i32 0, i64 %idxprom
+ %insn_code = getelementptr inbounds %struct.anon.3* %arrayidx, i32 0, i32 0
+ %4 = load i32* %insn_code, align 4, !tbaa !3
+ %cmp = icmp eq i32 %4, 1317
+ br i1 %cmp, label %if.then, label %lor.lhs.false
+
+lor.lhs.false: ; preds = %entry
+ %idxprom1 = sext i32 %4 to i64
+ %arrayidx2 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom1
+ %operand = getelementptr inbounds %struct.insn_data* %arrayidx2, i32 0, i32 3
+ %5 = load %struct.insn_operand_data** %operand, align 8, !tbaa !0
+ %arrayidx3 = getelementptr inbounds %struct.insn_operand_data* %5, i64 0
+ %predicate = getelementptr inbounds %struct.insn_operand_data* %arrayidx3, i32 0, i32 0
+ %6 = load i32 (%struct.rtx_def*, i32)** %predicate, align 8, !tbaa !0
+ %idxprom4 = sext i32 %4 to i64
+ %arrayidx5 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom4
+ %operand6 = getelementptr inbounds %struct.insn_data* %arrayidx5, i32 0, i32 3
+ %7 = load %struct.insn_operand_data** %operand6, align 8, !tbaa !0
+ %arrayidx7 = getelementptr inbounds %struct.insn_operand_data* %7, i64 0
+ %8 = bitcast %struct.insn_operand_data* %arrayidx7 to i8*
+ %bf.field.offs = getelementptr i8* %8, i32 16
+ %9 = bitcast i8* %bf.field.offs to i32*
+ %10 = load i32* %9, align 8
+ %bf.clear8 = and i32 %10, 65535
+ %call = tail call i32 %6(%struct.rtx_def* %r0, i32 %bf.clear8)
+ %tobool = icmp ne i32 %call, 0
+ br i1 %tobool, label %lor.lhs.false9, label %if.then
+
+lor.lhs.false9: ; preds = %lor.lhs.false
+ %idxprom10 = sext i32 %4 to i64
+ %arrayidx11 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom10
+ %operand12 = getelementptr inbounds %struct.insn_data* %arrayidx11, i32 0, i32 3
+ %11 = load %struct.insn_operand_data** %operand12, align 8, !tbaa !0
+ %arrayidx13 = getelementptr inbounds %struct.insn_operand_data* %11, i64 1
+ %predicate14 = getelementptr inbounds %struct.insn_operand_data* %arrayidx13, i32 0, i32 0
+ %12 = load i32 (%struct.rtx_def*, i32)** %predicate14, align 8, !tbaa !0
+ %idxprom15 = sext i32 %4 to i64
+ %arrayidx16 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom15
+ %operand17 = getelementptr inbounds %struct.insn_data* %arrayidx16, i32 0, i32 3
+ %13 = load %struct.insn_operand_data** %operand17, align 8, !tbaa !0
+ %arrayidx18 = getelementptr inbounds %struct.insn_operand_data* %13, i64 1
+ %14 = bitcast %struct.insn_operand_data* %arrayidx18 to i8*
+ %bf.field.offs19 = getelementptr i8* %14, i32 16
+ %15 = bitcast i8* %bf.field.offs19 to i32*
+ %16 = load i32* %15, align 8
+ %bf.clear20 = and i32 %16, 65535
+ %call21 = tail call i32 %12(%struct.rtx_def* %r1, i32 %bf.clear20)
+ %tobool22 = icmp ne i32 %call21, 0
+ br i1 %tobool22, label %lor.lhs.false23, label %if.then
+
+lor.lhs.false23: ; preds = %lor.lhs.false9
+ %idxprom24 = sext i32 %4 to i64
+ %arrayidx25 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom24
+ %operand26 = getelementptr inbounds %struct.insn_data* %arrayidx25, i32 0, i32 3
+ %17 = load %struct.insn_operand_data** %operand26, align 8, !tbaa !0
+ %arrayidx27 = getelementptr inbounds %struct.insn_operand_data* %17, i64 2
+ %predicate28 = getelementptr inbounds %struct.insn_operand_data* %arrayidx27, i32 0, i32 0
+ %18 = load i32 (%struct.rtx_def*, i32)** %predicate28, align 8, !tbaa !0
+ %idxprom29 = sext i32 %4 to i64
+ %arrayidx30 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom29
+ %operand31 = getelementptr inbounds %struct.insn_data* %arrayidx30, i32 0, i32 3
+ %19 = load %struct.insn_operand_data** %operand31, align 8, !tbaa !0
+ %arrayidx32 = getelementptr inbounds %struct.insn_operand_data* %19, i64 2
+ %20 = bitcast %struct.insn_operand_data* %arrayidx32 to i8*
+ %bf.field.offs33 = getelementptr i8* %20, i32 16
+ %21 = bitcast i8* %bf.field.offs33 to i32*
+ %22 = load i32* %21, align 8
+ %bf.clear34 = and i32 %22, 65535
+ %call35 = tail call i32 %18(%struct.rtx_def* %c, i32 %bf.clear34)
+ %tobool36 = icmp ne i32 %call35, 0
+ br i1 %tobool36, label %if.end, label %if.then
+
+if.then: ; preds = %lor.lhs.false23, %lor.lhs.false9, %lor.lhs.false, %entry
+ br label %return
+
+if.end: ; preds = %lor.lhs.false23
+ %idxprom37 = sext i32 %4 to i64
+ %arrayidx38 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom37
+ %genfun = getelementptr inbounds %struct.insn_data* %arrayidx38, i32 0, i32 2
+ %23 = load %struct.rtx_def* (%struct.rtx_def*, ...)** %genfun, align 8, !tbaa !0
+ %call39 = tail call %struct.rtx_def* (%struct.rtx_def*, ...)* %23(%struct.rtx_def* %r0, %struct.rtx_def* %r1, %struct.rtx_def* %c)
+ br label %return
+
+return: ; preds = %if.end, %if.then
+ %24 = phi %struct.rtx_def* [ %call39, %if.end ], [ null, %if.then ]
+ ret %struct.rtx_def* %24
+}
+
+!0 = metadata !{metadata !"any pointer", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
+!3 = metadata !{metadata !"_ZTS9insn_code", metadata !1}
diff --git a/test/CodeGen/X86/Atomics-32.ll b/test/CodeGen/X86/Atomics-32.ll
deleted file mode 100644
index 0e9b73e..0000000
--- a/test/CodeGen/X86/Atomics-32.ll
+++ /dev/null
@@ -1,818 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-;; Note the 64-bit variants are not supported yet (in 32-bit mode).
-; ModuleID = 'Atomics.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-@sc = common global i8 0 ; <i8*> [#uses=52]
-@uc = common global i8 0 ; <i8*> [#uses=100]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=23]
-@sl = common global i32 0 ; <i32*> [#uses=15]
-@ul = common global i32 0 ; <i32*> [#uses=15]
-
-define void @test_op_ignore() nounwind {
-entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:14 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:15 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 ) ; <i16>:17 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 ) ; <i16>:19 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 ) ; <i32>:21 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 ) ; <i32>:23 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; <i32>:25 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; <i32>:27 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:28 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:29 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 ) ; <i16>:31 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 ) ; <i16>:33 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 ) ; <i32>:35 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 ) ; <i32>:37 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 1 ) ; <i32>:39 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 1 ) ; <i32>:41 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:42 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:43 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 ) ; <i16>:45 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 ) ; <i16>:47 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 ) ; <i32>:49 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 ) ; <i32>:51 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 1 ) ; <i32>:53 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 1 ) ; <i32>:55 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:56 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:57 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 ) ; <i16>:61 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 ) ; <i32>:65 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 1 ) ; <i32>:67 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 1 ) ; <i32>:69 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:70 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:71 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 ) ; <i16>:73 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 ) ; <i32>:77 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 1 ) ; <i32>:81 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 1 ) ; <i32>:83 [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
-
-define void @test_fetch_and_op() nounwind {
-entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 11 ) ; <i32>:11 [#uses=1]
- store i32 %11, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 11 ) ; <i32>:13 [#uses=1]
- store i32 %13, i32* @ul, align 4
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:14 [#uses=1]
- store i8 %14, i8* @sc, align 1
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:15 [#uses=1]
- store i8 %15, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 ) ; <i16>:17 [#uses=1]
- store i16 %17, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 ) ; <i16>:19 [#uses=1]
- store i16 %19, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 ) ; <i32>:21 [#uses=1]
- store i32 %21, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 ) ; <i32>:23 [#uses=1]
- store i32 %23, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; <i32>:25 [#uses=1]
- store i32 %25, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; <i32>:27 [#uses=1]
- store i32 %27, i32* @ul, align 4
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:28 [#uses=1]
- store i8 %28, i8* @sc, align 1
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:29 [#uses=1]
- store i8 %29, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 ) ; <i16>:31 [#uses=1]
- store i16 %31, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 ) ; <i16>:33 [#uses=1]
- store i16 %33, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 ) ; <i32>:35 [#uses=1]
- store i32 %35, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 ) ; <i32>:37 [#uses=1]
- store i32 %37, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 11 ) ; <i32>:39 [#uses=1]
- store i32 %39, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 11 ) ; <i32>:41 [#uses=1]
- store i32 %41, i32* @ul, align 4
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:42 [#uses=1]
- store i8 %42, i8* @sc, align 1
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:43 [#uses=1]
- store i8 %43, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 ) ; <i16>:45 [#uses=1]
- store i16 %45, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 ) ; <i16>:47 [#uses=1]
- store i16 %47, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 ) ; <i32>:49 [#uses=1]
- store i32 %49, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 ) ; <i32>:51 [#uses=1]
- store i32 %51, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 11 ) ; <i32>:53 [#uses=1]
- store i32 %53, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 11 ) ; <i32>:55 [#uses=1]
- store i32 %55, i32* @ul, align 4
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:56 [#uses=1]
- store i8 %56, i8* @sc, align 1
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:57 [#uses=1]
- store i8 %57, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1]
- store i16 %59, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 ) ; <i16>:61 [#uses=1]
- store i16 %61, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1]
- store i32 %63, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 ) ; <i32>:65 [#uses=1]
- store i32 %65, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 11 ) ; <i32>:67 [#uses=1]
- store i32 %67, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 11 ) ; <i32>:69 [#uses=1]
- store i32 %69, i32* @ul, align 4
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:70 [#uses=1]
- store i8 %70, i8* @sc, align 1
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:71 [#uses=1]
- store i8 %71, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 ) ; <i16>:73 [#uses=1]
- store i16 %73, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1]
- store i16 %75, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 ) ; <i32>:77 [#uses=1]
- store i32 %77, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1]
- store i32 %79, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 11 ) ; <i32>:81 [#uses=1]
- store i32 %81, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 11 ) ; <i32>:83 [#uses=1]
- store i32 %83, i32* @ul, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_op_and_fetch() nounwind {
-entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=1]
- zext i8 %0 to i32 ; <i32>:1 [#uses=1]
- trunc i32 %1 to i8 ; <i8>:2 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %2 ) ; <i8>:3 [#uses=1]
- add i8 %3, %2 ; <i8>:4 [#uses=1]
- store i8 %4, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:5 [#uses=1]
- zext i8 %5 to i32 ; <i32>:6 [#uses=1]
- trunc i32 %6 to i8 ; <i8>:7 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %7 ) ; <i8>:8 [#uses=1]
- add i8 %8, %7 ; <i8>:9 [#uses=1]
- store i8 %9, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:10 [#uses=1]
- zext i8 %10 to i32 ; <i32>:11 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:12 [#uses=1]
- trunc i32 %11 to i16 ; <i16>:13 [#uses=2]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %12, i16 %13 ) ; <i16>:14 [#uses=1]
- add i16 %14, %13 ; <i16>:15 [#uses=1]
- store i16 %15, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:16 [#uses=1]
- zext i8 %16 to i32 ; <i32>:17 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- trunc i32 %17 to i16 ; <i16>:19 [#uses=2]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %18, i16 %19 ) ; <i16>:20 [#uses=1]
- add i16 %20, %19 ; <i16>:21 [#uses=1]
- store i16 %21, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:22 [#uses=1]
- zext i8 %22 to i32 ; <i32>:23 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %24, i32 %23 ) ; <i32>:25 [#uses=1]
- add i32 %25, %23 ; <i32>:26 [#uses=1]
- store i32 %26, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:27 [#uses=1]
- zext i8 %27 to i32 ; <i32>:28 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:29 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %29, i32 %28 ) ; <i32>:30 [#uses=1]
- add i32 %30, %28 ; <i32>:31 [#uses=1]
- store i32 %31, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:32 [#uses=1]
- zext i8 %32 to i32 ; <i32>:33 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %34, i32 %33 ) ; <i32>:35 [#uses=1]
- add i32 %35, %33 ; <i32>:36 [#uses=1]
- store i32 %36, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:37 [#uses=1]
- zext i8 %37 to i32 ; <i32>:38 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:39 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %39, i32 %38 ) ; <i32>:40 [#uses=1]
- add i32 %40, %38 ; <i32>:41 [#uses=1]
- store i32 %41, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:42 [#uses=1]
- zext i8 %42 to i32 ; <i32>:43 [#uses=1]
- trunc i32 %43 to i8 ; <i8>:44 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %44 ) ; <i8>:45 [#uses=1]
- sub i8 %45, %44 ; <i8>:46 [#uses=1]
- store i8 %46, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:47 [#uses=1]
- zext i8 %47 to i32 ; <i32>:48 [#uses=1]
- trunc i32 %48 to i8 ; <i8>:49 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %49 ) ; <i8>:50 [#uses=1]
- sub i8 %50, %49 ; <i8>:51 [#uses=1]
- store i8 %51, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:54 [#uses=1]
- trunc i32 %53 to i16 ; <i16>:55 [#uses=2]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %54, i16 %55 ) ; <i16>:56 [#uses=1]
- sub i16 %56, %55 ; <i16>:57 [#uses=1]
- store i16 %57, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:58 [#uses=1]
- zext i8 %58 to i32 ; <i32>:59 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- trunc i32 %59 to i16 ; <i16>:61 [#uses=2]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %60, i16 %61 ) ; <i16>:62 [#uses=1]
- sub i16 %62, %61 ; <i16>:63 [#uses=1]
- store i16 %63, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:64 [#uses=1]
- zext i8 %64 to i32 ; <i32>:65 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:66 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %66, i32 %65 ) ; <i32>:67 [#uses=1]
- sub i32 %67, %65 ; <i32>:68 [#uses=1]
- store i32 %68, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:69 [#uses=1]
- zext i8 %69 to i32 ; <i32>:70 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:71 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %71, i32 %70 ) ; <i32>:72 [#uses=1]
- sub i32 %72, %70 ; <i32>:73 [#uses=1]
- store i32 %73, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:74 [#uses=1]
- zext i8 %74 to i32 ; <i32>:75 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %76, i32 %75 ) ; <i32>:77 [#uses=1]
- sub i32 %77, %75 ; <i32>:78 [#uses=1]
- store i32 %78, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:79 [#uses=1]
- zext i8 %79 to i32 ; <i32>:80 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:81 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %81, i32 %80 ) ; <i32>:82 [#uses=1]
- sub i32 %82, %80 ; <i32>:83 [#uses=1]
- store i32 %83, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:84 [#uses=1]
- zext i8 %84 to i32 ; <i32>:85 [#uses=1]
- trunc i32 %85 to i8 ; <i8>:86 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %86 ) ; <i8>:87 [#uses=1]
- or i8 %87, %86 ; <i8>:88 [#uses=1]
- store i8 %88, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:89 [#uses=1]
- zext i8 %89 to i32 ; <i32>:90 [#uses=1]
- trunc i32 %90 to i8 ; <i8>:91 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %91 ) ; <i8>:92 [#uses=1]
- or i8 %92, %91 ; <i8>:93 [#uses=1]
- store i8 %93, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:94 [#uses=1]
- zext i8 %94 to i32 ; <i32>:95 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:96 [#uses=1]
- trunc i32 %95 to i16 ; <i16>:97 [#uses=2]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %96, i16 %97 ) ; <i16>:98 [#uses=1]
- or i16 %98, %97 ; <i16>:99 [#uses=1]
- store i16 %99, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:100 [#uses=1]
- zext i8 %100 to i32 ; <i32>:101 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:102 [#uses=1]
- trunc i32 %101 to i16 ; <i16>:103 [#uses=2]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %102, i16 %103 ) ; <i16>:104 [#uses=1]
- or i16 %104, %103 ; <i16>:105 [#uses=1]
- store i16 %105, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:106 [#uses=1]
- zext i8 %106 to i32 ; <i32>:107 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:108 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %108, i32 %107 ) ; <i32>:109 [#uses=1]
- or i32 %109, %107 ; <i32>:110 [#uses=1]
- store i32 %110, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:111 [#uses=1]
- zext i8 %111 to i32 ; <i32>:112 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:113 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %113, i32 %112 ) ; <i32>:114 [#uses=1]
- or i32 %114, %112 ; <i32>:115 [#uses=1]
- store i32 %115, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:116 [#uses=1]
- zext i8 %116 to i32 ; <i32>:117 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:118 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %118, i32 %117 ) ; <i32>:119 [#uses=1]
- or i32 %119, %117 ; <i32>:120 [#uses=1]
- store i32 %120, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:121 [#uses=1]
- zext i8 %121 to i32 ; <i32>:122 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:123 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %123, i32 %122 ) ; <i32>:124 [#uses=1]
- or i32 %124, %122 ; <i32>:125 [#uses=1]
- store i32 %125, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:126 [#uses=1]
- zext i8 %126 to i32 ; <i32>:127 [#uses=1]
- trunc i32 %127 to i8 ; <i8>:128 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %128 ) ; <i8>:129 [#uses=1]
- xor i8 %129, %128 ; <i8>:130 [#uses=1]
- store i8 %130, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:131 [#uses=1]
- zext i8 %131 to i32 ; <i32>:132 [#uses=1]
- trunc i32 %132 to i8 ; <i8>:133 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %133 ) ; <i8>:134 [#uses=1]
- xor i8 %134, %133 ; <i8>:135 [#uses=1]
- store i8 %135, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:136 [#uses=1]
- zext i8 %136 to i32 ; <i32>:137 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:138 [#uses=1]
- trunc i32 %137 to i16 ; <i16>:139 [#uses=2]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %138, i16 %139 ) ; <i16>:140 [#uses=1]
- xor i16 %140, %139 ; <i16>:141 [#uses=1]
- store i16 %141, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:142 [#uses=1]
- zext i8 %142 to i32 ; <i32>:143 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:144 [#uses=1]
- trunc i32 %143 to i16 ; <i16>:145 [#uses=2]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %144, i16 %145 ) ; <i16>:146 [#uses=1]
- xor i16 %146, %145 ; <i16>:147 [#uses=1]
- store i16 %147, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:148 [#uses=1]
- zext i8 %148 to i32 ; <i32>:149 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:150 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %150, i32 %149 ) ; <i32>:151 [#uses=1]
- xor i32 %151, %149 ; <i32>:152 [#uses=1]
- store i32 %152, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:153 [#uses=1]
- zext i8 %153 to i32 ; <i32>:154 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:155 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %155, i32 %154 ) ; <i32>:156 [#uses=1]
- xor i32 %156, %154 ; <i32>:157 [#uses=1]
- store i32 %157, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:158 [#uses=1]
- zext i8 %158 to i32 ; <i32>:159 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:160 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %160, i32 %159 ) ; <i32>:161 [#uses=1]
- xor i32 %161, %159 ; <i32>:162 [#uses=1]
- store i32 %162, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:163 [#uses=1]
- zext i8 %163 to i32 ; <i32>:164 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:165 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %165, i32 %164 ) ; <i32>:166 [#uses=1]
- xor i32 %166, %164 ; <i32>:167 [#uses=1]
- store i32 %167, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:168 [#uses=1]
- zext i8 %168 to i32 ; <i32>:169 [#uses=1]
- trunc i32 %169 to i8 ; <i8>:170 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %170 ) ; <i8>:171 [#uses=1]
- and i8 %171, %170 ; <i8>:172 [#uses=1]
- store i8 %172, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:173 [#uses=1]
- zext i8 %173 to i32 ; <i32>:174 [#uses=1]
- trunc i32 %174 to i8 ; <i8>:175 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %175 ) ; <i8>:176 [#uses=1]
- and i8 %176, %175 ; <i8>:177 [#uses=1]
- store i8 %177, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:178 [#uses=1]
- zext i8 %178 to i32 ; <i32>:179 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:180 [#uses=1]
- trunc i32 %179 to i16 ; <i16>:181 [#uses=2]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %180, i16 %181 ) ; <i16>:182 [#uses=1]
- and i16 %182, %181 ; <i16>:183 [#uses=1]
- store i16 %183, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:184 [#uses=1]
- zext i8 %184 to i32 ; <i32>:185 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:186 [#uses=1]
- trunc i32 %185 to i16 ; <i16>:187 [#uses=2]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %186, i16 %187 ) ; <i16>:188 [#uses=1]
- and i16 %188, %187 ; <i16>:189 [#uses=1]
- store i16 %189, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:190 [#uses=1]
- zext i8 %190 to i32 ; <i32>:191 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:192 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %192, i32 %191 ) ; <i32>:193 [#uses=1]
- and i32 %193, %191 ; <i32>:194 [#uses=1]
- store i32 %194, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:195 [#uses=1]
- zext i8 %195 to i32 ; <i32>:196 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:197 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %197, i32 %196 ) ; <i32>:198 [#uses=1]
- and i32 %198, %196 ; <i32>:199 [#uses=1]
- store i32 %199, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:200 [#uses=1]
- zext i8 %200 to i32 ; <i32>:201 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:202 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %202, i32 %201 ) ; <i32>:203 [#uses=1]
- and i32 %203, %201 ; <i32>:204 [#uses=1]
- store i32 %204, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:205 [#uses=1]
- zext i8 %205 to i32 ; <i32>:206 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:207 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %207, i32 %206 ) ; <i32>:208 [#uses=1]
- and i32 %208, %206 ; <i32>:209 [#uses=1]
- store i32 %209, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:210 [#uses=1]
- zext i8 %210 to i32 ; <i32>:211 [#uses=1]
- trunc i32 %211 to i8 ; <i8>:212 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %212 ) ; <i8>:213 [#uses=1]
- xor i8 %213, -1 ; <i8>:214 [#uses=1]
- and i8 %214, %212 ; <i8>:215 [#uses=1]
- store i8 %215, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:216 [#uses=1]
- zext i8 %216 to i32 ; <i32>:217 [#uses=1]
- trunc i32 %217 to i8 ; <i8>:218 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %218 ) ; <i8>:219 [#uses=1]
- xor i8 %219, -1 ; <i8>:220 [#uses=1]
- and i8 %220, %218 ; <i8>:221 [#uses=1]
- store i8 %221, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:222 [#uses=1]
- zext i8 %222 to i32 ; <i32>:223 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:224 [#uses=1]
- trunc i32 %223 to i16 ; <i16>:225 [#uses=2]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %224, i16 %225 ) ; <i16>:226 [#uses=1]
- xor i16 %226, -1 ; <i16>:227 [#uses=1]
- and i16 %227, %225 ; <i16>:228 [#uses=1]
- store i16 %228, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:229 [#uses=1]
- zext i8 %229 to i32 ; <i32>:230 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:231 [#uses=1]
- trunc i32 %230 to i16 ; <i16>:232 [#uses=2]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %231, i16 %232 ) ; <i16>:233 [#uses=1]
- xor i16 %233, -1 ; <i16>:234 [#uses=1]
- and i16 %234, %232 ; <i16>:235 [#uses=1]
- store i16 %235, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:236 [#uses=1]
- zext i8 %236 to i32 ; <i32>:237 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:238 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %238, i32 %237 ) ; <i32>:239 [#uses=1]
- xor i32 %239, -1 ; <i32>:240 [#uses=1]
- and i32 %240, %237 ; <i32>:241 [#uses=1]
- store i32 %241, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:242 [#uses=1]
- zext i8 %242 to i32 ; <i32>:243 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:244 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %244, i32 %243 ) ; <i32>:245 [#uses=1]
- xor i32 %245, -1 ; <i32>:246 [#uses=1]
- and i32 %246, %243 ; <i32>:247 [#uses=1]
- store i32 %247, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:248 [#uses=1]
- zext i8 %248 to i32 ; <i32>:249 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:250 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %250, i32 %249 ) ; <i32>:251 [#uses=1]
- xor i32 %251, -1 ; <i32>:252 [#uses=1]
- and i32 %252, %249 ; <i32>:253 [#uses=1]
- store i32 %253, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:254 [#uses=1]
- zext i8 %254 to i32 ; <i32>:255 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:256 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %256, i32 %255 ) ; <i32>:257 [#uses=1]
- xor i32 %257, -1 ; <i32>:258 [#uses=1]
- and i32 %258, %255 ; <i32>:259 [#uses=1]
- store i32 %259, i32* @ul, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_compare_and_swap() nounwind {
-entry:
- load i8* @sc, align 1 ; <i8>:0 [#uses=1]
- zext i8 %0 to i32 ; <i32>:1 [#uses=1]
- load i8* @uc, align 1 ; <i8>:2 [#uses=1]
- zext i8 %2 to i32 ; <i32>:3 [#uses=1]
- trunc i32 %3 to i8 ; <i8>:4 [#uses=1]
- trunc i32 %1 to i8 ; <i8>:5 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %4, i8 %5 ) ; <i8>:6 [#uses=1]
- store i8 %6, i8* @sc, align 1
- load i8* @sc, align 1 ; <i8>:7 [#uses=1]
- zext i8 %7 to i32 ; <i32>:8 [#uses=1]
- load i8* @uc, align 1 ; <i8>:9 [#uses=1]
- zext i8 %9 to i32 ; <i32>:10 [#uses=1]
- trunc i32 %10 to i8 ; <i8>:11 [#uses=1]
- trunc i32 %8 to i8 ; <i8>:12 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %11, i8 %12 ) ; <i8>:13 [#uses=1]
- store i8 %13, i8* @uc, align 1
- load i8* @sc, align 1 ; <i8>:14 [#uses=1]
- sext i8 %14 to i16 ; <i16>:15 [#uses=1]
- zext i16 %15 to i32 ; <i32>:16 [#uses=1]
- load i8* @uc, align 1 ; <i8>:17 [#uses=1]
- zext i8 %17 to i32 ; <i32>:18 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:19 [#uses=1]
- trunc i32 %18 to i16 ; <i16>:20 [#uses=1]
- trunc i32 %16 to i16 ; <i16>:21 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %19, i16 %20, i16 %21 ) ; <i16>:22 [#uses=1]
- store i16 %22, i16* @ss, align 2
- load i8* @sc, align 1 ; <i8>:23 [#uses=1]
- sext i8 %23 to i16 ; <i16>:24 [#uses=1]
- zext i16 %24 to i32 ; <i32>:25 [#uses=1]
- load i8* @uc, align 1 ; <i8>:26 [#uses=1]
- zext i8 %26 to i32 ; <i32>:27 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:28 [#uses=1]
- trunc i32 %27 to i16 ; <i16>:29 [#uses=1]
- trunc i32 %25 to i16 ; <i16>:30 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %28, i16 %29, i16 %30 ) ; <i16>:31 [#uses=1]
- store i16 %31, i16* @us, align 2
- load i8* @sc, align 1 ; <i8>:32 [#uses=1]
- sext i8 %32 to i32 ; <i32>:33 [#uses=1]
- load i8* @uc, align 1 ; <i8>:34 [#uses=1]
- zext i8 %34 to i32 ; <i32>:35 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %36, i32 %35, i32 %33 ) ; <i32>:37 [#uses=1]
- store i32 %37, i32* @si, align 4
- load i8* @sc, align 1 ; <i8>:38 [#uses=1]
- sext i8 %38 to i32 ; <i32>:39 [#uses=1]
- load i8* @uc, align 1 ; <i8>:40 [#uses=1]
- zext i8 %40 to i32 ; <i32>:41 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:42 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %42, i32 %41, i32 %39 ) ; <i32>:43 [#uses=1]
- store i32 %43, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:44 [#uses=1]
- sext i8 %44 to i32 ; <i32>:45 [#uses=1]
- load i8* @uc, align 1 ; <i8>:46 [#uses=1]
- zext i8 %46 to i32 ; <i32>:47 [#uses=1]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %48, i32 %47, i32 %45 ) ; <i32>:49 [#uses=1]
- store i32 %49, i32* @sl, align 4
- load i8* @sc, align 1 ; <i8>:50 [#uses=1]
- sext i8 %50 to i32 ; <i32>:51 [#uses=1]
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=1]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %54, i32 %53, i32 %51 ) ; <i32>:55 [#uses=1]
- store i32 %55, i32* @ul, align 4
- load i8* @sc, align 1 ; <i8>:56 [#uses=1]
- zext i8 %56 to i32 ; <i32>:57 [#uses=1]
- load i8* @uc, align 1 ; <i8>:58 [#uses=1]
- zext i8 %58 to i32 ; <i32>:59 [#uses=1]
- trunc i32 %59 to i8 ; <i8>:60 [#uses=2]
- trunc i32 %57 to i8 ; <i8>:61 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %60, i8 %61 ) ; <i8>:62 [#uses=1]
- icmp eq i8 %62, %60 ; <i1>:63 [#uses=1]
- zext i1 %63 to i8 ; <i8>:64 [#uses=1]
- zext i8 %64 to i32 ; <i32>:65 [#uses=1]
- store i32 %65, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:66 [#uses=1]
- zext i8 %66 to i32 ; <i32>:67 [#uses=1]
- load i8* @uc, align 1 ; <i8>:68 [#uses=1]
- zext i8 %68 to i32 ; <i32>:69 [#uses=1]
- trunc i32 %69 to i8 ; <i8>:70 [#uses=2]
- trunc i32 %67 to i8 ; <i8>:71 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %70, i8 %71 ) ; <i8>:72 [#uses=1]
- icmp eq i8 %72, %70 ; <i1>:73 [#uses=1]
- zext i1 %73 to i8 ; <i8>:74 [#uses=1]
- zext i8 %74 to i32 ; <i32>:75 [#uses=1]
- store i32 %75, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:76 [#uses=1]
- sext i8 %76 to i16 ; <i16>:77 [#uses=1]
- zext i16 %77 to i32 ; <i32>:78 [#uses=1]
- load i8* @uc, align 1 ; <i8>:79 [#uses=1]
- zext i8 %79 to i32 ; <i32>:80 [#uses=1]
- trunc i32 %80 to i8 ; <i8>:81 [#uses=2]
- trunc i32 %78 to i8 ; <i8>:82 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @ss to i8*), i8 %81, i8 %82 ) ; <i8>:83 [#uses=1]
- icmp eq i8 %83, %81 ; <i1>:84 [#uses=1]
- zext i1 %84 to i8 ; <i8>:85 [#uses=1]
- zext i8 %85 to i32 ; <i32>:86 [#uses=1]
- store i32 %86, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:87 [#uses=1]
- sext i8 %87 to i16 ; <i16>:88 [#uses=1]
- zext i16 %88 to i32 ; <i32>:89 [#uses=1]
- load i8* @uc, align 1 ; <i8>:90 [#uses=1]
- zext i8 %90 to i32 ; <i32>:91 [#uses=1]
- trunc i32 %91 to i8 ; <i8>:92 [#uses=2]
- trunc i32 %89 to i8 ; <i8>:93 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @us to i8*), i8 %92, i8 %93 ) ; <i8>:94 [#uses=1]
- icmp eq i8 %94, %92 ; <i1>:95 [#uses=1]
- zext i1 %95 to i8 ; <i8>:96 [#uses=1]
- zext i8 %96 to i32 ; <i32>:97 [#uses=1]
- store i32 %97, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:98 [#uses=1]
- sext i8 %98 to i32 ; <i32>:99 [#uses=1]
- load i8* @uc, align 1 ; <i8>:100 [#uses=1]
- zext i8 %100 to i32 ; <i32>:101 [#uses=1]
- trunc i32 %101 to i8 ; <i8>:102 [#uses=2]
- trunc i32 %99 to i8 ; <i8>:103 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @si to i8*), i8 %102, i8 %103 ) ; <i8>:104 [#uses=1]
- icmp eq i8 %104, %102 ; <i1>:105 [#uses=1]
- zext i1 %105 to i8 ; <i8>:106 [#uses=1]
- zext i8 %106 to i32 ; <i32>:107 [#uses=1]
- store i32 %107, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:108 [#uses=1]
- sext i8 %108 to i32 ; <i32>:109 [#uses=1]
- load i8* @uc, align 1 ; <i8>:110 [#uses=1]
- zext i8 %110 to i32 ; <i32>:111 [#uses=1]
- trunc i32 %111 to i8 ; <i8>:112 [#uses=2]
- trunc i32 %109 to i8 ; <i8>:113 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ui to i8*), i8 %112, i8 %113 ) ; <i8>:114 [#uses=1]
- icmp eq i8 %114, %112 ; <i1>:115 [#uses=1]
- zext i1 %115 to i8 ; <i8>:116 [#uses=1]
- zext i8 %116 to i32 ; <i32>:117 [#uses=1]
- store i32 %117, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:118 [#uses=1]
- sext i8 %118 to i32 ; <i32>:119 [#uses=1]
- load i8* @uc, align 1 ; <i8>:120 [#uses=1]
- zext i8 %120 to i32 ; <i32>:121 [#uses=1]
- trunc i32 %121 to i8 ; <i8>:122 [#uses=2]
- trunc i32 %119 to i8 ; <i8>:123 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @sl to i8*), i8 %122, i8 %123 ) ; <i8>:124 [#uses=1]
- icmp eq i8 %124, %122 ; <i1>:125 [#uses=1]
- zext i1 %125 to i8 ; <i8>:126 [#uses=1]
- zext i8 %126 to i32 ; <i32>:127 [#uses=1]
- store i32 %127, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:128 [#uses=1]
- sext i8 %128 to i32 ; <i32>:129 [#uses=1]
- load i8* @uc, align 1 ; <i8>:130 [#uses=1]
- zext i8 %130 to i32 ; <i32>:131 [#uses=1]
- trunc i32 %131 to i8 ; <i8>:132 [#uses=2]
- trunc i32 %129 to i8 ; <i8>:133 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ul to i8*), i8 %132, i8 %133 ) ; <i8>:134 [#uses=1]
- icmp eq i8 %134, %132 ; <i1>:135 [#uses=1]
- zext i1 %135 to i8 ; <i8>:136 [#uses=1]
- zext i8 %136 to i32 ; <i32>:137 [#uses=1]
- store i32 %137, i32* @ui, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
-
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
-define void @test_lock() nounwind {
-entry:
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=1]
- store i32 %11, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=1]
- store i32 %13, i32* @ul, align 4
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:14 [#uses=1]
- volatile store i16 0, i16* %14, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:15 [#uses=1]
- volatile store i16 0, i16* %15, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:16 [#uses=1]
- volatile store i32 0, i32* %16, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:17 [#uses=1]
- volatile store i32 0, i32* %17, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:18 [#uses=1]
- volatile store i32 0, i32* %18, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:19 [#uses=1]
- volatile store i32 0, i32* %19, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/X86/Atomics-64.ll b/test/CodeGen/X86/Atomics-64.ll
index ac174b9..8e93762 100644
--- a/test/CodeGen/X86/Atomics-64.ll
+++ b/test/CodeGen/X86/Atomics-64.ll
@@ -1,1015 +1,950 @@
; RUN: llc < %s -march=x86-64 > %t
-; ModuleID = 'Atomics.c'
+; RUN: llc < %s -march=x86 > %t
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin8"
-@sc = common global i8 0 ; <i8*> [#uses=56]
-@uc = common global i8 0 ; <i8*> [#uses=116]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=25]
-@sl = common global i64 0 ; <i64*> [#uses=15]
-@ul = common global i64 0 ; <i64*> [#uses=15]
-@sll = common global i64 0 ; <i64*> [#uses=15]
-@ull = common global i64 0 ; <i64*> [#uses=15]
+
+@sc = common global i8 0
+@uc = common global i8 0
+@ss = common global i16 0
+@us = common global i16 0
+@si = common global i32 0
+@ui = common global i32 0
+@sl = common global i64 0
+@ul = common global i64 0
+@sll = common global i64 0
+@ull = common global i64 0
define void @test_op_ignore() nounwind {
entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 1 ) ; <i64>:11 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 1 ) ; <i64>:13 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:14 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %14, i64 1 ) ; <i64>:15 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:16 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %16, i64 1 ) ; <i64>:17 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:18 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:19 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:20 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %20, i16 1 ) ; <i16>:21 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:22 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %22, i16 1 ) ; <i16>:23 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; <i32>:25 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; <i32>:27 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:28 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %28, i64 1 ) ; <i64>:29 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:30 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %30, i64 1 ) ; <i64>:31 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:32 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %32, i64 1 ) ; <i64>:33 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:34 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %34, i64 1 ) ; <i64>:35 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:36 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:37 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:38 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %38, i16 1 ) ; <i16>:39 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:40 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %40, i16 1 ) ; <i16>:41 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:42 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %42, i32 1 ) ; <i32>:43 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:44 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %44, i32 1 ) ; <i32>:45 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:46 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %46, i64 1 ) ; <i64>:47 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:48 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %48, i64 1 ) ; <i64>:49 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:50 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %50, i64 1 ) ; <i64>:51 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:52 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %52, i64 1 ) ; <i64>:53 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:54 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:55 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:56 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %56, i16 1 ) ; <i16>:57 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:60 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %60, i32 1 ) ; <i32>:61 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:64 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %64, i64 1 ) ; <i64>:65 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:66 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %66, i64 1 ) ; <i64>:67 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:68 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %68, i64 1 ) ; <i64>:69 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:70 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %70, i64 1 ) ; <i64>:71 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:72 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:73 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:76 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %76, i16 1 ) ; <i16>:77 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %80, i32 1 ) ; <i32>:81 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:82 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %82, i64 1 ) ; <i64>:83 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:84 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %84, i64 1 ) ; <i64>:85 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:86 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %86, i64 1 ) ; <i64>:87 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:88 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %88, i64 1 ) ; <i64>:89 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:90 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:91 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:92 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %92, i16 1 ) ; <i16>:93 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:94 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %94, i16 1 ) ; <i16>:95 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:96 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %96, i32 1 ) ; <i32>:97 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:98 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %98, i32 1 ) ; <i32>:99 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:100 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %100, i64 1 ) ; <i64>:101 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:102 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %102, i64 1 ) ; <i64>:103 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:104 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %104, i64 1 ) ; <i64>:105 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:106 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %106, i64 1 ) ; <i64>:107 [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = atomicrmw add i8* @sc, i8 1 monotonic
+ %1 = atomicrmw add i8* @uc, i8 1 monotonic
+ %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %3 = atomicrmw add i16* %2, i16 1 monotonic
+ %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %5 = atomicrmw add i16* %4, i16 1 monotonic
+ %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %7 = atomicrmw add i32* %6, i32 1 monotonic
+ %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %9 = atomicrmw add i32* %8, i32 1 monotonic
+ %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %11 = atomicrmw add i64* %10, i64 1 monotonic
+ %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %13 = atomicrmw add i64* %12, i64 1 monotonic
+ %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %15 = atomicrmw add i64* %14, i64 1 monotonic
+ %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %17 = atomicrmw add i64* %16, i64 1 monotonic
+ %18 = atomicrmw sub i8* @sc, i8 1 monotonic
+ %19 = atomicrmw sub i8* @uc, i8 1 monotonic
+ %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %21 = atomicrmw sub i16* %20, i16 1 monotonic
+ %22 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %23 = atomicrmw sub i16* %22, i16 1 monotonic
+ %24 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %25 = atomicrmw sub i32* %24, i32 1 monotonic
+ %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %27 = atomicrmw sub i32* %26, i32 1 monotonic
+ %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %29 = atomicrmw sub i64* %28, i64 1 monotonic
+ %30 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %31 = atomicrmw sub i64* %30, i64 1 monotonic
+ %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %33 = atomicrmw sub i64* %32, i64 1 monotonic
+ %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %35 = atomicrmw sub i64* %34, i64 1 monotonic
+ %36 = atomicrmw or i8* @sc, i8 1 monotonic
+ %37 = atomicrmw or i8* @uc, i8 1 monotonic
+ %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %39 = atomicrmw or i16* %38, i16 1 monotonic
+ %40 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %41 = atomicrmw or i16* %40, i16 1 monotonic
+ %42 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %43 = atomicrmw or i32* %42, i32 1 monotonic
+ %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %45 = atomicrmw or i32* %44, i32 1 monotonic
+ %46 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %47 = atomicrmw or i64* %46, i64 1 monotonic
+ %48 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %49 = atomicrmw or i64* %48, i64 1 monotonic
+ %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %51 = atomicrmw or i64* %50, i64 1 monotonic
+ %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %53 = atomicrmw or i64* %52, i64 1 monotonic
+ %54 = atomicrmw xor i8* @sc, i8 1 monotonic
+ %55 = atomicrmw xor i8* @uc, i8 1 monotonic
+ %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %57 = atomicrmw xor i16* %56, i16 1 monotonic
+ %58 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %59 = atomicrmw xor i16* %58, i16 1 monotonic
+ %60 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %61 = atomicrmw xor i32* %60, i32 1 monotonic
+ %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %63 = atomicrmw xor i32* %62, i32 1 monotonic
+ %64 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %65 = atomicrmw xor i64* %64, i64 1 monotonic
+ %66 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %67 = atomicrmw xor i64* %66, i64 1 monotonic
+ %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %69 = atomicrmw xor i64* %68, i64 1 monotonic
+ %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %71 = atomicrmw xor i64* %70, i64 1 monotonic
+ %72 = atomicrmw and i8* @sc, i8 1 monotonic
+ %73 = atomicrmw and i8* @uc, i8 1 monotonic
+ %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %75 = atomicrmw and i16* %74, i16 1 monotonic
+ %76 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %77 = atomicrmw and i16* %76, i16 1 monotonic
+ %78 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %79 = atomicrmw and i32* %78, i32 1 monotonic
+ %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %81 = atomicrmw and i32* %80, i32 1 monotonic
+ %82 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %83 = atomicrmw and i64* %82, i64 1 monotonic
+ %84 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %85 = atomicrmw and i64* %84, i64 1 monotonic
+ %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %87 = atomicrmw and i64* %86, i64 1 monotonic
+ %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %89 = atomicrmw and i64* %88, i64 1 monotonic
+ %90 = atomicrmw nand i8* @sc, i8 1 monotonic
+ %91 = atomicrmw nand i8* @uc, i8 1 monotonic
+ %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %93 = atomicrmw nand i16* %92, i16 1 monotonic
+ %94 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %95 = atomicrmw nand i16* %94, i16 1 monotonic
+ %96 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %97 = atomicrmw nand i32* %96, i32 1 monotonic
+ %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %99 = atomicrmw nand i32* %98, i32 1 monotonic
+ %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %101 = atomicrmw nand i64* %100, i64 1 monotonic
+ %102 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %103 = atomicrmw nand i64* %102, i64 1 monotonic
+ %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %105 = atomicrmw nand i64* %104, i64 1 monotonic
+ %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %107 = atomicrmw nand i64* %106, i64 1 monotonic
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind
-
define void @test_fetch_and_op() nounwind {
entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 11 ) ; <i64>:11 [#uses=1]
- store i64 %11, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 11 ) ; <i64>:13 [#uses=1]
- store i64 %13, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:14 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %14, i64 11 ) ; <i64>:15 [#uses=1]
- store i64 %15, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:16 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %16, i64 11 ) ; <i64>:17 [#uses=1]
- store i64 %17, i64* @ull, align 8
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:18 [#uses=1]
- store i8 %18, i8* @sc, align 1
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:19 [#uses=1]
- store i8 %19, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:20 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %20, i16 11 ) ; <i16>:21 [#uses=1]
- store i16 %21, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:22 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %22, i16 11 ) ; <i16>:23 [#uses=1]
- store i16 %23, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; <i32>:25 [#uses=1]
- store i32 %25, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; <i32>:27 [#uses=1]
- store i32 %27, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:28 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %28, i64 11 ) ; <i64>:29 [#uses=1]
- store i64 %29, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:30 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %30, i64 11 ) ; <i64>:31 [#uses=1]
- store i64 %31, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:32 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %32, i64 11 ) ; <i64>:33 [#uses=1]
- store i64 %33, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:34 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %34, i64 11 ) ; <i64>:35 [#uses=1]
- store i64 %35, i64* @ull, align 8
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:36 [#uses=1]
- store i8 %36, i8* @sc, align 1
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:37 [#uses=1]
- store i8 %37, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:38 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %38, i16 11 ) ; <i16>:39 [#uses=1]
- store i16 %39, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:40 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %40, i16 11 ) ; <i16>:41 [#uses=1]
- store i16 %41, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:42 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %42, i32 11 ) ; <i32>:43 [#uses=1]
- store i32 %43, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:44 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %44, i32 11 ) ; <i32>:45 [#uses=1]
- store i32 %45, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:46 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %46, i64 11 ) ; <i64>:47 [#uses=1]
- store i64 %47, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:48 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %48, i64 11 ) ; <i64>:49 [#uses=1]
- store i64 %49, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:50 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %50, i64 11 ) ; <i64>:51 [#uses=1]
- store i64 %51, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:52 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %52, i64 11 ) ; <i64>:53 [#uses=1]
- store i64 %53, i64* @ull, align 8
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:54 [#uses=1]
- store i8 %54, i8* @sc, align 1
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:55 [#uses=1]
- store i8 %55, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:56 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %56, i16 11 ) ; <i16>:57 [#uses=1]
- store i16 %57, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1]
- store i16 %59, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:60 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %60, i32 11 ) ; <i32>:61 [#uses=1]
- store i32 %61, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1]
- store i32 %63, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:64 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %64, i64 11 ) ; <i64>:65 [#uses=1]
- store i64 %65, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:66 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %66, i64 11 ) ; <i64>:67 [#uses=1]
- store i64 %67, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:68 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %68, i64 11 ) ; <i64>:69 [#uses=1]
- store i64 %69, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:70 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %70, i64 11 ) ; <i64>:71 [#uses=1]
- store i64 %71, i64* @ull, align 8
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:72 [#uses=1]
- store i8 %72, i8* @sc, align 1
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:73 [#uses=1]
- store i8 %73, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1]
- store i16 %75, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:76 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %76, i16 11 ) ; <i16>:77 [#uses=1]
- store i16 %77, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1]
- store i32 %79, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %80, i32 11 ) ; <i32>:81 [#uses=1]
- store i32 %81, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:82 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %82, i64 11 ) ; <i64>:83 [#uses=1]
- store i64 %83, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:84 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %84, i64 11 ) ; <i64>:85 [#uses=1]
- store i64 %85, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:86 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %86, i64 11 ) ; <i64>:87 [#uses=1]
- store i64 %87, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:88 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %88, i64 11 ) ; <i64>:89 [#uses=1]
- store i64 %89, i64* @ull, align 8
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:90 [#uses=1]
- store i8 %90, i8* @sc, align 1
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:91 [#uses=1]
- store i8 %91, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:92 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %92, i16 11 ) ; <i16>:93 [#uses=1]
- store i16 %93, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:94 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %94, i16 11 ) ; <i16>:95 [#uses=1]
- store i16 %95, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:96 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %96, i32 11 ) ; <i32>:97 [#uses=1]
- store i32 %97, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:98 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %98, i32 11 ) ; <i32>:99 [#uses=1]
- store i32 %99, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:100 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %100, i64 11 ) ; <i64>:101 [#uses=1]
- store i64 %101, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:102 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %102, i64 11 ) ; <i64>:103 [#uses=1]
- store i64 %103, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:104 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %104, i64 11 ) ; <i64>:105 [#uses=1]
- store i64 %105, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:106 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %106, i64 11 ) ; <i64>:107 [#uses=1]
- store i64 %107, i64* @ull, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = atomicrmw add i8* @sc, i8 11 monotonic
+ store i8 %0, i8* @sc, align 1
+ %1 = atomicrmw add i8* @uc, i8 11 monotonic
+ store i8 %1, i8* @uc, align 1
+ %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %3 = atomicrmw add i16* %2, i16 11 monotonic
+ store i16 %3, i16* @ss, align 2
+ %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %5 = atomicrmw add i16* %4, i16 11 monotonic
+ store i16 %5, i16* @us, align 2
+ %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %7 = atomicrmw add i32* %6, i32 11 monotonic
+ store i32 %7, i32* @si, align 4
+ %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %9 = atomicrmw add i32* %8, i32 11 monotonic
+ store i32 %9, i32* @ui, align 4
+ %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %11 = atomicrmw add i64* %10, i64 11 monotonic
+ store i64 %11, i64* @sl, align 8
+ %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %13 = atomicrmw add i64* %12, i64 11 monotonic
+ store i64 %13, i64* @ul, align 8
+ %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %15 = atomicrmw add i64* %14, i64 11 monotonic
+ store i64 %15, i64* @sll, align 8
+ %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %17 = atomicrmw add i64* %16, i64 11 monotonic
+ store i64 %17, i64* @ull, align 8
+ %18 = atomicrmw sub i8* @sc, i8 11 monotonic
+ store i8 %18, i8* @sc, align 1
+ %19 = atomicrmw sub i8* @uc, i8 11 monotonic
+ store i8 %19, i8* @uc, align 1
+ %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %21 = atomicrmw sub i16* %20, i16 11 monotonic
+ store i16 %21, i16* @ss, align 2
+ %22 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %23 = atomicrmw sub i16* %22, i16 11 monotonic
+ store i16 %23, i16* @us, align 2
+ %24 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %25 = atomicrmw sub i32* %24, i32 11 monotonic
+ store i32 %25, i32* @si, align 4
+ %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %27 = atomicrmw sub i32* %26, i32 11 monotonic
+ store i32 %27, i32* @ui, align 4
+ %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %29 = atomicrmw sub i64* %28, i64 11 monotonic
+ store i64 %29, i64* @sl, align 8
+ %30 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %31 = atomicrmw sub i64* %30, i64 11 monotonic
+ store i64 %31, i64* @ul, align 8
+ %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %33 = atomicrmw sub i64* %32, i64 11 monotonic
+ store i64 %33, i64* @sll, align 8
+ %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %35 = atomicrmw sub i64* %34, i64 11 monotonic
+ store i64 %35, i64* @ull, align 8
+ %36 = atomicrmw or i8* @sc, i8 11 monotonic
+ store i8 %36, i8* @sc, align 1
+ %37 = atomicrmw or i8* @uc, i8 11 monotonic
+ store i8 %37, i8* @uc, align 1
+ %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %39 = atomicrmw or i16* %38, i16 11 monotonic
+ store i16 %39, i16* @ss, align 2
+ %40 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %41 = atomicrmw or i16* %40, i16 11 monotonic
+ store i16 %41, i16* @us, align 2
+ %42 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %43 = atomicrmw or i32* %42, i32 11 monotonic
+ store i32 %43, i32* @si, align 4
+ %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %45 = atomicrmw or i32* %44, i32 11 monotonic
+ store i32 %45, i32* @ui, align 4
+ %46 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %47 = atomicrmw or i64* %46, i64 11 monotonic
+ store i64 %47, i64* @sl, align 8
+ %48 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %49 = atomicrmw or i64* %48, i64 11 monotonic
+ store i64 %49, i64* @ul, align 8
+ %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %51 = atomicrmw or i64* %50, i64 11 monotonic
+ store i64 %51, i64* @sll, align 8
+ %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %53 = atomicrmw or i64* %52, i64 11 monotonic
+ store i64 %53, i64* @ull, align 8
+ %54 = atomicrmw xor i8* @sc, i8 11 monotonic
+ store i8 %54, i8* @sc, align 1
+ %55 = atomicrmw xor i8* @uc, i8 11 monotonic
+ store i8 %55, i8* @uc, align 1
+ %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %57 = atomicrmw xor i16* %56, i16 11 monotonic
+ store i16 %57, i16* @ss, align 2
+ %58 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %59 = atomicrmw xor i16* %58, i16 11 monotonic
+ store i16 %59, i16* @us, align 2
+ %60 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %61 = atomicrmw xor i32* %60, i32 11 monotonic
+ store i32 %61, i32* @si, align 4
+ %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %63 = atomicrmw xor i32* %62, i32 11 monotonic
+ store i32 %63, i32* @ui, align 4
+ %64 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %65 = atomicrmw xor i64* %64, i64 11 monotonic
+ store i64 %65, i64* @sl, align 8
+ %66 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %67 = atomicrmw xor i64* %66, i64 11 monotonic
+ store i64 %67, i64* @ul, align 8
+ %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %69 = atomicrmw xor i64* %68, i64 11 monotonic
+ store i64 %69, i64* @sll, align 8
+ %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %71 = atomicrmw xor i64* %70, i64 11 monotonic
+ store i64 %71, i64* @ull, align 8
+ %72 = atomicrmw and i8* @sc, i8 11 monotonic
+ store i8 %72, i8* @sc, align 1
+ %73 = atomicrmw and i8* @uc, i8 11 monotonic
+ store i8 %73, i8* @uc, align 1
+ %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %75 = atomicrmw and i16* %74, i16 11 monotonic
+ store i16 %75, i16* @ss, align 2
+ %76 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %77 = atomicrmw and i16* %76, i16 11 monotonic
+ store i16 %77, i16* @us, align 2
+ %78 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %79 = atomicrmw and i32* %78, i32 11 monotonic
+ store i32 %79, i32* @si, align 4
+ %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %81 = atomicrmw and i32* %80, i32 11 monotonic
+ store i32 %81, i32* @ui, align 4
+ %82 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %83 = atomicrmw and i64* %82, i64 11 monotonic
+ store i64 %83, i64* @sl, align 8
+ %84 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %85 = atomicrmw and i64* %84, i64 11 monotonic
+ store i64 %85, i64* @ul, align 8
+ %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %87 = atomicrmw and i64* %86, i64 11 monotonic
+ store i64 %87, i64* @sll, align 8
+ %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %89 = atomicrmw and i64* %88, i64 11 monotonic
+ store i64 %89, i64* @ull, align 8
+ %90 = atomicrmw nand i8* @sc, i8 11 monotonic
+ store i8 %90, i8* @sc, align 1
+ %91 = atomicrmw nand i8* @uc, i8 11 monotonic
+ store i8 %91, i8* @uc, align 1
+ %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %93 = atomicrmw nand i16* %92, i16 11 monotonic
+ store i16 %93, i16* @ss, align 2
+ %94 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %95 = atomicrmw nand i16* %94, i16 11 monotonic
+ store i16 %95, i16* @us, align 2
+ %96 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %97 = atomicrmw nand i32* %96, i32 11 monotonic
+ store i32 %97, i32* @si, align 4
+ %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %99 = atomicrmw nand i32* %98, i32 11 monotonic
+ store i32 %99, i32* @ui, align 4
+ %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %101 = atomicrmw nand i64* %100, i64 11 monotonic
+ store i64 %101, i64* @sl, align 8
+ %102 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %103 = atomicrmw nand i64* %102, i64 11 monotonic
+ store i64 %103, i64* @ul, align 8
+ %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %105 = atomicrmw nand i64* %104, i64 11 monotonic
+ store i64 %105, i64* @sll, align 8
+ %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %107 = atomicrmw nand i64* %106, i64 11 monotonic
+ store i64 %107, i64* @ull, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
define void @test_op_and_fetch() nounwind {
entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=1]
- zext i8 %0 to i32 ; <i32>:1 [#uses=1]
- trunc i32 %1 to i8 ; <i8>:2 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %2 ) ; <i8>:3 [#uses=1]
- add i8 %3, %2 ; <i8>:4 [#uses=1]
- store i8 %4, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:5 [#uses=1]
- zext i8 %5 to i32 ; <i32>:6 [#uses=1]
- trunc i32 %6 to i8 ; <i8>:7 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %7 ) ; <i8>:8 [#uses=1]
- add i8 %8, %7 ; <i8>:9 [#uses=1]
- store i8 %9, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:10 [#uses=1]
- zext i8 %10 to i32 ; <i32>:11 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:12 [#uses=1]
- trunc i32 %11 to i16 ; <i16>:13 [#uses=2]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %12, i16 %13 ) ; <i16>:14 [#uses=1]
- add i16 %14, %13 ; <i16>:15 [#uses=1]
- store i16 %15, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:16 [#uses=1]
- zext i8 %16 to i32 ; <i32>:17 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- trunc i32 %17 to i16 ; <i16>:19 [#uses=2]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %18, i16 %19 ) ; <i16>:20 [#uses=1]
- add i16 %20, %19 ; <i16>:21 [#uses=1]
- store i16 %21, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:22 [#uses=1]
- zext i8 %22 to i32 ; <i32>:23 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %24, i32 %23 ) ; <i32>:25 [#uses=1]
- add i32 %25, %23 ; <i32>:26 [#uses=1]
- store i32 %26, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:27 [#uses=1]
- zext i8 %27 to i32 ; <i32>:28 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:29 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %29, i32 %28 ) ; <i32>:30 [#uses=1]
- add i32 %30, %28 ; <i32>:31 [#uses=1]
- store i32 %31, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:32 [#uses=1]
- zext i8 %32 to i64 ; <i64>:33 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:34 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %34, i64 %33 ) ; <i64>:35 [#uses=1]
- add i64 %35, %33 ; <i64>:36 [#uses=1]
- store i64 %36, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:37 [#uses=1]
- zext i8 %37 to i64 ; <i64>:38 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:39 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %39, i64 %38 ) ; <i64>:40 [#uses=1]
- add i64 %40, %38 ; <i64>:41 [#uses=1]
- store i64 %41, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:42 [#uses=1]
- zext i8 %42 to i64 ; <i64>:43 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:44 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %44, i64 %43 ) ; <i64>:45 [#uses=1]
- add i64 %45, %43 ; <i64>:46 [#uses=1]
- store i64 %46, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:47 [#uses=1]
- zext i8 %47 to i64 ; <i64>:48 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:49 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %49, i64 %48 ) ; <i64>:50 [#uses=1]
- add i64 %50, %48 ; <i64>:51 [#uses=1]
- store i64 %51, i64* @ull, align 8
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=1]
- trunc i32 %53 to i8 ; <i8>:54 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %54 ) ; <i8>:55 [#uses=1]
- sub i8 %55, %54 ; <i8>:56 [#uses=1]
- store i8 %56, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:57 [#uses=1]
- zext i8 %57 to i32 ; <i32>:58 [#uses=1]
- trunc i32 %58 to i8 ; <i8>:59 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %59 ) ; <i8>:60 [#uses=1]
- sub i8 %60, %59 ; <i8>:61 [#uses=1]
- store i8 %61, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:62 [#uses=1]
- zext i8 %62 to i32 ; <i32>:63 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:64 [#uses=1]
- trunc i32 %63 to i16 ; <i16>:65 [#uses=2]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %64, i16 %65 ) ; <i16>:66 [#uses=1]
- sub i16 %66, %65 ; <i16>:67 [#uses=1]
- store i16 %67, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:68 [#uses=1]
- zext i8 %68 to i32 ; <i32>:69 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:70 [#uses=1]
- trunc i32 %69 to i16 ; <i16>:71 [#uses=2]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %70, i16 %71 ) ; <i16>:72 [#uses=1]
- sub i16 %72, %71 ; <i16>:73 [#uses=1]
- store i16 %73, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:74 [#uses=1]
- zext i8 %74 to i32 ; <i32>:75 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %76, i32 %75 ) ; <i32>:77 [#uses=1]
- sub i32 %77, %75 ; <i32>:78 [#uses=1]
- store i32 %78, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:79 [#uses=1]
- zext i8 %79 to i32 ; <i32>:80 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:81 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %81, i32 %80 ) ; <i32>:82 [#uses=1]
- sub i32 %82, %80 ; <i32>:83 [#uses=1]
- store i32 %83, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:84 [#uses=1]
- zext i8 %84 to i64 ; <i64>:85 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:86 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %86, i64 %85 ) ; <i64>:87 [#uses=1]
- sub i64 %87, %85 ; <i64>:88 [#uses=1]
- store i64 %88, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:89 [#uses=1]
- zext i8 %89 to i64 ; <i64>:90 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:91 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %91, i64 %90 ) ; <i64>:92 [#uses=1]
- sub i64 %92, %90 ; <i64>:93 [#uses=1]
- store i64 %93, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:94 [#uses=1]
- zext i8 %94 to i64 ; <i64>:95 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:96 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %96, i64 %95 ) ; <i64>:97 [#uses=1]
- sub i64 %97, %95 ; <i64>:98 [#uses=1]
- store i64 %98, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:99 [#uses=1]
- zext i8 %99 to i64 ; <i64>:100 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:101 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %101, i64 %100 ) ; <i64>:102 [#uses=1]
- sub i64 %102, %100 ; <i64>:103 [#uses=1]
- store i64 %103, i64* @ull, align 8
- load i8* @uc, align 1 ; <i8>:104 [#uses=1]
- zext i8 %104 to i32 ; <i32>:105 [#uses=1]
- trunc i32 %105 to i8 ; <i8>:106 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %106 ) ; <i8>:107 [#uses=1]
- or i8 %107, %106 ; <i8>:108 [#uses=1]
- store i8 %108, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:109 [#uses=1]
- zext i8 %109 to i32 ; <i32>:110 [#uses=1]
- trunc i32 %110 to i8 ; <i8>:111 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %111 ) ; <i8>:112 [#uses=1]
- or i8 %112, %111 ; <i8>:113 [#uses=1]
- store i8 %113, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:114 [#uses=1]
- zext i8 %114 to i32 ; <i32>:115 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:116 [#uses=1]
- trunc i32 %115 to i16 ; <i16>:117 [#uses=2]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %116, i16 %117 ) ; <i16>:118 [#uses=1]
- or i16 %118, %117 ; <i16>:119 [#uses=1]
- store i16 %119, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:120 [#uses=1]
- zext i8 %120 to i32 ; <i32>:121 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:122 [#uses=1]
- trunc i32 %121 to i16 ; <i16>:123 [#uses=2]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %122, i16 %123 ) ; <i16>:124 [#uses=1]
- or i16 %124, %123 ; <i16>:125 [#uses=1]
- store i16 %125, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:126 [#uses=1]
- zext i8 %126 to i32 ; <i32>:127 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:128 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %128, i32 %127 ) ; <i32>:129 [#uses=1]
- or i32 %129, %127 ; <i32>:130 [#uses=1]
- store i32 %130, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:131 [#uses=1]
- zext i8 %131 to i32 ; <i32>:132 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:133 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %133, i32 %132 ) ; <i32>:134 [#uses=1]
- or i32 %134, %132 ; <i32>:135 [#uses=1]
- store i32 %135, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:136 [#uses=1]
- zext i8 %136 to i64 ; <i64>:137 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:138 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %138, i64 %137 ) ; <i64>:139 [#uses=1]
- or i64 %139, %137 ; <i64>:140 [#uses=1]
- store i64 %140, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:141 [#uses=1]
- zext i8 %141 to i64 ; <i64>:142 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:143 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %143, i64 %142 ) ; <i64>:144 [#uses=1]
- or i64 %144, %142 ; <i64>:145 [#uses=1]
- store i64 %145, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:146 [#uses=1]
- zext i8 %146 to i64 ; <i64>:147 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:148 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %148, i64 %147 ) ; <i64>:149 [#uses=1]
- or i64 %149, %147 ; <i64>:150 [#uses=1]
- store i64 %150, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:151 [#uses=1]
- zext i8 %151 to i64 ; <i64>:152 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:153 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %153, i64 %152 ) ; <i64>:154 [#uses=1]
- or i64 %154, %152 ; <i64>:155 [#uses=1]
- store i64 %155, i64* @ull, align 8
- load i8* @uc, align 1 ; <i8>:156 [#uses=1]
- zext i8 %156 to i32 ; <i32>:157 [#uses=1]
- trunc i32 %157 to i8 ; <i8>:158 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %158 ) ; <i8>:159 [#uses=1]
- xor i8 %159, %158 ; <i8>:160 [#uses=1]
- store i8 %160, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:161 [#uses=1]
- zext i8 %161 to i32 ; <i32>:162 [#uses=1]
- trunc i32 %162 to i8 ; <i8>:163 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %163 ) ; <i8>:164 [#uses=1]
- xor i8 %164, %163 ; <i8>:165 [#uses=1]
- store i8 %165, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:166 [#uses=1]
- zext i8 %166 to i32 ; <i32>:167 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:168 [#uses=1]
- trunc i32 %167 to i16 ; <i16>:169 [#uses=2]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %168, i16 %169 ) ; <i16>:170 [#uses=1]
- xor i16 %170, %169 ; <i16>:171 [#uses=1]
- store i16 %171, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:172 [#uses=1]
- zext i8 %172 to i32 ; <i32>:173 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:174 [#uses=1]
- trunc i32 %173 to i16 ; <i16>:175 [#uses=2]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %174, i16 %175 ) ; <i16>:176 [#uses=1]
- xor i16 %176, %175 ; <i16>:177 [#uses=1]
- store i16 %177, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:178 [#uses=1]
- zext i8 %178 to i32 ; <i32>:179 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:180 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %180, i32 %179 ) ; <i32>:181 [#uses=1]
- xor i32 %181, %179 ; <i32>:182 [#uses=1]
- store i32 %182, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:183 [#uses=1]
- zext i8 %183 to i32 ; <i32>:184 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:185 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %185, i32 %184 ) ; <i32>:186 [#uses=1]
- xor i32 %186, %184 ; <i32>:187 [#uses=1]
- store i32 %187, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:188 [#uses=1]
- zext i8 %188 to i64 ; <i64>:189 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:190 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %190, i64 %189 ) ; <i64>:191 [#uses=1]
- xor i64 %191, %189 ; <i64>:192 [#uses=1]
- store i64 %192, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:193 [#uses=1]
- zext i8 %193 to i64 ; <i64>:194 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:195 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %195, i64 %194 ) ; <i64>:196 [#uses=1]
- xor i64 %196, %194 ; <i64>:197 [#uses=1]
- store i64 %197, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:198 [#uses=1]
- zext i8 %198 to i64 ; <i64>:199 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:200 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %200, i64 %199 ) ; <i64>:201 [#uses=1]
- xor i64 %201, %199 ; <i64>:202 [#uses=1]
- store i64 %202, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:203 [#uses=1]
- zext i8 %203 to i64 ; <i64>:204 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:205 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %205, i64 %204 ) ; <i64>:206 [#uses=1]
- xor i64 %206, %204 ; <i64>:207 [#uses=1]
- store i64 %207, i64* @ull, align 8
- load i8* @uc, align 1 ; <i8>:208 [#uses=1]
- zext i8 %208 to i32 ; <i32>:209 [#uses=1]
- trunc i32 %209 to i8 ; <i8>:210 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %210 ) ; <i8>:211 [#uses=1]
- and i8 %211, %210 ; <i8>:212 [#uses=1]
- store i8 %212, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:213 [#uses=1]
- zext i8 %213 to i32 ; <i32>:214 [#uses=1]
- trunc i32 %214 to i8 ; <i8>:215 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %215 ) ; <i8>:216 [#uses=1]
- and i8 %216, %215 ; <i8>:217 [#uses=1]
- store i8 %217, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:218 [#uses=1]
- zext i8 %218 to i32 ; <i32>:219 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:220 [#uses=1]
- trunc i32 %219 to i16 ; <i16>:221 [#uses=2]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %220, i16 %221 ) ; <i16>:222 [#uses=1]
- and i16 %222, %221 ; <i16>:223 [#uses=1]
- store i16 %223, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:224 [#uses=1]
- zext i8 %224 to i32 ; <i32>:225 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:226 [#uses=1]
- trunc i32 %225 to i16 ; <i16>:227 [#uses=2]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %226, i16 %227 ) ; <i16>:228 [#uses=1]
- and i16 %228, %227 ; <i16>:229 [#uses=1]
- store i16 %229, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:230 [#uses=1]
- zext i8 %230 to i32 ; <i32>:231 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:232 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %232, i32 %231 ) ; <i32>:233 [#uses=1]
- and i32 %233, %231 ; <i32>:234 [#uses=1]
- store i32 %234, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:235 [#uses=1]
- zext i8 %235 to i32 ; <i32>:236 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:237 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %237, i32 %236 ) ; <i32>:238 [#uses=1]
- and i32 %238, %236 ; <i32>:239 [#uses=1]
- store i32 %239, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:240 [#uses=1]
- zext i8 %240 to i64 ; <i64>:241 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:242 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %242, i64 %241 ) ; <i64>:243 [#uses=1]
- and i64 %243, %241 ; <i64>:244 [#uses=1]
- store i64 %244, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:245 [#uses=1]
- zext i8 %245 to i64 ; <i64>:246 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:247 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %247, i64 %246 ) ; <i64>:248 [#uses=1]
- and i64 %248, %246 ; <i64>:249 [#uses=1]
- store i64 %249, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:250 [#uses=1]
- zext i8 %250 to i64 ; <i64>:251 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:252 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %252, i64 %251 ) ; <i64>:253 [#uses=1]
- and i64 %253, %251 ; <i64>:254 [#uses=1]
- store i64 %254, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:255 [#uses=1]
- zext i8 %255 to i64 ; <i64>:256 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:257 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %257, i64 %256 ) ; <i64>:258 [#uses=1]
- and i64 %258, %256 ; <i64>:259 [#uses=1]
- store i64 %259, i64* @ull, align 8
- load i8* @uc, align 1 ; <i8>:260 [#uses=1]
- zext i8 %260 to i32 ; <i32>:261 [#uses=1]
- trunc i32 %261 to i8 ; <i8>:262 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %262 ) ; <i8>:263 [#uses=1]
- xor i8 %263, -1 ; <i8>:264 [#uses=1]
- and i8 %264, %262 ; <i8>:265 [#uses=1]
- store i8 %265, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:266 [#uses=1]
- zext i8 %266 to i32 ; <i32>:267 [#uses=1]
- trunc i32 %267 to i8 ; <i8>:268 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %268 ) ; <i8>:269 [#uses=1]
- xor i8 %269, -1 ; <i8>:270 [#uses=1]
- and i8 %270, %268 ; <i8>:271 [#uses=1]
- store i8 %271, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:272 [#uses=1]
- zext i8 %272 to i32 ; <i32>:273 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:274 [#uses=1]
- trunc i32 %273 to i16 ; <i16>:275 [#uses=2]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %274, i16 %275 ) ; <i16>:276 [#uses=1]
- xor i16 %276, -1 ; <i16>:277 [#uses=1]
- and i16 %277, %275 ; <i16>:278 [#uses=1]
- store i16 %278, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:279 [#uses=1]
- zext i8 %279 to i32 ; <i32>:280 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:281 [#uses=1]
- trunc i32 %280 to i16 ; <i16>:282 [#uses=2]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %281, i16 %282 ) ; <i16>:283 [#uses=1]
- xor i16 %283, -1 ; <i16>:284 [#uses=1]
- and i16 %284, %282 ; <i16>:285 [#uses=1]
- store i16 %285, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:286 [#uses=1]
- zext i8 %286 to i32 ; <i32>:287 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:288 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %288, i32 %287 ) ; <i32>:289 [#uses=1]
- xor i32 %289, -1 ; <i32>:290 [#uses=1]
- and i32 %290, %287 ; <i32>:291 [#uses=1]
- store i32 %291, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:292 [#uses=1]
- zext i8 %292 to i32 ; <i32>:293 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:294 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %294, i32 %293 ) ; <i32>:295 [#uses=1]
- xor i32 %295, -1 ; <i32>:296 [#uses=1]
- and i32 %296, %293 ; <i32>:297 [#uses=1]
- store i32 %297, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:298 [#uses=1]
- zext i8 %298 to i64 ; <i64>:299 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:300 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %300, i64 %299 ) ; <i64>:301 [#uses=1]
- xor i64 %301, -1 ; <i64>:302 [#uses=1]
- and i64 %302, %299 ; <i64>:303 [#uses=1]
- store i64 %303, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:304 [#uses=1]
- zext i8 %304 to i64 ; <i64>:305 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:306 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %306, i64 %305 ) ; <i64>:307 [#uses=1]
- xor i64 %307, -1 ; <i64>:308 [#uses=1]
- and i64 %308, %305 ; <i64>:309 [#uses=1]
- store i64 %309, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:310 [#uses=1]
- zext i8 %310 to i64 ; <i64>:311 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:312 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %312, i64 %311 ) ; <i64>:313 [#uses=1]
- xor i64 %313, -1 ; <i64>:314 [#uses=1]
- and i64 %314, %311 ; <i64>:315 [#uses=1]
- store i64 %315, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:316 [#uses=1]
- zext i8 %316 to i64 ; <i64>:317 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:318 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %318, i64 %317 ) ; <i64>:319 [#uses=1]
- xor i64 %319, -1 ; <i64>:320 [#uses=1]
- and i64 %320, %317 ; <i64>:321 [#uses=1]
- store i64 %321, i64* @ull, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = load i8* @uc, align 1
+ %1 = zext i8 %0 to i32
+ %2 = trunc i32 %1 to i8
+ %3 = atomicrmw add i8* @sc, i8 %2 monotonic
+ %4 = add i8 %3, %2
+ store i8 %4, i8* @sc, align 1
+ %5 = load i8* @uc, align 1
+ %6 = zext i8 %5 to i32
+ %7 = trunc i32 %6 to i8
+ %8 = atomicrmw add i8* @uc, i8 %7 monotonic
+ %9 = add i8 %8, %7
+ store i8 %9, i8* @uc, align 1
+ %10 = load i8* @uc, align 1
+ %11 = zext i8 %10 to i32
+ %12 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %13 = trunc i32 %11 to i16
+ %14 = atomicrmw add i16* %12, i16 %13 monotonic
+ %15 = add i16 %14, %13
+ store i16 %15, i16* @ss, align 2
+ %16 = load i8* @uc, align 1
+ %17 = zext i8 %16 to i32
+ %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %19 = trunc i32 %17 to i16
+ %20 = atomicrmw add i16* %18, i16 %19 monotonic
+ %21 = add i16 %20, %19
+ store i16 %21, i16* @us, align 2
+ %22 = load i8* @uc, align 1
+ %23 = zext i8 %22 to i32
+ %24 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %25 = atomicrmw add i32* %24, i32 %23 monotonic
+ %26 = add i32 %25, %23
+ store i32 %26, i32* @si, align 4
+ %27 = load i8* @uc, align 1
+ %28 = zext i8 %27 to i32
+ %29 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %30 = atomicrmw add i32* %29, i32 %28 monotonic
+ %31 = add i32 %30, %28
+ store i32 %31, i32* @ui, align 4
+ %32 = load i8* @uc, align 1
+ %33 = zext i8 %32 to i64
+ %34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %35 = atomicrmw add i64* %34, i64 %33 monotonic
+ %36 = add i64 %35, %33
+ store i64 %36, i64* @sl, align 8
+ %37 = load i8* @uc, align 1
+ %38 = zext i8 %37 to i64
+ %39 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %40 = atomicrmw add i64* %39, i64 %38 monotonic
+ %41 = add i64 %40, %38
+ store i64 %41, i64* @ul, align 8
+ %42 = load i8* @uc, align 1
+ %43 = zext i8 %42 to i64
+ %44 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %45 = atomicrmw add i64* %44, i64 %43 monotonic
+ %46 = add i64 %45, %43
+ store i64 %46, i64* @sll, align 8
+ %47 = load i8* @uc, align 1
+ %48 = zext i8 %47 to i64
+ %49 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %50 = atomicrmw add i64* %49, i64 %48 monotonic
+ %51 = add i64 %50, %48
+ store i64 %51, i64* @ull, align 8
+ %52 = load i8* @uc, align 1
+ %53 = zext i8 %52 to i32
+ %54 = trunc i32 %53 to i8
+ %55 = atomicrmw sub i8* @sc, i8 %54 monotonic
+ %56 = sub i8 %55, %54
+ store i8 %56, i8* @sc, align 1
+ %57 = load i8* @uc, align 1
+ %58 = zext i8 %57 to i32
+ %59 = trunc i32 %58 to i8
+ %60 = atomicrmw sub i8* @uc, i8 %59 monotonic
+ %61 = sub i8 %60, %59
+ store i8 %61, i8* @uc, align 1
+ %62 = load i8* @uc, align 1
+ %63 = zext i8 %62 to i32
+ %64 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %65 = trunc i32 %63 to i16
+ %66 = atomicrmw sub i16* %64, i16 %65 monotonic
+ %67 = sub i16 %66, %65
+ store i16 %67, i16* @ss, align 2
+ %68 = load i8* @uc, align 1
+ %69 = zext i8 %68 to i32
+ %70 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %71 = trunc i32 %69 to i16
+ %72 = atomicrmw sub i16* %70, i16 %71 monotonic
+ %73 = sub i16 %72, %71
+ store i16 %73, i16* @us, align 2
+ %74 = load i8* @uc, align 1
+ %75 = zext i8 %74 to i32
+ %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %77 = atomicrmw sub i32* %76, i32 %75 monotonic
+ %78 = sub i32 %77, %75
+ store i32 %78, i32* @si, align 4
+ %79 = load i8* @uc, align 1
+ %80 = zext i8 %79 to i32
+ %81 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %82 = atomicrmw sub i32* %81, i32 %80 monotonic
+ %83 = sub i32 %82, %80
+ store i32 %83, i32* @ui, align 4
+ %84 = load i8* @uc, align 1
+ %85 = zext i8 %84 to i64
+ %86 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %87 = atomicrmw sub i64* %86, i64 %85 monotonic
+ %88 = sub i64 %87, %85
+ store i64 %88, i64* @sl, align 8
+ %89 = load i8* @uc, align 1
+ %90 = zext i8 %89 to i64
+ %91 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %92 = atomicrmw sub i64* %91, i64 %90 monotonic
+ %93 = sub i64 %92, %90
+ store i64 %93, i64* @ul, align 8
+ %94 = load i8* @uc, align 1
+ %95 = zext i8 %94 to i64
+ %96 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %97 = atomicrmw sub i64* %96, i64 %95 monotonic
+ %98 = sub i64 %97, %95
+ store i64 %98, i64* @sll, align 8
+ %99 = load i8* @uc, align 1
+ %100 = zext i8 %99 to i64
+ %101 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %102 = atomicrmw sub i64* %101, i64 %100 monotonic
+ %103 = sub i64 %102, %100
+ store i64 %103, i64* @ull, align 8
+ %104 = load i8* @uc, align 1
+ %105 = zext i8 %104 to i32
+ %106 = trunc i32 %105 to i8
+ %107 = atomicrmw or i8* @sc, i8 %106 monotonic
+ %108 = or i8 %107, %106
+ store i8 %108, i8* @sc, align 1
+ %109 = load i8* @uc, align 1
+ %110 = zext i8 %109 to i32
+ %111 = trunc i32 %110 to i8
+ %112 = atomicrmw or i8* @uc, i8 %111 monotonic
+ %113 = or i8 %112, %111
+ store i8 %113, i8* @uc, align 1
+ %114 = load i8* @uc, align 1
+ %115 = zext i8 %114 to i32
+ %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %117 = trunc i32 %115 to i16
+ %118 = atomicrmw or i16* %116, i16 %117 monotonic
+ %119 = or i16 %118, %117
+ store i16 %119, i16* @ss, align 2
+ %120 = load i8* @uc, align 1
+ %121 = zext i8 %120 to i32
+ %122 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %123 = trunc i32 %121 to i16
+ %124 = atomicrmw or i16* %122, i16 %123 monotonic
+ %125 = or i16 %124, %123
+ store i16 %125, i16* @us, align 2
+ %126 = load i8* @uc, align 1
+ %127 = zext i8 %126 to i32
+ %128 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %129 = atomicrmw or i32* %128, i32 %127 monotonic
+ %130 = or i32 %129, %127
+ store i32 %130, i32* @si, align 4
+ %131 = load i8* @uc, align 1
+ %132 = zext i8 %131 to i32
+ %133 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %134 = atomicrmw or i32* %133, i32 %132 monotonic
+ %135 = or i32 %134, %132
+ store i32 %135, i32* @ui, align 4
+ %136 = load i8* @uc, align 1
+ %137 = zext i8 %136 to i64
+ %138 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %139 = atomicrmw or i64* %138, i64 %137 monotonic
+ %140 = or i64 %139, %137
+ store i64 %140, i64* @sl, align 8
+ %141 = load i8* @uc, align 1
+ %142 = zext i8 %141 to i64
+ %143 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %144 = atomicrmw or i64* %143, i64 %142 monotonic
+ %145 = or i64 %144, %142
+ store i64 %145, i64* @ul, align 8
+ %146 = load i8* @uc, align 1
+ %147 = zext i8 %146 to i64
+ %148 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %149 = atomicrmw or i64* %148, i64 %147 monotonic
+ %150 = or i64 %149, %147
+ store i64 %150, i64* @sll, align 8
+ %151 = load i8* @uc, align 1
+ %152 = zext i8 %151 to i64
+ %153 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %154 = atomicrmw or i64* %153, i64 %152 monotonic
+ %155 = or i64 %154, %152
+ store i64 %155, i64* @ull, align 8
+ %156 = load i8* @uc, align 1
+ %157 = zext i8 %156 to i32
+ %158 = trunc i32 %157 to i8
+ %159 = atomicrmw xor i8* @sc, i8 %158 monotonic
+ %160 = xor i8 %159, %158
+ store i8 %160, i8* @sc, align 1
+ %161 = load i8* @uc, align 1
+ %162 = zext i8 %161 to i32
+ %163 = trunc i32 %162 to i8
+ %164 = atomicrmw xor i8* @uc, i8 %163 monotonic
+ %165 = xor i8 %164, %163
+ store i8 %165, i8* @uc, align 1
+ %166 = load i8* @uc, align 1
+ %167 = zext i8 %166 to i32
+ %168 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %169 = trunc i32 %167 to i16
+ %170 = atomicrmw xor i16* %168, i16 %169 monotonic
+ %171 = xor i16 %170, %169
+ store i16 %171, i16* @ss, align 2
+ %172 = load i8* @uc, align 1
+ %173 = zext i8 %172 to i32
+ %174 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %175 = trunc i32 %173 to i16
+ %176 = atomicrmw xor i16* %174, i16 %175 monotonic
+ %177 = xor i16 %176, %175
+ store i16 %177, i16* @us, align 2
+ %178 = load i8* @uc, align 1
+ %179 = zext i8 %178 to i32
+ %180 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %181 = atomicrmw xor i32* %180, i32 %179 monotonic
+ %182 = xor i32 %181, %179
+ store i32 %182, i32* @si, align 4
+ %183 = load i8* @uc, align 1
+ %184 = zext i8 %183 to i32
+ %185 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %186 = atomicrmw xor i32* %185, i32 %184 monotonic
+ %187 = xor i32 %186, %184
+ store i32 %187, i32* @ui, align 4
+ %188 = load i8* @uc, align 1
+ %189 = zext i8 %188 to i64
+ %190 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %191 = atomicrmw xor i64* %190, i64 %189 monotonic
+ %192 = xor i64 %191, %189
+ store i64 %192, i64* @sl, align 8
+ %193 = load i8* @uc, align 1
+ %194 = zext i8 %193 to i64
+ %195 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %196 = atomicrmw xor i64* %195, i64 %194 monotonic
+ %197 = xor i64 %196, %194
+ store i64 %197, i64* @ul, align 8
+ %198 = load i8* @uc, align 1
+ %199 = zext i8 %198 to i64
+ %200 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %201 = atomicrmw xor i64* %200, i64 %199 monotonic
+ %202 = xor i64 %201, %199
+ store i64 %202, i64* @sll, align 8
+ %203 = load i8* @uc, align 1
+ %204 = zext i8 %203 to i64
+ %205 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %206 = atomicrmw xor i64* %205, i64 %204 monotonic
+ %207 = xor i64 %206, %204
+ store i64 %207, i64* @ull, align 8
+ %208 = load i8* @uc, align 1
+ %209 = zext i8 %208 to i32
+ %210 = trunc i32 %209 to i8
+ %211 = atomicrmw and i8* @sc, i8 %210 monotonic
+ %212 = and i8 %211, %210
+ store i8 %212, i8* @sc, align 1
+ %213 = load i8* @uc, align 1
+ %214 = zext i8 %213 to i32
+ %215 = trunc i32 %214 to i8
+ %216 = atomicrmw and i8* @uc, i8 %215 monotonic
+ %217 = and i8 %216, %215
+ store i8 %217, i8* @uc, align 1
+ %218 = load i8* @uc, align 1
+ %219 = zext i8 %218 to i32
+ %220 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %221 = trunc i32 %219 to i16
+ %222 = atomicrmw and i16* %220, i16 %221 monotonic
+ %223 = and i16 %222, %221
+ store i16 %223, i16* @ss, align 2
+ %224 = load i8* @uc, align 1
+ %225 = zext i8 %224 to i32
+ %226 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %227 = trunc i32 %225 to i16
+ %228 = atomicrmw and i16* %226, i16 %227 monotonic
+ %229 = and i16 %228, %227
+ store i16 %229, i16* @us, align 2
+ %230 = load i8* @uc, align 1
+ %231 = zext i8 %230 to i32
+ %232 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %233 = atomicrmw and i32* %232, i32 %231 monotonic
+ %234 = and i32 %233, %231
+ store i32 %234, i32* @si, align 4
+ %235 = load i8* @uc, align 1
+ %236 = zext i8 %235 to i32
+ %237 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %238 = atomicrmw and i32* %237, i32 %236 monotonic
+ %239 = and i32 %238, %236
+ store i32 %239, i32* @ui, align 4
+ %240 = load i8* @uc, align 1
+ %241 = zext i8 %240 to i64
+ %242 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %243 = atomicrmw and i64* %242, i64 %241 monotonic
+ %244 = and i64 %243, %241
+ store i64 %244, i64* @sl, align 8
+ %245 = load i8* @uc, align 1
+ %246 = zext i8 %245 to i64
+ %247 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %248 = atomicrmw and i64* %247, i64 %246 monotonic
+ %249 = and i64 %248, %246
+ store i64 %249, i64* @ul, align 8
+ %250 = load i8* @uc, align 1
+ %251 = zext i8 %250 to i64
+ %252 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %253 = atomicrmw and i64* %252, i64 %251 monotonic
+ %254 = and i64 %253, %251
+ store i64 %254, i64* @sll, align 8
+ %255 = load i8* @uc, align 1
+ %256 = zext i8 %255 to i64
+ %257 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %258 = atomicrmw and i64* %257, i64 %256 monotonic
+ %259 = and i64 %258, %256
+ store i64 %259, i64* @ull, align 8
+ %260 = load i8* @uc, align 1
+ %261 = zext i8 %260 to i32
+ %262 = trunc i32 %261 to i8
+ %263 = atomicrmw nand i8* @sc, i8 %262 monotonic
+ %264 = xor i8 %263, -1
+ %265 = and i8 %264, %262
+ store i8 %265, i8* @sc, align 1
+ %266 = load i8* @uc, align 1
+ %267 = zext i8 %266 to i32
+ %268 = trunc i32 %267 to i8
+ %269 = atomicrmw nand i8* @uc, i8 %268 monotonic
+ %270 = xor i8 %269, -1
+ %271 = and i8 %270, %268
+ store i8 %271, i8* @uc, align 1
+ %272 = load i8* @uc, align 1
+ %273 = zext i8 %272 to i32
+ %274 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %275 = trunc i32 %273 to i16
+ %276 = atomicrmw nand i16* %274, i16 %275 monotonic
+ %277 = xor i16 %276, -1
+ %278 = and i16 %277, %275
+ store i16 %278, i16* @ss, align 2
+ %279 = load i8* @uc, align 1
+ %280 = zext i8 %279 to i32
+ %281 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %282 = trunc i32 %280 to i16
+ %283 = atomicrmw nand i16* %281, i16 %282 monotonic
+ %284 = xor i16 %283, -1
+ %285 = and i16 %284, %282
+ store i16 %285, i16* @us, align 2
+ %286 = load i8* @uc, align 1
+ %287 = zext i8 %286 to i32
+ %288 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %289 = atomicrmw nand i32* %288, i32 %287 monotonic
+ %290 = xor i32 %289, -1
+ %291 = and i32 %290, %287
+ store i32 %291, i32* @si, align 4
+ %292 = load i8* @uc, align 1
+ %293 = zext i8 %292 to i32
+ %294 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %295 = atomicrmw nand i32* %294, i32 %293 monotonic
+ %296 = xor i32 %295, -1
+ %297 = and i32 %296, %293
+ store i32 %297, i32* @ui, align 4
+ %298 = load i8* @uc, align 1
+ %299 = zext i8 %298 to i64
+ %300 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %301 = atomicrmw nand i64* %300, i64 %299 monotonic
+ %302 = xor i64 %301, -1
+ %303 = and i64 %302, %299
+ store i64 %303, i64* @sl, align 8
+ %304 = load i8* @uc, align 1
+ %305 = zext i8 %304 to i64
+ %306 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %307 = atomicrmw nand i64* %306, i64 %305 monotonic
+ %308 = xor i64 %307, -1
+ %309 = and i64 %308, %305
+ store i64 %309, i64* @ul, align 8
+ %310 = load i8* @uc, align 1
+ %311 = zext i8 %310 to i64
+ %312 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %313 = atomicrmw nand i64* %312, i64 %311 monotonic
+ %314 = xor i64 %313, -1
+ %315 = and i64 %314, %311
+ store i64 %315, i64* @sll, align 8
+ %316 = load i8* @uc, align 1
+ %317 = zext i8 %316 to i64
+ %318 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %319 = atomicrmw nand i64* %318, i64 %317 monotonic
+ %320 = xor i64 %319, -1
+ %321 = and i64 %320, %317
+ store i64 %321, i64* @ull, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
define void @test_compare_and_swap() nounwind {
entry:
- load i8* @sc, align 1 ; <i8>:0 [#uses=1]
- zext i8 %0 to i32 ; <i32>:1 [#uses=1]
- load i8* @uc, align 1 ; <i8>:2 [#uses=1]
- zext i8 %2 to i32 ; <i32>:3 [#uses=1]
- trunc i32 %3 to i8 ; <i8>:4 [#uses=1]
- trunc i32 %1 to i8 ; <i8>:5 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %4, i8 %5 ) ; <i8>:6 [#uses=1]
- store i8 %6, i8* @sc, align 1
- load i8* @sc, align 1 ; <i8>:7 [#uses=1]
- zext i8 %7 to i32 ; <i32>:8 [#uses=1]
- load i8* @uc, align 1 ; <i8>:9 [#uses=1]
- zext i8 %9 to i32 ; <i32>:10 [#uses=1]
- trunc i32 %10 to i8 ; <i8>:11 [#uses=1]
- trunc i32 %8 to i8 ; <i8>:12 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %11, i8 %12 ) ; <i8>:13 [#uses=1]
- store i8 %13, i8* @uc, align 1
- load i8* @sc, align 1 ; <i8>:14 [#uses=1]
- sext i8 %14 to i16 ; <i16>:15 [#uses=1]
- zext i16 %15 to i32 ; <i32>:16 [#uses=1]
- load i8* @uc, align 1 ; <i8>:17 [#uses=1]
- zext i8 %17 to i32 ; <i32>:18 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:19 [#uses=1]
- trunc i32 %18 to i16 ; <i16>:20 [#uses=1]
- trunc i32 %16 to i16 ; <i16>:21 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %19, i16 %20, i16 %21 ) ; <i16>:22 [#uses=1]
- store i16 %22, i16* @ss, align 2
- load i8* @sc, align 1 ; <i8>:23 [#uses=1]
- sext i8 %23 to i16 ; <i16>:24 [#uses=1]
- zext i16 %24 to i32 ; <i32>:25 [#uses=1]
- load i8* @uc, align 1 ; <i8>:26 [#uses=1]
- zext i8 %26 to i32 ; <i32>:27 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:28 [#uses=1]
- trunc i32 %27 to i16 ; <i16>:29 [#uses=1]
- trunc i32 %25 to i16 ; <i16>:30 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %28, i16 %29, i16 %30 ) ; <i16>:31 [#uses=1]
- store i16 %31, i16* @us, align 2
- load i8* @sc, align 1 ; <i8>:32 [#uses=1]
- sext i8 %32 to i32 ; <i32>:33 [#uses=1]
- load i8* @uc, align 1 ; <i8>:34 [#uses=1]
- zext i8 %34 to i32 ; <i32>:35 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %36, i32 %35, i32 %33 ) ; <i32>:37 [#uses=1]
- store i32 %37, i32* @si, align 4
- load i8* @sc, align 1 ; <i8>:38 [#uses=1]
- sext i8 %38 to i32 ; <i32>:39 [#uses=1]
- load i8* @uc, align 1 ; <i8>:40 [#uses=1]
- zext i8 %40 to i32 ; <i32>:41 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:42 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %42, i32 %41, i32 %39 ) ; <i32>:43 [#uses=1]
- store i32 %43, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:44 [#uses=1]
- sext i8 %44 to i64 ; <i64>:45 [#uses=1]
- load i8* @uc, align 1 ; <i8>:46 [#uses=1]
- zext i8 %46 to i64 ; <i64>:47 [#uses=1]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:48 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %48, i64 %47, i64 %45 ) ; <i64>:49 [#uses=1]
- store i64 %49, i64* @sl, align 8
- load i8* @sc, align 1 ; <i8>:50 [#uses=1]
- sext i8 %50 to i64 ; <i64>:51 [#uses=1]
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i64 ; <i64>:53 [#uses=1]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:54 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %54, i64 %53, i64 %51 ) ; <i64>:55 [#uses=1]
- store i64 %55, i64* @ul, align 8
- load i8* @sc, align 1 ; <i8>:56 [#uses=1]
- sext i8 %56 to i64 ; <i64>:57 [#uses=1]
- load i8* @uc, align 1 ; <i8>:58 [#uses=1]
- zext i8 %58 to i64 ; <i64>:59 [#uses=1]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:60 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %60, i64 %59, i64 %57 ) ; <i64>:61 [#uses=1]
- store i64 %61, i64* @sll, align 8
- load i8* @sc, align 1 ; <i8>:62 [#uses=1]
- sext i8 %62 to i64 ; <i64>:63 [#uses=1]
- load i8* @uc, align 1 ; <i8>:64 [#uses=1]
- zext i8 %64 to i64 ; <i64>:65 [#uses=1]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:66 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %66, i64 %65, i64 %63 ) ; <i64>:67 [#uses=1]
- store i64 %67, i64* @ull, align 8
- load i8* @sc, align 1 ; <i8>:68 [#uses=1]
- zext i8 %68 to i32 ; <i32>:69 [#uses=1]
- load i8* @uc, align 1 ; <i8>:70 [#uses=1]
- zext i8 %70 to i32 ; <i32>:71 [#uses=1]
- trunc i32 %71 to i8 ; <i8>:72 [#uses=2]
- trunc i32 %69 to i8 ; <i8>:73 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %72, i8 %73 ) ; <i8>:74 [#uses=1]
- icmp eq i8 %74, %72 ; <i1>:75 [#uses=1]
- zext i1 %75 to i8 ; <i8>:76 [#uses=1]
- zext i8 %76 to i32 ; <i32>:77 [#uses=1]
- store i32 %77, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:78 [#uses=1]
- zext i8 %78 to i32 ; <i32>:79 [#uses=1]
- load i8* @uc, align 1 ; <i8>:80 [#uses=1]
- zext i8 %80 to i32 ; <i32>:81 [#uses=1]
- trunc i32 %81 to i8 ; <i8>:82 [#uses=2]
- trunc i32 %79 to i8 ; <i8>:83 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %82, i8 %83 ) ; <i8>:84 [#uses=1]
- icmp eq i8 %84, %82 ; <i1>:85 [#uses=1]
- zext i1 %85 to i8 ; <i8>:86 [#uses=1]
- zext i8 %86 to i32 ; <i32>:87 [#uses=1]
- store i32 %87, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:88 [#uses=1]
- sext i8 %88 to i16 ; <i16>:89 [#uses=1]
- zext i16 %89 to i32 ; <i32>:90 [#uses=1]
- load i8* @uc, align 1 ; <i8>:91 [#uses=1]
- zext i8 %91 to i32 ; <i32>:92 [#uses=1]
- trunc i32 %92 to i8 ; <i8>:93 [#uses=2]
- trunc i32 %90 to i8 ; <i8>:94 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 ) ; <i8>:95 [#uses=1]
- icmp eq i8 %95, %93 ; <i1>:96 [#uses=1]
- zext i1 %96 to i8 ; <i8>:97 [#uses=1]
- zext i8 %97 to i32 ; <i32>:98 [#uses=1]
- store i32 %98, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:99 [#uses=1]
- sext i8 %99 to i16 ; <i16>:100 [#uses=1]
- zext i16 %100 to i32 ; <i32>:101 [#uses=1]
- load i8* @uc, align 1 ; <i8>:102 [#uses=1]
- zext i8 %102 to i32 ; <i32>:103 [#uses=1]
- trunc i32 %103 to i8 ; <i8>:104 [#uses=2]
- trunc i32 %101 to i8 ; <i8>:105 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 ) ; <i8>:106 [#uses=1]
- icmp eq i8 %106, %104 ; <i1>:107 [#uses=1]
- zext i1 %107 to i8 ; <i8>:108 [#uses=1]
- zext i8 %108 to i32 ; <i32>:109 [#uses=1]
- store i32 %109, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:110 [#uses=1]
- sext i8 %110 to i32 ; <i32>:111 [#uses=1]
- load i8* @uc, align 1 ; <i8>:112 [#uses=1]
- zext i8 %112 to i32 ; <i32>:113 [#uses=1]
- trunc i32 %113 to i8 ; <i8>:114 [#uses=2]
- trunc i32 %111 to i8 ; <i8>:115 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 ) ; <i8>:116 [#uses=1]
- icmp eq i8 %116, %114 ; <i1>:117 [#uses=1]
- zext i1 %117 to i8 ; <i8>:118 [#uses=1]
- zext i8 %118 to i32 ; <i32>:119 [#uses=1]
- store i32 %119, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:120 [#uses=1]
- sext i8 %120 to i32 ; <i32>:121 [#uses=1]
- load i8* @uc, align 1 ; <i8>:122 [#uses=1]
- zext i8 %122 to i32 ; <i32>:123 [#uses=1]
- trunc i32 %123 to i8 ; <i8>:124 [#uses=2]
- trunc i32 %121 to i8 ; <i8>:125 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 ) ; <i8>:126 [#uses=1]
- icmp eq i8 %126, %124 ; <i1>:127 [#uses=1]
- zext i1 %127 to i8 ; <i8>:128 [#uses=1]
- zext i8 %128 to i32 ; <i32>:129 [#uses=1]
- store i32 %129, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:130 [#uses=1]
- sext i8 %130 to i64 ; <i64>:131 [#uses=1]
- load i8* @uc, align 1 ; <i8>:132 [#uses=1]
- zext i8 %132 to i64 ; <i64>:133 [#uses=1]
- trunc i64 %133 to i8 ; <i8>:134 [#uses=2]
- trunc i64 %131 to i8 ; <i8>:135 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 ) ; <i8>:136 [#uses=1]
- icmp eq i8 %136, %134 ; <i1>:137 [#uses=1]
- zext i1 %137 to i8 ; <i8>:138 [#uses=1]
- zext i8 %138 to i32 ; <i32>:139 [#uses=1]
- store i32 %139, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:140 [#uses=1]
- sext i8 %140 to i64 ; <i64>:141 [#uses=1]
- load i8* @uc, align 1 ; <i8>:142 [#uses=1]
- zext i8 %142 to i64 ; <i64>:143 [#uses=1]
- trunc i64 %143 to i8 ; <i8>:144 [#uses=2]
- trunc i64 %141 to i8 ; <i8>:145 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 ) ; <i8>:146 [#uses=1]
- icmp eq i8 %146, %144 ; <i1>:147 [#uses=1]
- zext i1 %147 to i8 ; <i8>:148 [#uses=1]
- zext i8 %148 to i32 ; <i32>:149 [#uses=1]
- store i32 %149, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:150 [#uses=1]
- sext i8 %150 to i64 ; <i64>:151 [#uses=1]
- load i8* @uc, align 1 ; <i8>:152 [#uses=1]
- zext i8 %152 to i64 ; <i64>:153 [#uses=1]
- trunc i64 %153 to i8 ; <i8>:154 [#uses=2]
- trunc i64 %151 to i8 ; <i8>:155 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 ) ; <i8>:156 [#uses=1]
- icmp eq i8 %156, %154 ; <i1>:157 [#uses=1]
- zext i1 %157 to i8 ; <i8>:158 [#uses=1]
- zext i8 %158 to i32 ; <i32>:159 [#uses=1]
- store i32 %159, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:160 [#uses=1]
- sext i8 %160 to i64 ; <i64>:161 [#uses=1]
- load i8* @uc, align 1 ; <i8>:162 [#uses=1]
- zext i8 %162 to i64 ; <i64>:163 [#uses=1]
- trunc i64 %163 to i8 ; <i8>:164 [#uses=2]
- trunc i64 %161 to i8 ; <i8>:165 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 ) ; <i8>:166 [#uses=1]
- icmp eq i8 %166, %164 ; <i1>:167 [#uses=1]
- zext i1 %167 to i8 ; <i8>:168 [#uses=1]
- zext i8 %168 to i32 ; <i32>:169 [#uses=1]
- store i32 %169, i32* @ui, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = load i8* @sc, align 1
+ %1 = zext i8 %0 to i32
+ %2 = load i8* @uc, align 1
+ %3 = zext i8 %2 to i32
+ %4 = trunc i32 %3 to i8
+ %5 = trunc i32 %1 to i8
+ %6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic
+ store i8 %6, i8* @sc, align 1
+ %7 = load i8* @sc, align 1
+ %8 = zext i8 %7 to i32
+ %9 = load i8* @uc, align 1
+ %10 = zext i8 %9 to i32
+ %11 = trunc i32 %10 to i8
+ %12 = trunc i32 %8 to i8
+ %13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic
+ store i8 %13, i8* @uc, align 1
+ %14 = load i8* @sc, align 1
+ %15 = sext i8 %14 to i16
+ %16 = zext i16 %15 to i32
+ %17 = load i8* @uc, align 1
+ %18 = zext i8 %17 to i32
+ %19 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %20 = trunc i32 %18 to i16
+ %21 = trunc i32 %16 to i16
+ %22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic
+ store i16 %22, i16* @ss, align 2
+ %23 = load i8* @sc, align 1
+ %24 = sext i8 %23 to i16
+ %25 = zext i16 %24 to i32
+ %26 = load i8* @uc, align 1
+ %27 = zext i8 %26 to i32
+ %28 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %29 = trunc i32 %27 to i16
+ %30 = trunc i32 %25 to i16
+ %31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic
+ store i16 %31, i16* @us, align 2
+ %32 = load i8* @sc, align 1
+ %33 = sext i8 %32 to i32
+ %34 = load i8* @uc, align 1
+ %35 = zext i8 %34 to i32
+ %36 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic
+ store i32 %37, i32* @si, align 4
+ %38 = load i8* @sc, align 1
+ %39 = sext i8 %38 to i32
+ %40 = load i8* @uc, align 1
+ %41 = zext i8 %40 to i32
+ %42 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic
+ store i32 %43, i32* @ui, align 4
+ %44 = load i8* @sc, align 1
+ %45 = sext i8 %44 to i64
+ %46 = load i8* @uc, align 1
+ %47 = zext i8 %46 to i64
+ %48 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic
+ store i64 %49, i64* @sl, align 8
+ %50 = load i8* @sc, align 1
+ %51 = sext i8 %50 to i64
+ %52 = load i8* @uc, align 1
+ %53 = zext i8 %52 to i64
+ %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic
+ store i64 %55, i64* @ul, align 8
+ %56 = load i8* @sc, align 1
+ %57 = sext i8 %56 to i64
+ %58 = load i8* @uc, align 1
+ %59 = zext i8 %58 to i64
+ %60 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic
+ store i64 %61, i64* @sll, align 8
+ %62 = load i8* @sc, align 1
+ %63 = sext i8 %62 to i64
+ %64 = load i8* @uc, align 1
+ %65 = zext i8 %64 to i64
+ %66 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic
+ store i64 %67, i64* @ull, align 8
+ %68 = load i8* @sc, align 1
+ %69 = zext i8 %68 to i32
+ %70 = load i8* @uc, align 1
+ %71 = zext i8 %70 to i32
+ %72 = trunc i32 %71 to i8
+ %73 = trunc i32 %69 to i8
+ %74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic
+ %75 = icmp eq i8 %74, %72
+ %76 = zext i1 %75 to i8
+ %77 = zext i8 %76 to i32
+ store i32 %77, i32* @ui, align 4
+ %78 = load i8* @sc, align 1
+ %79 = zext i8 %78 to i32
+ %80 = load i8* @uc, align 1
+ %81 = zext i8 %80 to i32
+ %82 = trunc i32 %81 to i8
+ %83 = trunc i32 %79 to i8
+ %84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic
+ %85 = icmp eq i8 %84, %82
+ %86 = zext i1 %85 to i8
+ %87 = zext i8 %86 to i32
+ store i32 %87, i32* @ui, align 4
+ %88 = load i8* @sc, align 1
+ %89 = sext i8 %88 to i16
+ %90 = zext i16 %89 to i32
+ %91 = load i8* @uc, align 1
+ %92 = zext i8 %91 to i32
+ %93 = trunc i32 %92 to i8
+ %94 = trunc i32 %90 to i8
+ %95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic
+ %96 = icmp eq i8 %95, %93
+ %97 = zext i1 %96 to i8
+ %98 = zext i8 %97 to i32
+ store i32 %98, i32* @ui, align 4
+ %99 = load i8* @sc, align 1
+ %100 = sext i8 %99 to i16
+ %101 = zext i16 %100 to i32
+ %102 = load i8* @uc, align 1
+ %103 = zext i8 %102 to i32
+ %104 = trunc i32 %103 to i8
+ %105 = trunc i32 %101 to i8
+ %106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic
+ %107 = icmp eq i8 %106, %104
+ %108 = zext i1 %107 to i8
+ %109 = zext i8 %108 to i32
+ store i32 %109, i32* @ui, align 4
+ %110 = load i8* @sc, align 1
+ %111 = sext i8 %110 to i32
+ %112 = load i8* @uc, align 1
+ %113 = zext i8 %112 to i32
+ %114 = trunc i32 %113 to i8
+ %115 = trunc i32 %111 to i8
+ %116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic
+ %117 = icmp eq i8 %116, %114
+ %118 = zext i1 %117 to i8
+ %119 = zext i8 %118 to i32
+ store i32 %119, i32* @ui, align 4
+ %120 = load i8* @sc, align 1
+ %121 = sext i8 %120 to i32
+ %122 = load i8* @uc, align 1
+ %123 = zext i8 %122 to i32
+ %124 = trunc i32 %123 to i8
+ %125 = trunc i32 %121 to i8
+ %126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic
+ %127 = icmp eq i8 %126, %124
+ %128 = zext i1 %127 to i8
+ %129 = zext i8 %128 to i32
+ store i32 %129, i32* @ui, align 4
+ %130 = load i8* @sc, align 1
+ %131 = sext i8 %130 to i64
+ %132 = load i8* @uc, align 1
+ %133 = zext i8 %132 to i64
+ %134 = trunc i64 %133 to i8
+ %135 = trunc i64 %131 to i8
+ %136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic
+ %137 = icmp eq i8 %136, %134
+ %138 = zext i1 %137 to i8
+ %139 = zext i8 %138 to i32
+ store i32 %139, i32* @ui, align 4
+ %140 = load i8* @sc, align 1
+ %141 = sext i8 %140 to i64
+ %142 = load i8* @uc, align 1
+ %143 = zext i8 %142 to i64
+ %144 = trunc i64 %143 to i8
+ %145 = trunc i64 %141 to i8
+ %146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic
+ %147 = icmp eq i8 %146, %144
+ %148 = zext i1 %147 to i8
+ %149 = zext i8 %148 to i32
+ store i32 %149, i32* @ui, align 4
+ %150 = load i8* @sc, align 1
+ %151 = sext i8 %150 to i64
+ %152 = load i8* @uc, align 1
+ %153 = zext i8 %152 to i64
+ %154 = trunc i64 %153 to i8
+ %155 = trunc i64 %151 to i8
+ %156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic
+ %157 = icmp eq i8 %156, %154
+ %158 = zext i1 %157 to i8
+ %159 = zext i8 %158 to i32
+ store i32 %159, i32* @ui, align 4
+ %160 = load i8* @sc, align 1
+ %161 = sext i8 %160 to i64
+ %162 = load i8* @uc, align 1
+ %163 = zext i8 %162 to i64
+ %164 = trunc i64 %163 to i8
+ %165 = trunc i64 %161 to i8
+ %166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic
+ %167 = icmp eq i8 %166, %164
+ %168 = zext i1 %167 to i8
+ %169 = zext i8 %168 to i32
+ store i32 %169, i32* @ui, align 4
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
-
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
-declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind
-
define void @test_lock() nounwind {
entry:
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %10, i64 1 ) ; <i64>:11 [#uses=1]
- store i64 %11, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %12, i64 1 ) ; <i64>:13 [#uses=1]
- store i64 %13, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:14 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %14, i64 1 ) ; <i64>:15 [#uses=1]
- store i64 %15, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:16 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %16, i64 1 ) ; <i64>:17 [#uses=1]
- store i64 %17, i64* @ull, align 8
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:18 [#uses=1]
- volatile store i16 0, i16* %18, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:19 [#uses=1]
- volatile store i16 0, i16* %19, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- volatile store i32 0, i32* %20, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:21 [#uses=1]
- volatile store i32 0, i32* %21, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:22 [#uses=1]
- volatile store i64 0, i64* %22, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:23 [#uses=1]
- volatile store i64 0, i64* %23, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:24 [#uses=1]
- volatile store i64 0, i64* %24, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:25 [#uses=1]
- volatile store i64 0, i64* %25, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
+ %0 = atomicrmw xchg i8* @sc, i8 1 monotonic
+ store i8 %0, i8* @sc, align 1
+ %1 = atomicrmw xchg i8* @uc, i8 1 monotonic
+ store i8 %1, i8* @uc, align 1
+ %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %3 = atomicrmw xchg i16* %2, i16 1 monotonic
+ store i16 %3, i16* @ss, align 2
+ %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %5 = atomicrmw xchg i16* %4, i16 1 monotonic
+ store i16 %5, i16* @us, align 2
+ %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %7 = atomicrmw xchg i32* %6, i32 1 monotonic
+ store i32 %7, i32* @si, align 4
+ %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %9 = atomicrmw xchg i32* %8, i32 1 monotonic
+ store i32 %9, i32* @ui, align 4
+ %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %11 = atomicrmw xchg i64* %10, i64 1 monotonic
+ store i64 %11, i64* @sl, align 8
+ %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %13 = atomicrmw xchg i64* %12, i64 1 monotonic
+ store i64 %13, i64* @ul, align 8
+ %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %15 = atomicrmw xchg i64* %14, i64 1 monotonic
+ store i64 %15, i64* @sll, align 8
+ %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %17 = atomicrmw xchg i64* %16, i64 1 monotonic
+ store i64 %17, i64* @ull, align 8
+ fence seq_cst
+ store volatile i8 0, i8* @sc, align 1
+ store volatile i8 0, i8* @uc, align 1
+ %18 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ store volatile i16 0, i16* %18, align 2
+ %19 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ store volatile i16 0, i16* %19, align 2
+ %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ store volatile i32 0, i32* %20, align 4
+ %21 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ store volatile i32 0, i32* %21, align 4
+ %22 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ store volatile i64 0, i64* %22, align 8
+ %23 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ store volatile i64 0, i64* %23, align 8
+ %24 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ store volatile i64 0, i64* %24, align 8
+ %25 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ store volatile i64 0, i64* %25, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
}
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.swap.i64.p0i64(i64*, i64) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/X86/MachineSink-DbgValue.ll b/test/CodeGen/X86/MachineSink-DbgValue.ll
new file mode 100644
index 0000000..ea791a3
--- /dev/null
+++ b/test/CodeGen/X86/MachineSink-DbgValue.ll
@@ -0,0 +1,49 @@
+; RUN: llc < %s | FileCheck %s
+; Should sink matching DBG_VALUEs also.
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-macosx10.7.0"
+
+define i32 @foo(i32 %i, i32* nocapture %c) nounwind uwtable readonly ssp {
+ tail call void @llvm.dbg.value(metadata !{i32 %i}, i64 0, metadata !6), !dbg !12
+ %ab = load i32* %c, align 1, !dbg !14
+ tail call void @llvm.dbg.value(metadata !{i32* %c}, i64 0, metadata !7), !dbg !13
+ tail call void @llvm.dbg.value(metadata !{i32 %ab}, i64 0, metadata !10), !dbg !14
+ %cd = icmp eq i32 %i, 42, !dbg !15
+ br i1 %cd, label %bb1, label %bb2, !dbg !15
+
+bb1: ; preds = %0
+;CHECK: DEBUG_VALUE: a
+;CHECK-NEXT: .loc 1 5 5
+;CHECK-NEXT: addl
+ %gh = add nsw i32 %ab, 2, !dbg !16
+ br label %bb2, !dbg !16
+
+bb2:
+ %.0 = phi i32 [ %gh, %bb1 ], [ 0, %0 ]
+ ret i32 %.0, !dbg !17
+}
+
+declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
+
+!llvm.dbg.cu = !{!0}
+!llvm.dbg.sp = !{!1}
+!llvm.dbg.lv.foo = !{!6, !7, !10}
+
+!0 = metadata !{i32 589841, i32 0, i32 12, metadata !"a.c", metadata !"/private/tmp", metadata !"Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{i32 589870, i32 0, metadata !2, metadata !"foo", metadata !"foo", metadata !"", metadata !2, i32 2, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32, i32*)* @foo, null, null} ; [ DW_TAG_subprogram ]
+!2 = metadata !{i32 589865, metadata !"a.c", metadata !"/private/tmp", metadata !0} ; [ DW_TAG_file_type ]
+!3 = metadata !{i32 589845, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!4 = metadata !{metadata !5}
+!5 = metadata !{i32 589860, metadata !0, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!6 = metadata !{i32 590081, metadata !1, metadata !"i", metadata !2, i32 16777218, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!7 = metadata !{i32 590081, metadata !1, metadata !"c", metadata !2, i32 33554434, metadata !8, i32 0} ; [ DW_TAG_arg_variable ]
+!8 = metadata !{i32 589839, metadata !0, metadata !"", null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !9} ; [ DW_TAG_pointer_type ]
+!9 = metadata !{i32 589860, metadata !0, metadata !"char", null, i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
+!10 = metadata !{i32 590080, metadata !11, metadata !"a", metadata !2, i32 3, metadata !9, i32 0} ; [ DW_TAG_auto_variable ]
+!11 = metadata !{i32 589835, metadata !1, i32 2, i32 25, metadata !2, i32 0} ; [ DW_TAG_lexical_block ]
+!12 = metadata !{i32 2, i32 13, metadata !1, null}
+!13 = metadata !{i32 2, i32 22, metadata !1, null}
+!14 = metadata !{i32 3, i32 14, metadata !11, null}
+!15 = metadata !{i32 4, i32 3, metadata !11, null}
+!16 = metadata !{i32 5, i32 5, metadata !11, null}
+!17 = metadata !{i32 7, i32 1, metadata !11, null}
diff --git a/test/CodeGen/X86/MachineSink-eflags.ll b/test/CodeGen/X86/MachineSink-eflags.ll
new file mode 100644
index 0000000..5b8c7b2
--- /dev/null
+++ b/test/CodeGen/X86/MachineSink-eflags.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-pc-linux"
+
+
+%0 = type <{ i64, i64, %1, %1, [21 x %2] }>
+%1 = type <{ i64, i64, i64 }>
+%2 = type <{ i32, i32, i8 addrspace(2)* }>
+%3 = type { i8*, i8*, i8*, i8*, i32 }
+%4 = type <{ %5*, i8*, i32, i32, [4 x i64], [4 x i64], [4 x i64], [4 x i64], [4 x i64] }>
+%5 = type <{ void (i32)*, i8*, i32 (i8*, ...)* }>
+
+define void @foo(i8* nocapture %_stubArgs) nounwind {
+entry:
+ %i0 = alloca i8*, align 8
+ %i2 = alloca i8*, align 8
+ %b.i = alloca [16 x <2 x double>], align 16
+ %conv = bitcast i8* %_stubArgs to i32*
+ %tmp1 = load i32* %conv, align 4
+ %ptr8 = getelementptr i8* %_stubArgs, i64 16
+ %i4 = bitcast i8* %ptr8 to <2 x double>*
+ %ptr20 = getelementptr i8* %_stubArgs, i64 48
+ %i7 = bitcast i8* %ptr20 to <2 x double> addrspace(1)**
+ %tmp21 = load <2 x double> addrspace(1)** %i7, align 8
+ %ptr28 = getelementptr i8* %_stubArgs, i64 64
+ %i9 = bitcast i8* %ptr28 to i32*
+ %tmp29 = load i32* %i9, align 4
+ %ptr32 = getelementptr i8* %_stubArgs, i64 68
+ %i10 = bitcast i8* %ptr32 to i32*
+ %tmp33 = load i32* %i10, align 4
+ %tmp17.i = mul i32 10, 20
+ %tmp19.i = add i32 %tmp17.i, %tmp33
+ %conv21.i = zext i32 %tmp19.i to i64
+ %tmp6.i = and i32 42, -32
+ %tmp42.i = add i32 %tmp6.i, 17
+ %tmp44.i = insertelement <2 x i32> undef, i32 %tmp42.i, i32 1
+ %tmp96676677.i = or i32 17, -4
+ %ptr4438.i = getelementptr inbounds [16 x <2 x double>]* %b.i, i64 0, i64 0
+ %arrayidx4506.i = getelementptr [16 x <2 x double>]* %b.i, i64 0, i64 4
+ %tmp52.i = insertelement <2 x i32> %tmp44.i, i32 0, i32 0
+ %tmp78.i = extractelement <2 x i32> %tmp44.i, i32 1
+ %tmp97.i = add i32 %tmp78.i, %tmp96676677.i
+ %tmp99.i = insertelement <2 x i32> %tmp52.i, i32 %tmp97.i, i32 1
+ %tmp154.i = extractelement <2 x i32> %tmp99.i, i32 1
+ %tmp156.i = extractelement <2 x i32> %tmp52.i, i32 0
+ %tmp158.i = urem i32 %tmp156.i, %tmp1
+ %i38 = mul i32 %tmp154.i, %tmp29
+ %i39 = add i32 %tmp158.i, %i38
+ %conv160.i = zext i32 %i39 to i64
+ %tmp22.sum652.i = add i64 %conv160.i, %conv21.i
+ %arrayidx161.i = getelementptr <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum652.i
+ %tmp162.i = load <2 x double> addrspace(1)* %arrayidx161.i, align 16
+ %tmp222.i = add i32 %tmp154.i, 1
+ %i43 = mul i32 %tmp222.i, %tmp29
+ %i44 = add i32 %tmp158.i, %i43
+ %conv228.i = zext i32 %i44 to i64
+ %tmp22.sum656.i = add i64 %conv228.i, %conv21.i
+ %arrayidx229.i = getelementptr <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum656.i
+ %tmp230.i = load <2 x double> addrspace(1)* %arrayidx229.i, align 16
+ %cmp432.i = icmp ult i32 %tmp156.i, %tmp1
+
+; %shl.i should not be sunk below the compare.
+; CHECK: cmpl
+; CHECK-NOT: shlq
+
+ %cond.i = select i1 %cmp432.i, <2 x double> %tmp162.i, <2 x double> zeroinitializer
+ store <2 x double> %cond.i, <2 x double>* %ptr4438.i, align 16
+ %cond448.i = select i1 %cmp432.i, <2 x double> %tmp230.i, <2 x double> zeroinitializer
+ store <2 x double> %cond448.i, <2 x double>* %arrayidx4506.i, align 16
+ ret void
+}
+
+
+
diff --git a/test/CodeGen/X86/SIMD/dg.exp b/test/CodeGen/X86/SIMD/dg.exp
deleted file mode 100644
index 629a147..0000000
--- a/test/CodeGen/X86/SIMD/dg.exp
+++ /dev/null
@@ -1,5 +0,0 @@
-load_lib llvm.exp
-
-if { [llvm_supports_target X86] } {
- RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
-}
diff --git a/test/CodeGen/X86/SIMD/notvunpcklpd.ll b/test/CodeGen/X86/SIMD/notvunpcklpd.ll
deleted file mode 100644
index 3afc2f2..0000000
--- a/test/CodeGen/X86/SIMD/notvunpcklpd.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -mattr=+avx | FileCheck %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @try_([2 x <4 x double>]* noalias %incarray, [2 x <4 x double>]* noalias %incarrayb ) {
-entry:
- %incarray1 = alloca [2 x <4 x double>]*, align 8
- %incarrayb1 = alloca [2 x <4 x double>]*, align 8
- %carray = alloca [2 x <4 x double>], align 16
- %r = getelementptr [2 x <4 x double>]* %incarray, i32 0, i32 0
- %rb = getelementptr [2 x <4 x double>]* %incarrayb, i32 0, i32 0
- %r3 = load <4 x double>* %r, align 8
- %r4 = load <4 x double>* %rb, align 8
- %r11 = shufflevector <4 x double> %r3, <4 x double> %r4, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x double>> [#uses=1]
-; CHECK-NOT: vunpcklpd
- %r12 = getelementptr [2 x <4 x double>]* %carray, i32 0, i32 1
- store <4 x double> %r11, <4 x double>* %r12, align 4
- ret void
-}
diff --git a/test/CodeGen/X86/SIMD/notvunpcklps.ll b/test/CodeGen/X86/SIMD/notvunpcklps.ll
deleted file mode 100644
index 19daa3e..0000000
--- a/test/CodeGen/X86/SIMD/notvunpcklps.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -mattr=+avx | FileCheck %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @try_([2 x <8 x float>]* noalias %incarray, [2 x <8 x float>]* noalias %incarrayb ) {
-enmtry:
- %incarray1 = alloca [2 x <8 x float>]*, align 8
- %incarrayb1 = alloca [2 x <8 x float>]*, align 8
- %carray = alloca [2 x <8 x float>], align 16
- %r = getelementptr [2 x <8 x float>]* %incarray, i32 0, i32 0
- %rb = getelementptr [2 x <8 x float>]* %incarrayb, i32 0, i32 0
- %r3 = load <8 x float>* %r, align 8
- %r4 = load <8 x float>* %rb, align 8
- %r8 = shufflevector <8 x float> %r3, <8 x float> %r4, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 > ; <<8 x float>> [#uses=1]
-; CHECK-NOT: vunpcklps
- %r9 = getelementptr [2 x <8 x float>]* %carray, i32 0, i32 0
- store <8 x float> %r8, <8 x float>* %r9, align 4
- ret void
-}
diff --git a/test/CodeGen/X86/SIMD/vunpcklpd.ll b/test/CodeGen/X86/SIMD/vunpcklpd.ll
deleted file mode 100644
index 60d23a4..0000000
--- a/test/CodeGen/X86/SIMD/vunpcklpd.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -mattr=+avx | FileCheck %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @try_([2 x <4 x double>]* noalias %incarray, [2 x <4 x double>]* noalias %incarrayb ) {
-entry:
- %incarray1 = alloca [2 x <4 x double>]*, align 8
- %incarrayb1 = alloca [2 x <4 x double>]*, align 8
- %carray = alloca [2 x <4 x double>], align 16
- %r = getelementptr [2 x <4 x double>]* %incarray, i32 0, i32 0
- %rb = getelementptr [2 x <4 x double>]* %incarrayb, i32 0, i32 0
- %r3 = load <4 x double>* %r, align 8
- %r4 = load <4 x double>* %rb, align 8
- %r11 = shufflevector <4 x double> %r3, <4 x double> %r4, <4 x i32> < i32 0, i32 4, i32 2, i32 6 > ; <<4 x double>> [#uses=1]
-; CHECK: vunpcklpd
- %r12 = getelementptr [2 x <4 x double>]* %carray, i32 0, i32 1
- store <4 x double> %r11, <4 x double>* %r12, align 4
- ret void
-}
diff --git a/test/CodeGen/X86/SIMD/vunpcklps.ll b/test/CodeGen/X86/SIMD/vunpcklps.ll
deleted file mode 100644
index a87b299..0000000
--- a/test/CodeGen/X86/SIMD/vunpcklps.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -mattr=+avx | FileCheck %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @try_([2 x <8 x float>]* noalias %incarray, [2 x <8 x float>]* noalias %incarrayb ) {
-entry:
- %incarray1 = alloca [2 x <8 x float>]*, align 8
- %incarrayb1 = alloca [2 x <8 x float>]*, align 8
- %carray = alloca [2 x <8 x float>], align 16
- %r = getelementptr [2 x <8 x float>]* %incarray, i32 0, i32 0
- %rb = getelementptr [2 x <8 x float>]* %incarrayb, i32 0, i32 0
- %r3 = load <8 x float>* %r, align 8
- %r4 = load <8 x float>* %rb, align 8
- %r11 = shufflevector <8 x float> %r3, <8 x float> %r4, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13 > ; <<8 x float>> [#uses=1]
-; CHECK: vunpcklps
- %r12 = getelementptr [2 x <8 x float>]* %carray, i32 0, i32 1
- store <8 x float> %r11, <8 x float>* %r12, align 4
- ret void
-}
diff --git a/test/CodeGen/X86/alignment-2.ll b/test/CodeGen/X86/alignment-2.ll
new file mode 100644
index 0000000..cc709b5
--- /dev/null
+++ b/test/CodeGen/X86/alignment-2.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -mtriple i386-apple-darwin10 | FileCheck %s
+; <rdar://problem/10058036>
+
+%struct._psqlSettings = type { %struct.pg_conn*, i32, %struct.__sFILE*, i8, %struct.printQueryOpt, i8*, i8, i32, %struct.__sFILE*, i8, i32, i8*, i8*, i8*, i64, i8, %struct.__sFILE*, %struct._variable*, i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i8*, i8*, i8*, i32 }
+%struct.pg_conn = type opaque
+%struct.__sFILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
+%struct.__sbuf = type { i8*, i32 }
+%struct.__sFILEX = type opaque
+%struct.printQueryOpt = type { %struct.printTableOpt, i8*, i8, i8*, i8**, i8, i8, i8* }
+%struct.printTableOpt = type { i32, i8, i16, i16, i8, i8, i8, i32, %struct.printTextFormat*, i8*, i8*, i8, i8*, i32, i32, i32 }
+%struct.printTextFormat = type { i8*, [4 x %struct.printTextLineFormat], i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8 }
+%struct.printTextLineFormat = type { i8*, i8*, i8*, i8* }
+%struct._variable = type { i8*, i8*, void (i8*)*, %struct._variable* }
+%struct.pg_result = type opaque
+
+@pset = external global %struct._psqlSettings
+
+define signext i8 @do_lo_list() nounwind optsize ssp {
+bb:
+; CHECK: do_lo_list
+; CHECK-NOT: movaps
+ %myopt = alloca %struct.printQueryOpt, align 4
+ %tmp = bitcast %struct.printQueryOpt* %myopt to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* bitcast (%struct.printQueryOpt* getelementptr inbounds (%struct._psqlSettings* @pset, i32 0, i32 4) to i8*), i32 76, i32 4, i1 false)
+ ret i8 0
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
diff --git a/test/CodeGen/X86/alignment.ll b/test/CodeGen/X86/alignment.ll
index 7e91115..5908c0c 100644
--- a/test/CodeGen/X86/alignment.ll
+++ b/test/CodeGen/X86/alignment.ll
@@ -40,4 +40,4 @@
; CHECK: .comm GlobalBS,384,8
@GlobalCS = common global { [384 x i8] } zeroinitializer, align 2, section "foo"
-; CHECK: .comm GlobalCS,384,2
\ No newline at end of file
+; CHECK: .comm GlobalCS,384,2
diff --git a/test/CodeGen/X86/asm-label2.ll b/test/CodeGen/X86/asm-label2.ll
index 0b5de34..8715aa9 100644
--- a/test/CodeGen/X86/asm-label2.ll
+++ b/test/CodeGen/X86/asm-label2.ll
@@ -16,7 +16,11 @@ invoke.cont: ; preds = %entry
ret void
lpad: ; preds = %entry
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
}
declare void @_zed() ssp align 2
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/atomic-load-store-wide.ll b/test/CodeGen/X86/atomic-load-store-wide.ll
new file mode 100644
index 0000000..a9ebfef
--- /dev/null
+++ b/test/CodeGen/X86/atomic-load-store-wide.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=x86 | FileCheck %s
+
+; 64-bit load/store on x86-32
+; FIXME: The generated code can be substantially improved.
+
+define void @test1(i64* %ptr, i64 %val1) {
+; CHECK: test1
+; CHECK: cmpxchg8b
+; CHECK-NEXT: jne
+ store atomic i64 %val1, i64* %ptr seq_cst, align 8
+ ret void
+}
+
+define i64 @test2(i64* %ptr) {
+; CHECK: test2
+; CHECK: cmpxchg8b
+ %val = load atomic i64* %ptr seq_cst, align 8
+ ret i64 %val
+}
diff --git a/test/CodeGen/X86/atomic-load-store.ll b/test/CodeGen/X86/atomic-load-store.ll
new file mode 100644
index 0000000..fee4585
--- /dev/null
+++ b/test/CodeGen/X86/atomic-load-store.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.7.0 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.7.0 -O0 | FileCheck %s
+
+define void @test1(i32* %ptr, i32 %val1) {
+; CHECK: test1
+; CHECK: xchgl %esi, (%rdi)
+ store atomic i32 %val1, i32* %ptr seq_cst, align 4
+ ret void
+}
+
+define void @test2(i32* %ptr, i32 %val1) {
+; CHECK: test2
+; CHECK: movl %esi, (%rdi)
+ store atomic i32 %val1, i32* %ptr release, align 4
+ ret void
+}
+
+define i32 @test3(i32* %ptr) {
+; CHECK: test3
+; CHECK: movl (%rdi), %eax
+ %val = load atomic i32* %ptr seq_cst, align 4
+ ret i32 %val
+}
diff --git a/test/CodeGen/X86/atomic-or.ll b/test/CodeGen/X86/atomic-or.ll
index 164252d..3f02eaf 100644
--- a/test/CodeGen/X86/atomic-or.ll
+++ b/test/CodeGen/X86/atomic-or.ll
@@ -7,13 +7,11 @@ entry:
%p.addr = alloca i64*, align 8
store i64* %p, i64** %p.addr, align 8
%tmp = load i64** %p.addr, align 8
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
; CHECK: t1:
; CHECK: movl $2147483648, %eax
; CHECK: lock
; CHECK-NEXT: orq %r{{.*}}, (%r{{.*}})
- %0 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %tmp, i64 2147483648)
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ %0 = atomicrmw or i64* %tmp, i64 2147483648 seq_cst
ret void
}
@@ -22,15 +20,9 @@ entry:
%p.addr = alloca i64*, align 8
store i64* %p, i64** %p.addr, align 8
%tmp = load i64** %p.addr, align 8
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
; CHECK: t2:
-; CHECK-NOT: movl
; CHECK: lock
; CHECK-NEXT: orq $2147483644, (%r{{.*}})
- %0 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %tmp, i64 2147483644)
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ %0 = atomicrmw or i64* %tmp, i64 2147483644 seq_cst
ret void
}
-
-declare i64 @llvm.atomic.load.or.i64.p0i64(i64* nocapture, i64) nounwind
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/X86/atomic_add.ll b/test/CodeGen/X86/atomic_add.ll
index 26d25e2..1fce256 100644
--- a/test/CodeGen/X86/atomic_add.ll
+++ b/test/CodeGen/X86/atomic_add.ll
@@ -6,80 +6,74 @@ define void @sub1(i32* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: sub1:
; CHECK: subl
- %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0]
- ret void
+ %0 = atomicrmw sub i32* %p, i32 %v monotonic
+ ret void
}
define void @inc4(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: inc4:
; CHECK: incq
- %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
- ret void
+ %0 = atomicrmw add i64* %p, i64 1 monotonic
+ ret void
}
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
-
define void @add8(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: add8:
; CHECK: addq $2
- %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0]
- ret void
+ %0 = atomicrmw add i64* %p, i64 2 monotonic
+ ret void
}
define void @add4(i64* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add4:
; CHECK: addq
- %0 = sext i32 %v to i64 ; <i64> [#uses=1]
- %1 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0]
- ret void
+ %0 = sext i32 %v to i64 ; <i64> [#uses=1]
+ %1 = atomicrmw add i64* %p, i64 %0 monotonic
+ ret void
}
define void @inc3(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: inc3:
; CHECK: incb
- %0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0]
- ret void
+ %0 = atomicrmw add i8* %p, i8 1 monotonic
+ ret void
}
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind
-
define void @add7(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: add7:
; CHECK: addb $2
- %0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0]
- ret void
+ %0 = atomicrmw add i8* %p, i8 2 monotonic
+ ret void
}
define void @add3(i8* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add3:
; CHECK: addb
- %0 = trunc i32 %v to i8 ; <i8> [#uses=1]
- %1 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0]
- ret void
+ %0 = trunc i32 %v to i8 ; <i8> [#uses=1]
+ %1 = atomicrmw add i8* %p, i8 %0 monotonic
+ ret void
}
define void @inc2(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: inc2:
; CHECK: incw
- %0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0]
- ret void
+ %0 = atomicrmw add i16* %p, i16 1 monotonic
+ ret void
}
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16* nocapture, i16) nounwind
-
define void @add6(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: add6:
; CHECK: addw $2
- %0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0]
- ret void
+ %0 = atomicrmw add i16* %p, i16 2 monotonic
+ ret void
}
define void @add2(i16* nocapture %p, i32 %v) nounwind ssp {
@@ -87,52 +81,48 @@ entry:
; CHECK: add2:
; CHECK: addw
%0 = trunc i32 %v to i16 ; <i16> [#uses=1]
- %1 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0]
- ret void
+ %1 = atomicrmw add i16* %p, i16 %0 monotonic
+ ret void
}
define void @inc1(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: inc1:
; CHECK: incl
- %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0]
- ret void
+ %0 = atomicrmw add i32* %p, i32 1 monotonic
+ ret void
}
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
-
define void @add5(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: add5:
; CHECK: addl $2
- %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0]
- ret void
+ %0 = atomicrmw add i32* %p, i32 2 monotonic
+ ret void
}
define void @add1(i32* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add1:
; CHECK: addl
- %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0]
- ret void
+ %0 = atomicrmw add i32* %p, i32 %v monotonic
+ ret void
}
define void @dec4(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: dec4:
; CHECK: decq
- %0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
- ret void
+ %0 = atomicrmw sub i64* %p, i64 1 monotonic
+ ret void
}
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64* nocapture, i64) nounwind
-
define void @sub8(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: sub8:
; CHECK: subq $2
- %0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0]
- ret void
+ %0 = atomicrmw sub i64* %p, i64 2 monotonic
+ ret void
}
define void @sub4(i64* nocapture %p, i32 %v) nounwind ssp {
@@ -140,26 +130,24 @@ entry:
; CHECK: sub4:
; CHECK: subq
%0 = sext i32 %v to i64 ; <i64> [#uses=1]
- %1 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0]
- ret void
+ %1 = atomicrmw sub i64* %p, i64 %0 monotonic
+ ret void
}
define void @dec3(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: dec3:
; CHECK: decb
- %0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0]
- ret void
+ %0 = atomicrmw sub i8* %p, i8 1 monotonic
+ ret void
}
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind
-
define void @sub7(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: sub7:
; CHECK: subb $2
- %0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0]
- ret void
+ %0 = atomicrmw sub i8* %p, i8 2 monotonic
+ ret void
}
define void @sub3(i8* nocapture %p, i32 %v) nounwind ssp {
@@ -167,26 +155,24 @@ entry:
; CHECK: sub3:
; CHECK: subb
%0 = trunc i32 %v to i8 ; <i8> [#uses=1]
- %1 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0]
- ret void
+ %1 = atomicrmw sub i8* %p, i8 %0 monotonic
+ ret void
}
define void @dec2(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: dec2:
; CHECK: decw
- %0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0]
- ret void
+ %0 = atomicrmw sub i16* %p, i16 1 monotonic
+ ret void
}
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16* nocapture, i16) nounwind
-
define void @sub6(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: sub6:
; CHECK: subw $2
- %0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0]
- ret void
+ %0 = atomicrmw sub i16* %p, i16 2 monotonic
+ ret void
}
define void @sub2(i16* nocapture %p, i32 %v) nounwind ssp {
@@ -194,24 +180,22 @@ entry:
; CHECK: sub2:
; CHECK: negl
%0 = trunc i32 %v to i16 ; <i16> [#uses=1]
- %1 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0]
- ret void
+ %1 = atomicrmw sub i16* %p, i16 %0 monotonic
+ ret void
}
define void @dec1(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: dec1:
; CHECK: decl
- %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0]
- ret void
+ %0 = atomicrmw sub i32* %p, i32 1 monotonic
+ ret void
}
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32* nocapture, i32) nounwind
-
define void @sub5(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: sub5:
; CHECK: subl $2
- %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0]
- ret void
+ %0 = atomicrmw sub i32* %p, i32 2 monotonic
+ ret void
}
diff --git a/test/CodeGen/X86/atomic_op.ll b/test/CodeGen/X86/atomic_op.ll
index f3ade93..972dab2 100644
--- a/test/CodeGen/X86/atomic_op.ll
+++ b/test/CodeGen/X86/atomic_op.ll
@@ -24,87 +24,87 @@ entry:
%tmp = load i32* %temp
; CHECK: lock
; CHECK: xaddl
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val1, i32 %tmp ) ; <i32>:0 [#uses=1]
+ %0 = atomicrmw add i32* %val1, i32 %tmp monotonic
store i32 %0, i32* %old
; CHECK: lock
; CHECK: xaddl
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 30 ) ; <i32>:1 [#uses=1]
+ %1 = atomicrmw sub i32* %val2, i32 30 monotonic
store i32 %1, i32* %old
; CHECK: lock
; CHECK: xaddl
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:2 [#uses=1]
+ %2 = atomicrmw add i32* %val2, i32 1 monotonic
store i32 %2, i32* %old
; CHECK: lock
; CHECK: xaddl
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:3 [#uses=1]
+ %3 = atomicrmw sub i32* %val2, i32 1 monotonic
store i32 %3, i32* %old
; CHECK: andl
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %andt, i32 4080 ) ; <i32>:4 [#uses=1]
+ %4 = atomicrmw and i32* %andt, i32 4080 monotonic
store i32 %4, i32* %old
; CHECK: orl
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ort, i32 4080 ) ; <i32>:5 [#uses=1]
+ %5 = atomicrmw or i32* %ort, i32 4080 monotonic
store i32 %5, i32* %old
; CHECK: xorl
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %xort, i32 4080 ) ; <i32>:6 [#uses=1]
+ %6 = atomicrmw xor i32* %xort, i32 4080 monotonic
store i32 %6, i32* %old
; CHECK: cmov
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 16 ) ; <i32>:7 [#uses=1]
+ %7 = atomicrmw min i32* %val2, i32 16 monotonic
store i32 %7, i32* %old
%neg = sub i32 0, 1 ; <i32> [#uses=1]
; CHECK: cmov
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 %neg ) ; <i32>:8 [#uses=1]
+ %8 = atomicrmw min i32* %val2, i32 %neg monotonic
store i32 %8, i32* %old
; CHECK: cmov
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:9 [#uses=1]
+ %9 = atomicrmw max i32* %val2, i32 1 monotonic
store i32 %9, i32* %old
; CHECK: cmov
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 0 ) ; <i32>:10 [#uses=1]
+ %10 = atomicrmw max i32* %val2, i32 0 monotonic
store i32 %10, i32* %old
; CHECK: cmov
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 65535 ) ; <i32>:11 [#uses=1]
+ %11 = atomicrmw umax i32* %val2, i32 65535 monotonic
store i32 %11, i32* %old
; CHECK: cmov
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 10 ) ; <i32>:12 [#uses=1]
+ %12 = atomicrmw umax i32* %val2, i32 10 monotonic
store i32 %12, i32* %old
; CHECK: cmov
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:13 [#uses=1]
+ %13 = atomicrmw umin i32* %val2, i32 1 monotonic
store i32 %13, i32* %old
; CHECK: cmov
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 10 ) ; <i32>:14 [#uses=1]
+ %14 = atomicrmw umin i32* %val2, i32 10 monotonic
store i32 %14, i32* %old
; CHECK: xchgl %{{.*}}, {{.*}}(%esp)
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %val2, i32 1976 ) ; <i32>:15 [#uses=1]
+ %15 = atomicrmw xchg i32* %val2, i32 1976 monotonic
store i32 %15, i32* %old
%neg1 = sub i32 0, 10 ; <i32> [#uses=1]
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 %neg1, i32 1 ) ; <i32>:16 [#uses=1]
+ %16 = cmpxchg i32* %val2, i32 %neg1, i32 1 monotonic
store i32 %16, i32* %old
; CHECK: lock
; CHECK: cmpxchgl
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 1976, i32 1 ) ; <i32>:17 [#uses=1]
+ %17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic
store i32 %17, i32* %old
ret void
}
@@ -114,30 +114,6 @@ entry:
; CHECK: lock
; CHECK: cmpxchgl %{{.*}}, %gs:(%{{.*}})
- %0 = tail call i32 @llvm.atomic.cmp.swap.i32.p256i32(i32 addrspace(256)* %P, i32 0, i32 1)
+ %0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic
ret void
}
-
-declare i32 @llvm.atomic.cmp.swap.i32.p256i32(i32 addrspace(256)* nocapture, i32, i32) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.min.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.max.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.umax.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.umin.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
diff --git a/test/CodeGen/X86/avx-256-arith.s b/test/CodeGen/X86/avx-256-arith.s
deleted file mode 100644
index e69de29..0000000
--- a/test/CodeGen/X86/avx-256-arith.s
+++ /dev/null
diff --git a/test/CodeGen/X86/avx-256.ll b/test/CodeGen/X86/avx-256.ll
deleted file mode 100644
index 20d31e7..0000000
--- a/test/CodeGen/X86/avx-256.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mcpu=corei7 -mattr=avx | FileCheck %s
-
-@x = common global <8 x float> zeroinitializer, align 32
-@y = common global <4 x double> zeroinitializer, align 32
-
-define void @zero() nounwind ssp {
-entry:
- ; CHECK: vxorps
- ; CHECK: vmovaps
- ; CHECK: vmovaps
- store <8 x float> zeroinitializer, <8 x float>* @x, align 32
- store <4 x double> zeroinitializer, <4 x double>* @y, align 32
- ret void
-}
-
diff --git a/test/CodeGen/X86/avx-256-arith.ll b/test/CodeGen/X86/avx-arith.ll
index 5c512db..59988ca 100644
--- a/test/CodeGen/X86/avx-256-arith.ll
+++ b/test/CodeGen/X86/avx-arith.ll
@@ -114,3 +114,148 @@ entry:
ret <8 x float> %div.i
}
+; CHECK: vsqrtss
+define float @sqrtA(float %a) nounwind uwtable readnone ssp {
+entry:
+ %conv1 = tail call float @sqrtf(float %a) nounwind readnone
+ ret float %conv1
+}
+
+declare double @sqrt(double) readnone
+
+; CHECK: vsqrtsd
+define double @sqrtB(double %a) nounwind uwtable readnone ssp {
+entry:
+ %call = tail call double @sqrt(double %a) nounwind readnone
+ ret double %call
+}
+
+declare float @sqrtf(float) readnone
+
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpaddq %xmm
+; CHECK-NEXT: vpaddq %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <4 x i64> @vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+ %x = add <4 x i64> %i, %j
+ ret <4 x i64> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpaddd %xmm
+; CHECK-NEXT: vpaddd %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <8 x i32> @vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+ %x = add <8 x i32> %i, %j
+ ret <8 x i32> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpaddw %xmm
+; CHECK-NEXT: vpaddw %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <16 x i16> @vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+ %x = add <16 x i16> %i, %j
+ ret <16 x i16> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpaddb %xmm
+; CHECK-NEXT: vpaddb %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <32 x i8> @vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+ %x = add <32 x i8> %i, %j
+ ret <32 x i8> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpsubq %xmm
+; CHECK-NEXT: vpsubq %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <4 x i64> @vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+ %x = sub <4 x i64> %i, %j
+ ret <4 x i64> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpsubd %xmm
+; CHECK-NEXT: vpsubd %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <8 x i32> @vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+ %x = sub <8 x i32> %i, %j
+ ret <8 x i32> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpsubw %xmm
+; CHECK-NEXT: vpsubw %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <16 x i16> @vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+ %x = sub <16 x i16> %i, %j
+ ret <16 x i16> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpsubb %xmm
+; CHECK-NEXT: vpsubb %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <32 x i8> @vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+ %x = sub <32 x i8> %i, %j
+ ret <32 x i8> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpmulld %xmm
+; CHECK-NEXT: vpmulld %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <8 x i32> @vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+ %x = mul <8 x i32> %i, %j
+ ret <8 x i32> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpmullw %xmm
+; CHECK-NEXT: vpmullw %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <16 x i16> @vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+ %x = mul <16 x i16> %i, %j
+ ret <16 x i16> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vpmuludq %xmm
+; CHECK-NEXT: vpsrlq $32, %xmm
+; CHECK-NEXT: vpmuludq %xmm
+; CHECK-NEXT: vpsllq $32, %xmm
+; CHECK-NEXT: vpaddq %xmm
+; CHECK-NEXT: vpmuludq %xmm
+; CHECK-NEXT: vpsrlq $32, %xmm
+; CHECK-NEXT: vpmuludq %xmm
+; CHECK-NEXT: vpsllq $32, %xmm
+; CHECK-NEXT: vpsrlq $32, %xmm
+; CHECK-NEXT: vpmuludq %xmm
+; CHECK-NEXT: vpsllq $32, %xmm
+; CHECK-NEXT: vpaddq %xmm
+; CHECK-NEXT: vpaddq %xmm
+; CHECK-NEXT: vpsrlq $32, %xmm
+; CHECK-NEXT: vpmuludq %xmm
+; CHECK-NEXT: vpsllq $32, %xmm
+; CHECK-NEXT: vpaddq %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <4 x i64> @mul-v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+ %x = mul <4 x i64> %i, %j
+ ret <4 x i64> %x
+}
+
diff --git a/test/CodeGen/X86/avx-basic.ll b/test/CodeGen/X86/avx-basic.ll
new file mode 100644
index 0000000..0a46b08
--- /dev/null
+++ b/test/CodeGen/X86/avx-basic.ll
@@ -0,0 +1,107 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+@x = common global <8 x float> zeroinitializer, align 32
+@y = common global <4 x double> zeroinitializer, align 32
+@z = common global <4 x float> zeroinitializer, align 16
+
+define void @zero128() nounwind ssp {
+entry:
+ ; CHECK: vpxor
+ ; CHECK: vmovaps
+ store <4 x float> zeroinitializer, <4 x float>* @z, align 16
+ ret void
+}
+
+define void @zero256() nounwind ssp {
+entry:
+ ; CHECK: vxorps
+ ; CHECK: vmovaps
+ ; CHECK: vmovaps
+ store <8 x float> zeroinitializer, <8 x float>* @x, align 32
+ store <4 x double> zeroinitializer, <4 x double>* @y, align 32
+ ret void
+}
+
+; CHECK: vpcmpeqd
+; CHECK: vinsertf128 $1
+define void @ones([0 x float]* nocapture %RET, [0 x float]* nocapture %aFOO) nounwind {
+allocas:
+ %ptr2vec615 = bitcast [0 x float]* %RET to <8 x float>*
+ store <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float
+0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float
+0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, <8 x
+float>* %ptr2vec615, align 32
+ ret void
+}
+
+; CHECK: vpcmpeqd
+; CHECK: vinsertf128 $1
+define void @ones2([0 x i32]* nocapture %RET, [0 x i32]* nocapture %aFOO) nounwind {
+allocas:
+ %ptr2vec615 = bitcast [0 x i32]* %RET to <8 x i32>*
+ store <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32>* %ptr2vec615, align 32
+ ret void
+}
+
+;;; Just make sure this doesn't crash
+; CHECK: _ISelCrash
+define <4 x i64> @ISelCrash(<4 x i64> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 4>
+ ret <4 x i64> %shuffle
+}
+
+;;;
+;;; Check that some 256-bit vector operations are split into 128-bit ops
+; CHECK: _A
+; CHECK: vshufpd $1
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: vshufpd $1
+; CHECK-NEXT: vinsertf128 $1
+define <4 x i64> @A(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 7, i32 6>
+ ret <4 x i64> %shuffle
+}
+
+; CHECK: _B
+; CHECK: vshufpd $1, %ymm
+define <4 x i64> @B(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 undef, i32 undef, i32 6>
+ ret <4 x i64> %shuffle
+}
+
+; CHECK: movlhps
+; CHECK-NEXT: vextractf128 $1
+; CHECK-NEXT: movlhps
+; CHECK-NEXT: vinsertf128 $1
+define <4 x i64> @C(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 undef, i32 0, i32 undef, i32 6>
+ ret <4 x i64> %shuffle
+}
+
+; CHECK: vpshufd $-96
+; CHECK: vpshufd $-6
+; CHECK: vinsertf128 $1
+define <8 x i32> @D(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 10, i32 10, i32 11, i32 11>
+ ret <8 x i32> %shuffle
+}
+
+;;; Don't crash on movd
+; CHECK: _VMOVZQI2PQI
+; CHECK: vmovd (%
+define <8 x i32> @VMOVZQI2PQI([0 x float]* nocapture %aFOO) nounwind {
+allocas:
+ %ptrcast.i33.i = bitcast [0 x float]* %aFOO to i32*
+ %val.i34.i = load i32* %ptrcast.i33.i, align 4
+ %ptroffset.i22.i992 = getelementptr [0 x float]* %aFOO, i64 0, i64 1
+ %ptrcast.i23.i = bitcast float* %ptroffset.i22.i992 to i32*
+ %val.i24.i = load i32* %ptrcast.i23.i, align 4
+ %updatedret.i30.i = insertelement <8 x i32> undef, i32 %val.i34.i, i32 1
+ ret <8 x i32> %updatedret.i30.i
+}
+
diff --git a/test/CodeGen/X86/avx-bitcast.ll b/test/CodeGen/X86/avx-bitcast.ll
new file mode 100644
index 0000000..ecc71be
--- /dev/null
+++ b/test/CodeGen/X86/avx-bitcast.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s -O0 -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK: vmovsd (%
+; CHECK-NEXT: vmovd %xmm
+define i64 @bitcasti64tof64() {
+ %a = load double* undef
+ %b = bitcast double %a to i64
+ ret i64 %b
+}
+
diff --git a/test/CodeGen/X86/avx-blend.ll b/test/CodeGen/X86/avx-blend.ll
new file mode 100644
index 0000000..7729491
--- /dev/null
+++ b/test/CodeGen/X86/avx-blend.ll
@@ -0,0 +1,104 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -promote-elements -mattr=+avx | FileCheck %s
+
+; AVX128 tests:
+
+;CHECK: vsel_float
+;CHECK: vblendvps
+;CHECK: ret
+define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2
+ ret <4 x float> %vsel
+}
+
+
+;CHECK: vsel_i32
+;CHECK: vblendvps
+;CHECK: ret
+define <4 x i32> @vsel_i32(<4 x i32> %v1, <4 x i32> %v2) {
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %v1, <4 x i32> %v2
+ ret <4 x i32> %vsel
+}
+
+
+;CHECK: vsel_double
+;CHECK: vblendvpd
+;CHECK: ret
+define <2 x double> @vsel_double(<2 x double> %v1, <2 x double> %v2) {
+ %vsel = select <2 x i1> <i1 true, i1 false>, <2 x double> %v1, <2 x double> %v2
+ ret <2 x double> %vsel
+}
+
+
+;CHECK: vsel_i64
+;CHECK: vblendvpd
+;CHECK: ret
+define <2 x i64> @vsel_i64(<2 x i64> %v1, <2 x i64> %v2) {
+ %vsel = select <2 x i1> <i1 true, i1 false>, <2 x i64> %v1, <2 x i64> %v2
+ ret <2 x i64> %vsel
+}
+
+
+;CHECK: vsel_i8
+;CHECK: vpblendvb
+;CHECK: ret
+define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
+ %vsel = select <16 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <16 x i8> %v1, <16 x i8> %v2
+ ret <16 x i8> %vsel
+}
+
+
+; AVX256 tests:
+
+
+;CHECK: vsel_float
+;CHECK: vblendvps
+;CHECK: ret
+define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
+ %vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x float> %v1, <8 x float> %v2
+ ret <8 x float> %vsel
+}
+
+;CHECK: vsel_i32
+;CHECK: vblendvps
+;CHECK: ret
+define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) {
+ %vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i32> %v1, <8 x i32> %v2
+ ret <8 x i32> %vsel
+}
+
+;CHECK: vsel_double
+;CHECK: vblendvpd
+;CHECK: ret
+define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) {
+ %vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x double> %v1, <8 x double> %v2
+ ret <8 x double> %vsel
+}
+
+;CHECK: vsel_i64
+;CHECK: vblendvpd
+;CHECK: ret
+define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
+ %vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i64> %v1, <8 x i64> %v2
+ ret <8 x i64> %vsel
+}
+
+;; TEST blend + compares
+; CHECK: A
+define <2 x double> @A(<2 x double> %x, <2 x double> %y) {
+ ; CHECK: vcmplepd
+ ; CHECK: vblendvpd
+ %max_is_x = fcmp oge <2 x double> %x, %y
+ %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %max
+}
+
+; CHECK: B
+define <2 x double> @B(<2 x double> %x, <2 x double> %y) {
+ ; CHECK: vcmpnlepd
+ ; CHECK: vblendvpd
+ %min_is_x = fcmp ult <2 x double> %x, %y
+ %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %min
+}
+
+
diff --git a/test/CodeGen/X86/avx-cast.ll b/test/CodeGen/X86/avx-cast.ll
new file mode 100644
index 0000000..d6d2415
--- /dev/null
+++ b/test/CodeGen/X86/avx-cast.ll
@@ -0,0 +1,47 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK: vxorps
+; CHECK-NEXT: vinsertf128 $0
+define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <4 x float> %m, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x float> %shuffle.i
+}
+
+; CHECK: vxorps
+; CHECK-NEXT: vinsertf128 $0
+define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <2 x double> %m, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
+ ret <4 x double> %shuffle.i
+}
+
+; CHECK: vpxor
+; CHECK-NEXT: vinsertf128 $0
+define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <2 x i64> %m, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
+ ret <4 x i64> %shuffle.i
+}
+
+; CHECK-NOT: vextractf128 $0
+define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <8 x float> %m, <8 x float> %m, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x float> %shuffle.i
+}
+
+; CHECK-NOT: vextractf128 $0
+define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <4 x i64> %m, <4 x i64> %m, <2 x i32> <i32 0, i32 1>
+ ret <2 x i64> %shuffle.i
+}
+
+; CHECK-NOT: vextractf128 $0
+define <2 x double> @castF(<4 x double> %m) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <4 x double> %m, <4 x double> %m, <2 x i32> <i32 0, i32 1>
+ ret <2 x double> %shuffle.i
+}
+
diff --git a/test/CodeGen/X86/avx-cmp.ll b/test/CodeGen/X86/avx-cmp.ll
new file mode 100644
index 0000000..a050d6ab
--- /dev/null
+++ b/test/CodeGen/X86/avx-cmp.ll
@@ -0,0 +1,150 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK: vcmpltps %ymm
+; CHECK-NOT: vucomiss
+define <8 x i32> @cmp00(<8 x float> %a, <8 x float> %b) nounwind readnone {
+ %bincmp = fcmp olt <8 x float> %a, %b
+ %s = sext <8 x i1> %bincmp to <8 x i32>
+ ret <8 x i32> %s
+}
+
+; CHECK: vcmpltpd %ymm
+; CHECK-NOT: vucomisd
+define <4 x i64> @cmp01(<4 x double> %a, <4 x double> %b) nounwind readnone {
+ %bincmp = fcmp olt <4 x double> %a, %b
+ %s = sext <4 x i1> %bincmp to <4 x i64>
+ ret <4 x i64> %s
+}
+
+declare void @scale() nounwind uwtable
+
+; CHECK: vucomisd
+define void @render() nounwind uwtable {
+entry:
+ br i1 undef, label %for.cond5, label %for.end52
+
+for.cond5:
+ %or.cond = and i1 undef, false
+ br i1 %or.cond, label %for.body33, label %for.cond5
+
+for.cond30:
+ br i1 false, label %for.body33, label %for.cond5
+
+for.body33:
+ %tobool = fcmp une double undef, 0.000000e+00
+ br i1 %tobool, label %if.then, label %for.cond30
+
+if.then:
+ call void @scale()
+ br label %for.cond30
+
+for.end52:
+ ret void
+}
+
+; CHECK: vextractf128 $1
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vpcmpgtd %xmm
+; CHECK-NEXT: vpcmpgtd %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <8 x i32> @int256-cmp(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+ %bincmp = icmp slt <8 x i32> %i, %j
+ %x = sext <8 x i1> %bincmp to <8 x i32>
+ ret <8 x i32> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vpcmpgtq %xmm
+; CHECK-NEXT: vpcmpgtq %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <4 x i64> @v4i64-cmp(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+ %bincmp = icmp slt <4 x i64> %i, %j
+ %x = sext <4 x i1> %bincmp to <4 x i64>
+ ret <4 x i64> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vpcmpgtw %xmm
+; CHECK-NEXT: vpcmpgtw %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <16 x i16> @v16i16-cmp(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+ %bincmp = icmp slt <16 x i16> %i, %j
+ %x = sext <16 x i1> %bincmp to <16 x i16>
+ ret <16 x i16> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vpcmpgtb %xmm
+; CHECK-NEXT: vpcmpgtb %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <32 x i8> @v32i8-cmp(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+ %bincmp = icmp slt <32 x i8> %i, %j
+ %x = sext <32 x i1> %bincmp to <32 x i8>
+ ret <32 x i8> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vpcmpeqd %xmm
+; CHECK-NEXT: vpcmpeqd %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <8 x i32> @int256-cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+ %bincmp = icmp eq <8 x i32> %i, %j
+ %x = sext <8 x i1> %bincmp to <8 x i32>
+ ret <8 x i32> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vpcmpeqq %xmm
+; CHECK-NEXT: vpcmpeqq %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <4 x i64> @v4i64-cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+ %bincmp = icmp eq <4 x i64> %i, %j
+ %x = sext <4 x i1> %bincmp to <4 x i64>
+ ret <4 x i64> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vpcmpeqw %xmm
+; CHECK-NEXT: vpcmpeqw %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <16 x i16> @v16i16-cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+ %bincmp = icmp eq <16 x i16> %i, %j
+ %x = sext <16 x i1> %bincmp to <16 x i16>
+ ret <16 x i16> %x
+}
+
+; CHECK: vextractf128 $1
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vpcmpeqb %xmm
+; CHECK-NEXT: vpcmpeqb %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <32 x i8> @v32i8-cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+ %bincmp = icmp eq <32 x i8> %i, %j
+ %x = sext <32 x i1> %bincmp to <32 x i8>
+ ret <32 x i8> %x
+}
+
+;; Scalar comparison
+
+; CHECK: scalarcmpA
+; CHECK: vcmpeqsd
+define i32 @scalarcmpA() uwtable ssp {
+ %cmp29 = fcmp oeq double undef, 0.000000e+00
+ %res = zext i1 %cmp29 to i32
+ ret i32 %res
+}
+
+; CHECK: scalarcmpB
+; CHECK: vcmpeqss
+define i32 @scalarcmpB() uwtable ssp {
+ %cmp29 = fcmp oeq float undef, 0.000000e+00
+ %res = zext i1 %cmp29 to i32
+ ret i32 %res
+}
+
diff --git a/test/CodeGen/X86/avx-128.ll b/test/CodeGen/X86/avx-cvt.ll
index 57a3826..6c0bd58 100644
--- a/test/CodeGen/X86/avx-128.ll
+++ b/test/CodeGen/X86/avx-cvt.ll
@@ -1,24 +1,41 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
-@z = common global <4 x float> zeroinitializer, align 16
+; CHECK: vcvtdq2ps %ymm
+define <8 x float> @sitofp00(<8 x i32> %a) nounwind {
+ %b = sitofp <8 x i32> %a to <8 x float>
+ ret <8 x float> %b
+}
-define void @zero() nounwind ssp {
-entry:
- ; CHECK: vxorps
- ; CHECK: vmovaps
- store <4 x float> zeroinitializer, <4 x float>* @z, align 16
- ret void
+; CHECK: vcvttps2dq %ymm
+define <8 x i32> @fptosi00(<8 x float> %a) nounwind {
+ %b = fptosi <8 x float> %a to <8 x i32>
+ ret <8 x i32> %b
}
-define void @fpext() nounwind uwtable {
-entry:
- %f = alloca float, align 4
- %d = alloca double, align 8
- %tmp = load float* %f, align 4
- ; CHECK: vcvtss2sd
- %conv = fpext float %tmp to double
- store double %conv, double* %d, align 8
- ret void
+; CHECK: vcvtdq2pd %xmm
+define <4 x double> @sitofp01(<4 x i32> %a) {
+ %b = sitofp <4 x i32> %a to <4 x double>
+ ret <4 x double> %b
+}
+
+; CHECK: vcvtpd2dqy %ymm
+define <4 x i32> @fptosi01(<4 x double> %a) {
+ %b = fptosi <4 x double> %a to <4 x i32>
+ ret <4 x i32> %b
+}
+
+; CHECK: vcvtpd2psy %ymm
+; CHECK-NEXT: vcvtpd2psy %ymm
+; CHECK-NEXT: vinsertf128 $1
+define <8 x float> @fptrunc00(<8 x double> %b) nounwind {
+ %a = fptrunc <8 x double> %b to <8 x float>
+ ret <8 x float> %a
+}
+
+; CHECK: vcvtps2pd %xmm
+define <4 x double> @fpext00(<4 x float> %b) nounwind {
+ %a = fpext <4 x float> %b to <4 x double>
+ ret <4 x double> %a
}
; CHECK: vcvtsi2sdq (%
@@ -52,3 +69,15 @@ entry:
%conv = sitofp i64 %tmp1 to float
ret float %conv
}
+
+; CHECK: vcvtss2sd
+define void @fpext() nounwind uwtable {
+entry:
+ %f = alloca float, align 4
+ %d = alloca double, align 8
+ %tmp = load float* %f, align 4
+ %conv = fpext float %tmp to double
+ store double %conv, double* %d, align 8
+ ret void
+}
+
diff --git a/test/CodeGen/X86/avx-load-store.ll b/test/CodeGen/X86/avx-load-store.ll
index 5196089..07a63ef 100644
--- a/test/CodeGen/X86/avx-load-store.ll
+++ b/test/CodeGen/X86/avx-load-store.ll
@@ -1,9 +1,10 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; RUN: llc -O0 < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s -check-prefix=CHECK_O0
; CHECK: vmovaps
; CHECK: vmovaps
-; CHECK: vmovapd
-; CHECK: vmovapd
+; CHECK: vmovaps
+; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp {
@@ -22,3 +23,83 @@ entry:
declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)
+;;
+;; The two tests below check that load + scalar_to_vector
+;; + ins_subvec + zext are folded into a single vmovss or vmovsd
+
+; CHECK: vmovss (%
+define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
+ %val = load float* %ptr
+ %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
+ ret <8 x float> %i0
+}
+
+; CHECK: vmovsd (%
+define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
+ %val = load double* %ptr
+ %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
+ ret <4 x double> %i0
+}
+
+; CHECK: vmovaps %ymm
+define void @storev16i16(<16 x i16> %a) nounwind {
+ store <16 x i16> %a, <16 x i16>* undef, align 32
+ unreachable
+}
+
+; CHECK: vmovups %ymm
+define void @storev16i16_01(<16 x i16> %a) nounwind {
+ store <16 x i16> %a, <16 x i16>* undef, align 4
+ unreachable
+}
+
+; CHECK: vmovaps %ymm
+define void @storev32i8(<32 x i8> %a) nounwind {
+ store <32 x i8> %a, <32 x i8>* undef, align 32
+ unreachable
+}
+
+; CHECK: vmovups %ymm
+define void @storev32i8_01(<32 x i8> %a) nounwind {
+ store <32 x i8> %a, <32 x i8>* undef, align 4
+ unreachable
+}
+
+; It is faster to do two 128-bit stores if the data is already in XMM
+; registers, for example after an integer operation.
+; CHECK: _double_save
+; CHECK-NOT: vinsertf128 $1
+; CHECK-NOT: vinsertf128 $0
+; CHECK: vmovaps %xmm
+; CHECK: vmovaps %xmm
+define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
+entry:
+ %Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x i32> %Z, <8 x i32>* %P, align 16
+ ret void
+}
+
+declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x float>) nounwind
+
+; CHECK_O0: _f_f
+; CHECK_O0: vmovss LCPI
+; CHECK_O0: vxorps %xmm
+; CHECK_O0: vmovss %xmm
+define void @f_f() nounwind {
+allocas:
+ br i1 undef, label %cif_mask_all, label %cif_mask_mixed
+
+cif_mask_all: ; preds = %allocas
+ unreachable
+
+cif_mask_mixed: ; preds = %allocas
+ br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check
+
+cif_mixed_test_all: ; preds = %cif_mask_mixed
+ call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x float> <float 0xFFFFFFFFE0000000, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, <8 x float> undef) nounwind
+ unreachable
+
+cif_mixed_test_any_check: ; preds = %cif_mask_mixed
+ unreachable
+}
+
diff --git a/test/CodeGen/X86/avx-256-logic.ll b/test/CodeGen/X86/avx-logic.ll
index d9e5d08..518c09c 100644
--- a/test/CodeGen/X86/avx-256-logic.ll
+++ b/test/CodeGen/X86/avx-logic.ll
@@ -159,3 +159,21 @@ entry:
%2 = bitcast <8 x i32> %and.i to <8 x float>
ret <8 x float> %2
}
+
+;;; Test that basic 2 x i64 logic uses the integer version on AVX
+
+; CHECK: vpandn %xmm
+define <2 x i64> @vpandn(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ %y = xor <2 x i64> %a, <i64 -1, i64 -1>
+ %x = and <2 x i64> %a, %y
+ ret <2 x i64> %x
+}
+
+; CHECK: vpand %xmm
+define <2 x i64> @vpand(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ %x = and <2 x i64> %a, %b
+ ret <2 x i64> %x
+}
+
diff --git a/test/CodeGen/X86/avx-minmax.ll b/test/CodeGen/X86/avx-minmax.ll
new file mode 100644
index 0000000..f36ba7b
--- /dev/null
+++ b/test/CodeGen/X86/avx-minmax.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -march=x86-64 -mattr=+avx -asm-verbose=false -join-physregs -enable-unsafe-fp-math -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=UNSAFE %s
+
+; UNSAFE: maxpd:
+; UNSAFE: vmaxpd {{.+}}, %xmm
+define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) {
+ %max_is_x = fcmp oge <2 x double> %x, %y
+ %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %max
+}
+
+; UNSAFE: minpd:
+; UNSAFE: vminpd {{.+}}, %xmm
+define <2 x double> @minpd(<2 x double> %x, <2 x double> %y) {
+ %min_is_x = fcmp ole <2 x double> %x, %y
+ %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %min
+}
+
+; UNSAFE: maxps:
+; UNSAFE: vmaxps {{.+}}, %xmm
+define <4 x float> @maxps(<4 x float> %x, <4 x float> %y) {
+ %max_is_x = fcmp oge <4 x float> %x, %y
+ %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %max
+}
+
+; UNSAFE: minps:
+; UNSAFE: vminps {{.+}}, %xmm
+define <4 x float> @minps(<4 x float> %x, <4 x float> %y) {
+ %min_is_x = fcmp ole <4 x float> %x, %y
+ %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %min
+}
+
+; UNSAFE: vmaxpd:
+; UNSAFE: vmaxpd %ymm
+define <4 x double> @vmaxpd(<4 x double> %x, <4 x double> %y) {
+ %max_is_x = fcmp oge <4 x double> %x, %y
+ %max = select <4 x i1> %max_is_x, <4 x double> %x, <4 x double> %y
+ ret <4 x double> %max
+}
+
+; UNSAFE: vminpd:
+; UNSAFE: vminpd %ymm
+define <4 x double> @vminpd(<4 x double> %x, <4 x double> %y) {
+ %min_is_x = fcmp ole <4 x double> %x, %y
+ %min = select <4 x i1> %min_is_x, <4 x double> %x, <4 x double> %y
+ ret <4 x double> %min
+}
+
+; UNSAFE: vmaxps:
+; UNSAFE: vmaxps %ymm
+define <8 x float> @vmaxps(<8 x float> %x, <8 x float> %y) {
+ %max_is_x = fcmp oge <8 x float> %x, %y
+ %max = select <8 x i1> %max_is_x, <8 x float> %x, <8 x float> %y
+ ret <8 x float> %max
+}
+
+; UNSAFE: vminps:
+; UNSAFE: vminps %ymm
+define <8 x float> @vminps(<8 x float> %x, <8 x float> %y) {
+ %min_is_x = fcmp ole <8 x float> %x, %y
+ %min = select <8 x i1> %min_is_x, <8 x float> %x, <8 x float> %y
+ ret <8 x float> %min
+}
diff --git a/test/CodeGen/X86/avx-movdup.ll b/test/CodeGen/X86/avx-movdup.ll
new file mode 100644
index 0000000..42d84de
--- /dev/null
+++ b/test/CodeGen/X86/avx-movdup.ll
@@ -0,0 +1,34 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK: vmovsldup
+define <8 x float> @movdupA(<8 x float> %src) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <8 x float> %src, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ ret <8 x float> %shuffle.i
+}
+
+; CHECK: vmovshdup
+define <8 x float> @movdupB(<8 x float> %src) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <8 x float> %src, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
+ ret <8 x float> %shuffle.i
+}
+
+; CHECK: vmovsldup
+define <4 x i64> @movdupC(<4 x i64> %src) nounwind uwtable readnone ssp {
+entry:
+ %0 = bitcast <4 x i64> %src to <8 x float>
+ %shuffle.i = shufflevector <8 x float> %0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ %1 = bitcast <8 x float> %shuffle.i to <4 x i64>
+ ret <4 x i64> %1
+}
+
+; CHECK: vmovshdup
+define <4 x i64> @movdupD(<4 x i64> %src) nounwind uwtable readnone ssp {
+entry:
+ %0 = bitcast <4 x i64> %src to <8 x float>
+ %shuffle.i = shufflevector <8 x float> %0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
+ %1 = bitcast <8 x float> %shuffle.i to <4 x i64>
+ ret <4 x i64> %1
+}
+
diff --git a/test/CodeGen/X86/avx-select.ll b/test/CodeGen/X86/avx-select.ll
new file mode 100644
index 0000000..58a75ef
--- /dev/null
+++ b/test/CodeGen/X86/avx-select.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK: _select00
+; CHECK: vmovaps
+; CHECK-NEXT: LBB
+define <8 x i32> @select00(i32 %a, <8 x i32> %b) nounwind {
+ %cmpres = icmp eq i32 %a, 255
+ %selres = select i1 %cmpres, <8 x i32> zeroinitializer, <8 x i32> %b
+ %res = xor <8 x i32> %b, %selres
+ ret <8 x i32> %res
+}
+
+; CHECK: _select01
+; CHECK: vmovaps
+; CHECK-NEXT: LBB
+define <4 x i64> @select01(i32 %a, <4 x i64> %b) nounwind {
+ %cmpres = icmp eq i32 %a, 255
+ %selres = select i1 %cmpres, <4 x i64> zeroinitializer, <4 x i64> %b
+ %res = xor <4 x i64> %b, %selres
+ ret <4 x i64> %res
+}
+
diff --git a/test/CodeGen/X86/avx-shift.ll b/test/CodeGen/X86/avx-shift.ll
new file mode 100644
index 0000000..3ea39a2
--- /dev/null
+++ b/test/CodeGen/X86/avx-shift.ll
@@ -0,0 +1,75 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+;;; Shift left
+; CHECK: vpslld
+; CHECK: vpslld
+define <8 x i32> @vshift00(<8 x i32> %a) nounwind readnone {
+ %s = shl <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32
+2>
+ ret <8 x i32> %s
+}
+
+; CHECK: vpsllw
+; CHECK: vpsllw
+define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone {
+ %s = shl <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
+ ret <16 x i16> %s
+}
+
+; CHECK: vpsllq
+; CHECK: vpsllq
+define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone {
+ %s = shl <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
+ ret <4 x i64> %s
+}
+
+;;; Logical Shift right
+; CHECK: vpsrld
+; CHECK: vpsrld
+define <8 x i32> @vshift03(<8 x i32> %a) nounwind readnone {
+ %s = lshr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32
+2>
+ ret <8 x i32> %s
+}
+
+; CHECK: vpsrlw
+; CHECK: vpsrlw
+define <16 x i16> @vshift04(<16 x i16> %a) nounwind readnone {
+ %s = lshr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
+ ret <16 x i16> %s
+}
+
+; CHECK: vpsrlq
+; CHECK: vpsrlq
+define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone {
+ %s = lshr <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
+ ret <4 x i64> %s
+}
+
+;;; Arithmetic Shift right
+; CHECK: vpsrad
+; CHECK: vpsrad
+define <8 x i32> @vshift06(<8 x i32> %a) nounwind readnone {
+ %s = ashr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32
+2>
+ ret <8 x i32> %s
+}
+
+; CHECK: vpsraw
+; CHECK: vpsraw
+define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone {
+ %s = ashr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
+ ret <16 x i16> %s
+}
+
+;;; Support variable shifts
+; CHECK: _vshift08
+; CHECK: vextractf128 $1
+; CHECK: vpslld $23
+; CHECK: vextractf128 $1
+; CHECK: vpslld $23
+define <8 x i32> @vshift08(<8 x i32> %a) nounwind {
+ %bitop = shl <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %a
+ ret <8 x i32> %bitop
+}
+
diff --git a/test/CodeGen/X86/avx-shuffle.ll b/test/CodeGen/X86/avx-shuffle.ll
new file mode 100644
index 0000000..0db334d
--- /dev/null
+++ b/test/CodeGen/X86/avx-shuffle.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; PR11102
+define <4 x float> @test1(<4 x float> %a) nounwind {
+ %b = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 2, i32 5, i32 undef, i32 undef>
+ ret <4 x float> %b
+; CHECK: test1:
+; CHECK: vshufps
+; CHECK: vpshufd
+}
diff --git a/test/CodeGen/X86/avx-splat.ll b/test/CodeGen/X86/avx-splat.ll
new file mode 100644
index 0000000..af20b90
--- /dev/null
+++ b/test/CodeGen/X86/avx-splat.ll
@@ -0,0 +1,103 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+
+; CHECK: vpunpcklbw %xmm
+; CHECK-NEXT: vpunpckhbw %xmm
+; CHECK-NEXT: vinsertf128 $1
+; CHECK-NEXT: vpermilps $85
+define <32 x i8> @funcA(<32 x i8> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <32 x i8> %shuffle
+}
+
+; CHECK: vpunpckhwd %xmm
+; CHECK-NEXT: vinsertf128 $1
+; CHECK-NEXT: vpermilps $85
+define <16 x i16> @funcB(<16 x i16> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <16 x i16> %shuffle
+}
+
+; CHECK: vmovd
+; CHECK-NEXT: vmovlhps %xmm
+; CHECK-NEXT: vinsertf128 $1
+define <4 x i64> @funcC(i64 %q) nounwind uwtable readnone ssp {
+entry:
+ %vecinit.i = insertelement <4 x i64> undef, i64 %q, i32 0
+ %vecinit2.i = insertelement <4 x i64> %vecinit.i, i64 %q, i32 1
+ %vecinit4.i = insertelement <4 x i64> %vecinit2.i, i64 %q, i32 2
+ %vecinit6.i = insertelement <4 x i64> %vecinit4.i, i64 %q, i32 3
+ ret <4 x i64> %vecinit6.i
+}
+
+; CHECK: vshufpd $0
+; CHECK-NEXT: vinsertf128 $1
+define <4 x double> @funcD(double %q) nounwind uwtable readnone ssp {
+entry:
+ %vecinit.i = insertelement <4 x double> undef, double %q, i32 0
+ %vecinit2.i = insertelement <4 x double> %vecinit.i, double %q, i32 1
+ %vecinit4.i = insertelement <4 x double> %vecinit2.i, double %q, i32 2
+ %vecinit6.i = insertelement <4 x double> %vecinit4.i, double %q, i32 3
+ ret <4 x double> %vecinit6.i
+}
+
+; Test this simple opt:
+; shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
+; To:
+; shuffle (vload ptr), undef, <1, 1, 1, 1>
+; CHECK: vmovdqa
+; CHECK-NEXT: vinsertf128 $1
+; CHECK-NEXT: vpermilps $-1
+define <8 x float> @funcE() nounwind {
+allocas:
+ %udx495 = alloca [18 x [18 x float]], align 32
+ br label %for_test505.preheader
+
+for_test505.preheader: ; preds = %for_test505.preheader, %allocas
+ br i1 undef, label %for_exit499, label %for_test505.preheader
+
+for_exit499: ; preds = %for_test505.preheader
+ br i1 undef, label %__load_and_broadcast_32.exit1249, label %load.i1247
+
+load.i1247: ; preds = %for_exit499
+ %ptr1227 = getelementptr [18 x [18 x float]]* %udx495, i64 0, i64 1, i64 1
+ %ptr.i1237 = bitcast float* %ptr1227 to i32*
+ %val.i1238 = load i32* %ptr.i1237, align 4
+ %ret6.i1245 = insertelement <8 x i32> undef, i32 %val.i1238, i32 6
+ %ret7.i1246 = insertelement <8 x i32> %ret6.i1245, i32 %val.i1238, i32 7
+ %phitmp = bitcast <8 x i32> %ret7.i1246 to <8 x float>
+ br label %__load_and_broadcast_32.exit1249
+
+__load_and_broadcast_32.exit1249: ; preds = %load.i1247, %for_exit499
+ %load_broadcast12281250 = phi <8 x float> [ %phitmp, %load.i1247 ], [ undef, %for_exit499 ]
+ ret <8 x float> %load_broadcast12281250
+}
+
+; CHECK: vinsertf128 $1
+; CHECK-NEXT: vpermilps $0
+define <8 x float> @funcF(i32 %val) nounwind {
+ %ret6 = insertelement <8 x i32> undef, i32 %val, i32 6
+ %ret7 = insertelement <8 x i32> %ret6, i32 %val, i32 7
+ %tmp = bitcast <8 x i32> %ret7 to <8 x float>
+ ret <8 x float> %tmp
+}
+
+; CHECK: vinsertf128 $1
+; CHECK-NEXT: vpermilps $0
+define <8 x float> @funcG(<8 x float> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x float> %shuffle
+}
+
+; CHECK: vextractf128 $1
+; CHECK-NEXT: vinsertf128 $1
+; CHECK-NEXT: vpermilps $85
+define <8 x float> @funcH(<8 x float> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <8 x float> %shuffle
+}
+
diff --git a/test/CodeGen/X86/avx-unpack.ll b/test/CodeGen/X86/avx-unpack.ll
new file mode 100644
index 0000000..d420101
--- /dev/null
+++ b/test/CodeGen/X86/avx-unpack.ll
@@ -0,0 +1,89 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK: vunpckhps
+define <8 x float> @unpackhips(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x float> %shuffle.i
+}
+
+; CHECK: vunpckhpd
+define <4 x double> @unpackhipd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x double> %shuffle.i
+}
+
+; CHECK: vunpcklps
+define <8 x float> @unpacklops(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
+ ret <8 x float> %shuffle.i
+}
+
+; CHECK: vunpcklpd
+define <4 x double> @unpacklopd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x double> %shuffle.i
+}
+
+; CHECK-NOT: vunpcklps %ymm
+define <8 x float> @unpacklops-not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x float> %shuffle.i
+}
+
+; CHECK-NOT: vunpcklpd %ymm
+define <4 x double> @unpacklopd-not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x double> %shuffle.i
+}
+
+; CHECK-NOT: vunpckhps %ymm
+define <8 x float> @unpackhips-not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13>
+ ret <8 x float> %shuffle.i
+}
+
+; CHECK-NOT: vunpckhpd %ymm
+define <4 x double> @unpackhipd-not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x double> %shuffle.i
+}
+
+;;;;
+;;;; Unpack versions using the fp unit for int unpacking
+;;;;
+
+; CHECK: vunpckhps
+define <8 x i32> @unpackhips1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i32> %shuffle.i
+}
+
+; CHECK: vunpckhpd
+define <4 x i64> @unpackhipd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i64> %shuffle.i
+}
+
+; CHECK: vunpcklps
+define <8 x i32> @unpacklops1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
+ ret <8 x i32> %shuffle.i
+}
+
+; CHECK: vunpcklpd
+define <4 x i64> @unpacklopd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp {
+entry:
+ %shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i64> %shuffle.i
+}
diff --git a/test/CodeGen/X86/avx-vbroadcast.ll b/test/CodeGen/X86/avx-vbroadcast.ll
new file mode 100644
index 0000000..89b4188
--- /dev/null
+++ b/test/CodeGen/X86/avx-vbroadcast.ll
@@ -0,0 +1,94 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; XFAIL: *
+
+; XFAIL this file for now because of PR8156; once that is fixed, merge this with avx-splat.ll
+
+; CHECK: vbroadcastsd (%
+define <4 x i64> @A(i64* %ptr) nounwind uwtable readnone ssp {
+entry:
+ %q = load i64* %ptr, align 8
+ %vecinit.i = insertelement <4 x i64> undef, i64 %q, i32 0
+ %vecinit2.i = insertelement <4 x i64> %vecinit.i, i64 %q, i32 1
+ %vecinit4.i = insertelement <4 x i64> %vecinit2.i, i64 %q, i32 2
+ %vecinit6.i = insertelement <4 x i64> %vecinit4.i, i64 %q, i32 3
+ ret <4 x i64> %vecinit6.i
+}
+
+; CHECK: vbroadcastss (%
+define <8 x i32> @B(i32* %ptr) nounwind uwtable readnone ssp {
+entry:
+ %q = load i32* %ptr, align 4
+ %vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
+ %vecinit2.i = insertelement <8 x i32> %vecinit.i, i32 %q, i32 1
+ %vecinit4.i = insertelement <8 x i32> %vecinit2.i, i32 %q, i32 2
+ %vecinit6.i = insertelement <8 x i32> %vecinit4.i, i32 %q, i32 3
+ ret <8 x i32> %vecinit6.i
+}
+
+; CHECK: vbroadcastsd (%
+define <4 x double> @C(double* %ptr) nounwind uwtable readnone ssp {
+entry:
+ %q = load double* %ptr, align 8
+ %vecinit.i = insertelement <4 x double> undef, double %q, i32 0
+ %vecinit2.i = insertelement <4 x double> %vecinit.i, double %q, i32 1
+ %vecinit4.i = insertelement <4 x double> %vecinit2.i, double %q, i32 2
+ %vecinit6.i = insertelement <4 x double> %vecinit4.i, double %q, i32 3
+ ret <4 x double> %vecinit6.i
+}
+
+; CHECK: vbroadcastss (%
+define <8 x float> @D(float* %ptr) nounwind uwtable readnone ssp {
+entry:
+ %q = load float* %ptr, align 4
+ %vecinit.i = insertelement <8 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <8 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <8 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <8 x float> %vecinit4.i, float %q, i32 3
+ ret <8 x float> %vecinit6.i
+}
+
+;;;; 128-bit versions
+
+; CHECK: vbroadcastss (%
+define <4 x float> @E(float* %ptr) nounwind uwtable readnone ssp {
+entry:
+ %q = load float* %ptr, align 4
+ %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
+ ret <4 x float> %vecinit6.i
+}
+
+; CHECK: vbroadcastss (%
+define <4 x i32> @F(i32* %ptr) nounwind uwtable readnone ssp {
+entry:
+ %q = load i32* %ptr, align 4
+ %vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
+ %vecinit2.i = insertelement <4 x i32> %vecinit.i, i32 %q, i32 1
+ %vecinit4.i = insertelement <4 x i32> %vecinit2.i, i32 %q, i32 2
+ %vecinit6.i = insertelement <4 x i32> %vecinit4.i, i32 %q, i32 3
+ ret <4 x i32> %vecinit6.i
+}
+
+; Unsupported vbroadcasts
+
+; CHECK: _G
+; CHECK-NOT: vbroadcastsd (%
+; CHECK: ret
+define <2 x i64> @G(i64* %ptr) nounwind uwtable readnone ssp {
+entry:
+ %q = load i64* %ptr, align 8
+ %vecinit.i = insertelement <2 x i64> undef, i64 %q, i32 0
+ %vecinit2.i = insertelement <2 x i64> %vecinit.i, i64 %q, i32 1
+ ret <2 x i64> %vecinit2.i
+}
+
+; CHECK: _H
+; CHECK-NOT: vbroadcastss
+; CHECK: ret
+define <4 x i32> @H(<4 x i32> %a) {
+ %x = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ ret <4 x i32> %x
+}
+
diff --git a/test/CodeGen/X86/avx-vextractf128.ll b/test/CodeGen/X86/avx-vextractf128.ll
new file mode 100644
index 0000000..dccf901
--- /dev/null
+++ b/test/CodeGen/X86/avx-vextractf128.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK-NOT: vunpck
+; CHECK: vextractf128 $1
+define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 8, i32 8, i32 8>
+ ret <8 x float> %shuffle
+}
+
+; CHECK-NOT: vunpck
+; CHECK: vextractf128 $1
+define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 4>
+ ret <4 x double> %shuffle
+}
+
diff --git a/test/CodeGen/X86/avx-vinsertf128.ll b/test/CodeGen/X86/avx-vinsertf128.ll
new file mode 100644
index 0000000..cda1331
--- /dev/null
+++ b/test/CodeGen/X86/avx-vinsertf128.ll
@@ -0,0 +1,58 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck -check-prefix=CHECK-SSE %s
+
+; CHECK-NOT: vunpck
+; CHECK: vinsertf128 $1
+define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
+ ret <8 x float> %shuffle
+}
+
+; CHECK-NOT: vunpck
+; CHECK: vinsertf128 $1
+define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 1>
+ ret <4 x double> %shuffle
+}
+
+declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
+
+declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
+
+; Just check that no crash happens
+; CHECK-SSE: _insert_crash
+define void @insert_crash() nounwind {
+allocas:
+ %v1.i.i451 = shufflevector <4 x double> zeroinitializer, <4 x double> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %ret_0a.i.i.i452 = shufflevector <4 x double> %v1.i.i451, <4 x double> undef, <2 x i32> <i32 0, i32 1>
+ %vret_0.i.i.i454 = tail call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %ret_0a.i.i.i452, <2 x double> undef) nounwind
+ %ret_val.i.i.i463 = tail call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %vret_0.i.i.i454, <2 x double> undef) nounwind
+ %ret.i1.i.i464 = extractelement <2 x double> %ret_val.i.i.i463, i32 0
+ %double2float = fptrunc double %ret.i1.i.i464 to float
+ %smearinsert50 = insertelement <4 x float> undef, float %double2float, i32 3
+ %blendAsInt.i503 = bitcast <4 x float> %smearinsert50 to <4 x i32>
+ store <4 x i32> %blendAsInt.i503, <4 x i32>* undef, align 4
+ ret void
+}
+
+;; DAG Combine must remove useless vinsertf128 instructions
+
+; CHECK: DAGCombineA
+; CHECK-NOT: vinsertf128 $1
+define <4 x i32> @DAGCombineA(<4 x i32> %v1) nounwind readonly {
+ %1 = shufflevector <4 x i32> %v1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %2
+}
+
+; CHECK: DAGCombineB
+; CHECK: vpaddd %xmm
+; CHECK-NOT: vinsertf128 $1
+; CHECK: vpaddd %xmm
+define <8 x i32> @DAGCombineB(<8 x i32> %v1, <8 x i32> %v2) nounwind readonly {
+ %1 = add <8 x i32> %v1, %v2
+ %2 = add <8 x i32> %1, %v1
+ ret <8 x i32> %2
+}
diff --git a/test/CodeGen/X86/avx-vmovddup.ll b/test/CodeGen/X86/avx-vmovddup.ll
new file mode 100644
index 0000000..1c56fe2
--- /dev/null
+++ b/test/CodeGen/X86/avx-vmovddup.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK: vmovddup %ymm
+define <4 x i64> @A(<4 x i64> %a) {
+ %c = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+ ret <4 x i64> %c
+}
+
+; CHECK: vmovddup (%
+define <4 x i64> @B(<4 x i64>* %ptr) {
+ %a = load <4 x i64>* %ptr
+ %c = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+ ret <4 x i64> %c
+}
diff --git a/test/CodeGen/X86/avx-vperm2f128.ll b/test/CodeGen/X86/avx-vperm2f128.ll
new file mode 100644
index 0000000..3550a90
--- /dev/null
+++ b/test/CodeGen/X86/avx-vperm2f128.ll
@@ -0,0 +1,62 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK: vperm2f128 $1
+define <8 x float> @A(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
+ ret <8 x float> %shuffle
+}
+
+; CHECK: vperm2f128 $48
+define <8 x float> @B(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x float> %shuffle
+}
+
+; CHECK: vperm2f128 $0
+define <8 x float> @C(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ ret <8 x float> %shuffle
+}
+
+; CHECK: vperm2f128 $17
+define <8 x float> @D(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %shuffle
+}
+
+; CHECK: vperm2f128 $17
+define <32 x i8> @E(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ ret <32 x i8> %shuffle
+}
+
+; CHECK: vperm2f128 $33
+define <4 x i64> @E2(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+ ret <4 x i64> %shuffle
+}
+
+;;;; Cases with undef indices mixed in the mask
+
+; CHECK: vperm2f128 $33
+define <8 x float> @F(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 9, i32 undef, i32 11>
+ ret <8 x float> %shuffle
+}
+
+;;;; Cases where we must not select vperm2f128
+
+; CHECK: _G
+; CHECK-NOT: vperm2f128
+define <8 x float> @G(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 12, i32 undef, i32 15>
+ ret <8 x float> %shuffle
+}
diff --git a/test/CodeGen/X86/avx-vpermil.ll b/test/CodeGen/X86/avx-vpermil.ll
new file mode 100644
index 0000000..49b2f54
--- /dev/null
+++ b/test/CodeGen/X86/avx-vpermil.ll
@@ -0,0 +1,45 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK: vpermilps
+define <8 x float> @funcA(<8 x float> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 1, i32 5, i32 6, i32 7, i32 5>
+ ret <8 x float> %shuffle
+}
+
+; CHECK: vpermilpd
+define <4 x double> @funcB(<4 x double> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 3>
+ ret <4 x double> %shuffle
+}
+
+; CHECK: vpermilps
+define <8 x i32> @funcC(<8 x i32> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 1, i32 5, i32 6, i32 7, i32 5>
+ ret <8 x i32> %shuffle
+}
+
+; CHECK: vpermilpd
+define <4 x i64> @funcD(<4 x i64> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 3>
+ ret <4 x i64> %shuffle
+}
+
+; vpermil should match masks like this: <u,3,1,2,4,u,5,6>. Check that the
+; target specific mask was correctly generated.
+; CHECK: vpermilps $-100
+define <8 x float> @funcE(<8 x float> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 8, i32 3, i32 1, i32 2, i32 4, i32 8, i32 5, i32 6>
+ ret <8 x float> %shuffle
+}
+
+; CHECK-NOT: vpermilps
+define <8 x float> @funcF(<8 x float> %a) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> zeroinitializer, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+ ret <8 x float> %shuffle
+}
diff --git a/test/CodeGen/X86/avx-vshufp.ll b/test/CodeGen/X86/avx-vshufp.ll
new file mode 100644
index 0000000..f06548d
--- /dev/null
+++ b/test/CodeGen/X86/avx-vshufp.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; CHECK: vshufps $-53, %ymm
+define <8 x float> @A(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 2, i32 8, i32 11, i32 7, i32 6, i32 12, i32 15>
+ ret <8 x float> %shuffle
+}
+
+; CHECK: vshufpd $10, %ymm
+define <4 x double> @B(<4 x double> %a, <4 x double> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %shuffle
+}
+
+; CHECK: vshufps $-53, %ymm
+define <8 x float> @C(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 undef, i32 undef, i32 11, i32 undef, i32 6, i32 12, i32 undef>
+ ret <8 x float> %shuffle
+}
+
+; CHECK: vshufpd $2, %ymm
+define <4 x double> @D(<4 x double> %a, <4 x double> %b) nounwind uwtable readnone ssp {
+entry:
+ %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 undef>
+ ret <4 x double> %shuffle
+}
diff --git a/test/CodeGen/X86/avx-vzeroupper.ll b/test/CodeGen/X86/avx-vzeroupper.ll
new file mode 100644
index 0000000..eaf236c
--- /dev/null
+++ b/test/CodeGen/X86/avx-vzeroupper.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+define <4 x float> @do_sse_local(<4 x float> %a) nounwind uwtable readnone ssp {
+entry:
+ %add.i = fadd <4 x float> %a, %a
+ ret <4 x float> %add.i
+}
+
+; CHECK: _test00
+define <4 x float> @test00(<4 x float> %a, <4 x float> %b) nounwind uwtable ssp {
+entry:
+ %add.i = fadd <4 x float> %a, %b
+ ; CHECK: vzeroupper
+ ; CHECK-NEXT: callq _do_sse
+ %call3 = tail call <4 x float> @do_sse(<4 x float> %add.i) nounwind
+ %sub.i = fsub <4 x float> %call3, %add.i
+ ; CHECK-NOT: vzeroupper
+ ; CHECK: callq _do_sse_local
+ %call8 = tail call <4 x float> @do_sse_local(<4 x float> %sub.i)
+ ; CHECK: vzeroupper
+ ; CHECK-NEXT: jmp _do_sse
+ %call10 = tail call <4 x float> @do_sse(<4 x float> %call8) nounwind
+ ret <4 x float> %call10
+}
+
+declare <4 x float> @do_sse(<4 x float>)
diff --git a/test/CodeGen/X86/barrier-sse.ll b/test/CodeGen/X86/barrier-sse.ll
index 6190c36..bbfeea6 100644
--- a/test/CodeGen/X86/barrier-sse.ll
+++ b/test/CodeGen/X86/barrier-sse.ll
@@ -3,19 +3,9 @@
; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep mfence
; RUN: llc < %s -march=x86 -mattr=+sse2 | grep MEMBARRIER
-
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
define void @test() {
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 true, i1 false)
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 true, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 true, i1 false)
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true , i1 false)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 false , i1 false)
- ret void
+ fence acquire
+ fence release
+ fence acq_rel
+ ret void
}
diff --git a/test/CodeGen/X86/barrier.ll b/test/CodeGen/X86/barrier.ll
index fad6ef6..4769b39 100644
--- a/test/CodeGen/X86/barrier.ll
+++ b/test/CodeGen/X86/barrier.ll
@@ -1,7 +1,6 @@
; RUN: llc < %s -march=x86 -mattr=-sse2 | grep lock
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
define void @test() {
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 false, i1 false)
+ fence seq_cst
ret void
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/X86/bmi.ll b/test/CodeGen/X86/bmi.ll
new file mode 100644
index 0000000..88c09e3
--- /dev/null
+++ b/test/CodeGen/X86/bmi.ll
@@ -0,0 +1,53 @@
+; RUN: llc < %s -march=x86-64 -mattr=+bmi | FileCheck %s
+
+define i32 @t1(i32 %x) nounwind {
+ %tmp = tail call i32 @llvm.cttz.i32( i32 %x )
+ ret i32 %tmp
+; CHECK: t1:
+; CHECK: tzcntl
+}
+
+declare i32 @llvm.cttz.i32(i32) nounwind readnone
+
+define i16 @t2(i16 %x) nounwind {
+ %tmp = tail call i16 @llvm.cttz.i16( i16 %x )
+ ret i16 %tmp
+; CHECK: t2:
+; CHECK: tzcntw
+}
+
+declare i16 @llvm.cttz.i16(i16) nounwind readnone
+
+define i64 @t3(i64 %x) nounwind {
+ %tmp = tail call i64 @llvm.cttz.i64( i64 %x )
+ ret i64 %tmp
+; CHECK: t3:
+; CHECK: tzcntq
+}
+
+declare i64 @llvm.cttz.i64(i64) nounwind readnone
+
+define i8 @t4(i8 %x) nounwind {
+ %tmp = tail call i8 @llvm.cttz.i8( i8 %x )
+ ret i8 %tmp
+; CHECK: t4:
+; CHECK: tzcntw
+}
+
+declare i8 @llvm.cttz.i8(i8) nounwind readnone
+
+define i32 @andn32(i32 %x, i32 %y) nounwind readnone {
+ %tmp1 = xor i32 %x, -1
+ %tmp2 = and i32 %y, %tmp1
+ ret i32 %tmp2
+; CHECK: andn32:
+; CHECK: andnl
+}
+
+define i64 @andn64(i64 %x, i64 %y) nounwind readnone {
+ %tmp1 = xor i64 %x, -1
+ %tmp2 = and i64 %tmp1, %y
+ ret i64 %tmp2
+; CHECK: andn64:
+; CHECK: andnq
+}
diff --git a/test/CodeGen/X86/bswap.ll b/test/CodeGen/X86/bswap.ll
index a7540aa..d2d6f90 100644
--- a/test/CodeGen/X86/bswap.ll
+++ b/test/CodeGen/X86/bswap.ll
@@ -1,6 +1,6 @@
; bswap should be constant folded when it is passed a constant argument
-; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -march=x86 -mcpu=i686 | FileCheck %s
declare i16 @llvm.bswap.i16(i16)
diff --git a/test/CodeGen/X86/change-compare-stride-0.ll b/test/CodeGen/X86/change-compare-stride-0.ll
index 3a383ee..439f7b0 100644
--- a/test/CodeGen/X86/change-compare-stride-0.ll
+++ b/test/CodeGen/X86/change-compare-stride-0.ll
@@ -1,4 +1,7 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -march=x86 -enable-lsr-nested | FileCheck %s
+;
+; Nested LSR is required to optimize this case.
+; We do not expect to see this form of IR without -enable-iv-rewrite.
define void @borf(i8* nocapture %in, i8* nocapture %out) nounwind {
; CHECK: borf:
diff --git a/test/CodeGen/X86/change-compare-stride-1.ll b/test/CodeGen/X86/change-compare-stride-1.ll
index eee3b79..8b53ae2 100644
--- a/test/CodeGen/X86/change-compare-stride-1.ll
+++ b/test/CodeGen/X86/change-compare-stride-1.ll
@@ -1,4 +1,7 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -enable-lsr-nested | FileCheck %s
+;
+; Nested LSR is required to optimize this case.
+; We do not expect to see this form of IR without -enable-iv-rewrite.
define void @borf(i8* nocapture %in, i8* nocapture %out) nounwind {
; CHECK: borf:
diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll
index 39d9d1e..7a8d6e6 100644
--- a/test/CodeGen/X86/cmov.ll
+++ b/test/CodeGen/X86/cmov.ll
@@ -90,8 +90,8 @@ bb.i.i.i: ; preds = %entry
; CHECK: test4:
; CHECK: g_100
; CHECK: testb
-; CHECK: testb %al, %al
-; CHECK-NEXT: setne %al
+; CHECK-NOT: xor
+; CHECK: setne
; CHECK-NEXT: testb
func_4.exit.i: ; preds = %bb.i.i.i, %entry
diff --git a/test/CodeGen/X86/cmpxchg16b.ll b/test/CodeGen/X86/cmpxchg16b.ll
new file mode 100644
index 0000000..ba1c4ef
--- /dev/null
+++ b/test/CodeGen/X86/cmpxchg16b.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=x86-64 -mcpu=core2 | FileCheck %s
+
+; Basic 128-bit cmpxchg
+define void @t1(i128* nocapture %p) nounwind ssp {
+entry:
+; CHECK: movl $1, %ebx
+; CHECK: lock
+; CHECK-NEXT: cmpxchg16b
+ %r = cmpxchg i128* %p, i128 0, i128 1 seq_cst
+ ret void
+}
+
+; FIXME: Handle 128-bit atomicrmw/load atomic/store atomic
diff --git a/test/CodeGen/X86/coalescer-dce.ll b/test/CodeGen/X86/coalescer-dce.ll
new file mode 100644
index 0000000..7f72e3d
--- /dev/null
+++ b/test/CodeGen/X86/coalescer-dce.ll
@@ -0,0 +1,80 @@
+; RUN: llc < %s -disable-fp-elim -disable-machine-dce -verify-coalescing
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-macosx10.7.0"
+
+; This test case has a sub-register join followed by a remat:
+;
+; 256L %vreg2<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg2 GR64:%vreg7
+; Considering merging %vreg2 with %vreg7:sub_32bit
+; Cross-class to GR64.
+; RHS = %vreg2 = [256d,272d:0) 0@256d
+; LHS = %vreg7 = [208d,256d:0)[304L,480L:0) 0@208d
+; updated: 272L %vreg0<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg0 GR64:%vreg7
+; Joined. Result = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
+;
+; 272L %vreg10:sub_32bit<def> = COPY %vreg7:sub_32bit<kill>, %vreg10<imp-def>; GR64:%vreg10,%vreg7
+; Considering merging %vreg7 with %vreg10
+; RHS = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
+; LHS = %vreg10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4) 0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d
+; Remat: %vreg10<def> = MOV64r0 %vreg10<imp-def>, %EFLAGS<imp-def,dead>, %vreg10<imp-def>; GR64:%vreg10
+; Shrink: %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
+; live-in at 240L
+; live-in at 416L
+; live-in at 320L
+; live-in at 304L
+; Shrunk: %vreg7 = [208d,256d:0)[304L,480L:0) 0@208d
+;
+; The COPY at 256L is rewritten as a partial def, and that would artificially
+; extend the live range of %vreg7 to end at 256d. When the joined copy is
+; removed, -verify-coalescing complains about the dangling kill.
+;
+; <rdar://problem/9967101>
+
+define void @f1() nounwind uwtable ssp {
+bb:
+ br label %bb1
+
+bb1:
+ %tmp = phi i32 [ 0, %bb ], [ %tmp21, %bb20 ]
+ br label %bb2
+
+bb2:
+ br i1 undef, label %bb5, label %bb8
+
+bb4:
+ br i1 undef, label %bb2, label %bb20
+
+bb5:
+ br i1 undef, label %bb4, label %bb20
+
+bb8:
+ %tmp9 = phi i32 [ %tmp24, %bb23 ], [ 0, %bb2 ]
+ br i1 false, label %bb41, label %bb10
+
+bb10:
+ %tmp11 = sub nsw i32 %tmp9, %tmp
+ br i1 false, label %bb2, label %bb26
+
+bb20:
+ %tmp21 = phi i32 [ undef, %bb4 ], [ undef, %bb5 ], [ %tmp9, %bb27 ], [ undef, %bb32 ]
+ %tmp22 = phi i32 [ undef, %bb4 ], [ undef, %bb5 ], [ %tmp11, %bb27 ], [ undef, %bb32 ]
+ br label %bb1
+
+bb23:
+ %tmp24 = add nsw i32 %tmp9, 1
+ br label %bb8
+
+bb26:
+ br i1 undef, label %bb27, label %bb32
+
+bb27:
+ %tmp28 = zext i32 %tmp11 to i64
+ %tmp30 = icmp eq i64 undef, %tmp28
+ br i1 %tmp30, label %bb20, label %bb27
+
+bb32:
+ br i1 undef, label %bb20, label %bb23
+
+bb41:
+ ret void
+}
diff --git a/test/CodeGen/X86/coalescer-remat.ll b/test/CodeGen/X86/coalescer-remat.ll
index 4db520f..eb7b7a8 100644
--- a/test/CodeGen/X86/coalescer-remat.ll
+++ b/test/CodeGen/X86/coalescer-remat.ll
@@ -1,15 +1,13 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep xor | count 3
-@val = internal global i64 0 ; <i64*> [#uses=1]
-@"\01LC" = internal constant [7 x i8] c"0x%lx\0A\00" ; <[7 x i8]*> [#uses=1]
+@val = internal global i64 0
+@"\01LC" = internal constant [7 x i8] c"0x%lx\0A\00"
define i32 @main() nounwind {
entry:
- %0 = tail call i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* @val, i64 0, i64 1) ; <i64> [#uses=1]
- %1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i64 0), i64 %0) nounwind ; <i32> [#uses=0]
- ret i32 0
+ %0 = cmpxchg i64* @val, i64 0, i64 1 monotonic
+ %1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i64 0), i64 %0) nounwind
+ ret i32 0
}
-declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind
-
declare i32 @printf(i8*, ...) nounwind
diff --git a/test/CodeGen/X86/code_placement_eh.ll b/test/CodeGen/X86/code_placement_eh.ll
index 172d591..2da3f9f 100644
--- a/test/CodeGen/X86/code_placement_eh.ll
+++ b/test/CodeGen/X86/code_placement_eh.ll
@@ -22,11 +22,13 @@ bb18.i5.i: ; preds = %.noexc6.i.i, %bb51.
to label %.noexc6.i.i unwind label %lpad.i.i ; <float> [#uses=0]
lpad.i.i: ; preds = %bb18.i5.i, %.noexc6.i.i
- %eh_ptr.i.i = call i8* @llvm.eh.exception() ; <i8*> [#uses=1]
+ %lpadval.i.i = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
unreachable
lpad59.i: ; preds = %bb15
- %eh_ptr60.i = call i8* @llvm.eh.exception() ; <i8*> [#uses=1]
+ %lpadval60.i.i = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
unreachable
bb15: ; preds = %.noexc3, %invcont5
@@ -34,9 +36,7 @@ bb15: ; preds = %.noexc3, %invcont5
to label %.noexc3 unwind label %lpad59.i
}
-declare i8* @llvm.eh.exception() nounwind readonly
-
-declare i32 @llvm.eh.selector(i8*, i8*, ...) nounwind
+declare i32 @__gxx_personality_v0(...)
declare float @sinf(float) readonly
diff --git a/test/CodeGen/X86/crash-nosse.ll b/test/CodeGen/X86/crash-nosse.ll
new file mode 100644
index 0000000..1cec25b
--- /dev/null
+++ b/test/CodeGen/X86/crash-nosse.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -mattr=-sse2,-sse41 -verify-machineinstrs
+target triple = "x86_64-unknown-linux-gnu"
+
+; PR10503
+; This test case produces INSERT_SUBREG 0, <undef> instructions that
+; ProcessImplicitDefs doesn't eliminate.
+define void @autogen_136178_500() {
+BB:
+ %Shuff6 = shufflevector <32 x i32> undef, <32 x i32> undef, <32 x i32> <i32 27, i32 29, i32 31, i32 undef, i32 undef, i32 37, i32 39, i32 41, i32 undef, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 undef, i32 61, i32 63, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 undef, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25>
+ %S17 = select i1 true, <8 x float>* null, <8 x float>* null
+ br label %CF
+
+CF: ; preds = %CF, %BB
+ %L19 = load <8 x float>* %S17
+ %BC = bitcast <32 x i32> %Shuff6 to <32 x float>
+ %S28 = fcmp ord double 0x3ED1A1F787BB2185, 0x3EE59DE55A8DF890
+ br i1 %S28, label %CF, label %CF39
+
+CF39: ; preds = %CF39, %CF
+ store <8 x float> %L19, <8 x float>* %S17
+ %I35 = insertelement <32 x float> %BC, float 0x3EC2489F60000000, i32 9
+ %S38 = fcmp ule double 0x3EE59DE55A8DF890, 0x3EC4AB0CBB986A1A
+ br i1 %S38, label %CF39, label %CF40
+
+CF40: ; preds = %CF39
+ ret void
+}
diff --git a/test/CodeGen/X86/crash.ll b/test/CodeGen/X86/crash.ll
index b5b1ad4..1531457 100644
--- a/test/CodeGen/X86/crash.ll
+++ b/test/CodeGen/X86/crash.ll
@@ -316,3 +316,78 @@ declare void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_c
declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+
+; PR10463
+; Spilling a virtual register with <undef> uses.
+define void @autogen_239_1000() {
+BB:
+ %Shuff = shufflevector <8 x double> undef, <8 x double> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 undef, i32 undef>
+ br label %CF
+
+CF:
+ %B16 = frem <8 x double> zeroinitializer, %Shuff
+ %E19 = extractelement <8 x double> %Shuff, i32 5
+ br i1 undef, label %CF, label %CF75
+
+CF75:
+ br i1 undef, label %CF75, label %CF76
+
+CF76:
+ store double %E19, double* undef
+ br i1 undef, label %CF76, label %CF77
+
+CF77:
+ %B55 = fmul <8 x double> %B16, undef
+ br label %CF77
+}
+
+; PR10527
+define void @pr10527() nounwind uwtable {
+entry:
+ br label %"4"
+
+"3":
+ %0 = load <2 x i32>* null, align 8
+ %1 = xor <2 x i32> zeroinitializer, %0
+ %2 = and <2 x i32> %1, %6
+ %3 = or <2 x i32> undef, %2
+ %4 = and <2 x i32> %3, undef
+ store <2 x i32> %4, <2 x i32>* undef
+ %5 = load <2 x i32>* undef, align 1
+ br label %"4"
+
+"4":
+ %6 = phi <2 x i32> [ %5, %"3" ], [ zeroinitializer, %entry ]
+ %7 = icmp ult i32 undef, undef
+ br i1 %7, label %"3", label %"5"
+
+"5":
+ ret void
+}
+
+; PR11078
+;
+; A virtual register used by the "foo" inline asm memory operand gets
+; constrained to GR32_ABCD during coalescing. This makes the inline asm
+; impossible to allocate without splitting the live range and reinflating the
+; register class around the inline asm.
+;
+; The constraint originally comes from the TEST8ri optimization of (icmp (and %t0, 1), 0).
+
+@__force_order = external hidden global i32, align 4
+define void @pr11078(i32* %pgd) nounwind {
+entry:
+ %t0 = load i32* %pgd, align 4
+ %and2 = and i32 %t0, 1
+ %tobool = icmp eq i32 %and2, 0
+ br i1 %tobool, label %if.then, label %if.end
+
+if.then:
+ %t1 = tail call i32 asm sideeffect "bar", "=r,=*m,~{dirflag},~{fpsr},~{flags}"(i32* @__force_order) nounwind
+ br label %if.end
+
+if.end:
+ %t6 = inttoptr i32 %t0 to i64*
+ %t11 = tail call i64 asm sideeffect "foo", "=*m,=A,{bx},{cx},1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %t6, i32 0, i32 0, i64 0) nounwind
+ ret void
+}
diff --git a/test/CodeGen/X86/dbg-at-specficiation.ll b/test/CodeGen/X86/dbg-at-specficiation.ll
new file mode 100644
index 0000000..aa5e6ef
--- /dev/null
+++ b/test/CodeGen/X86/dbg-at-specficiation.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s | FileCheck %s
+; Radar 10147769
+; Do not unnecessarily use AT_specification DIE.
+; CHECK-NOT: AT_specification
+
+@a = common global [10 x i32] zeroinitializer, align 16
+
+!llvm.dbg.cu = !{!0}
+
+!0 = metadata !{i32 720913, i32 0, i32 12, metadata !"x.c", metadata !"/private/tmp", metadata !"clang version 3.0 (trunk 140253)", i1 true, i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{metadata !2}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4}
+!4 = metadata !{metadata !5}
+!5 = metadata !{i32 720948, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 1, metadata !7, i32 0, i32 1, [10 x i32]* @a} ; [ DW_TAG_variable ]
+!6 = metadata !{i32 720937, metadata !"x.c", metadata !"/private/tmp", null} ; [ DW_TAG_file_type ]
+!7 = metadata !{i32 720897, null, metadata !"", null, i32 0, i64 320, i64 32, i32 0, i32 0, metadata !8, metadata !9, i32 0, i32 0} ; [ DW_TAG_array_type ]
+!8 = metadata !{i32 720932, null, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 720929, i64 0, i64 9} ; [ DW_TAG_subrange_type ]
diff --git a/test/CodeGen/X86/dbg-inline.ll b/test/CodeGen/X86/dbg-inline.ll
new file mode 100644
index 0000000..523c62e
--- /dev/null
+++ b/test/CodeGen/X86/dbg-inline.ll
@@ -0,0 +1,140 @@
+; RUN: llc < %s | FileCheck %s
+; Radar 7881628, 9747970
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-macosx10.7.0"
+
+%class.APFloat = type { i32 }
+
+define i32 @_ZNK7APFloat9partCountEv(%class.APFloat* nocapture %this) nounwind uwtable readonly optsize ssp align 2 {
+entry:
+ tail call void @llvm.dbg.value(metadata !{%class.APFloat* %this}, i64 0, metadata !28), !dbg !41
+ %prec = getelementptr inbounds %class.APFloat* %this, i64 0, i32 0, !dbg !42
+ %tmp = load i32* %prec, align 4, !dbg !42, !tbaa !44
+ tail call void @llvm.dbg.value(metadata !{i32 %tmp}, i64 0, metadata !47), !dbg !48
+ %add.i = add i32 %tmp, 42, !dbg !49
+ ret i32 %add.i, !dbg !42
+}
+
+define zeroext i1 @_ZNK7APFloat14bitwiseIsEqualERKS_(%class.APFloat* %this, %class.APFloat* %rhs) uwtable optsize ssp align 2 {
+entry:
+ tail call void @llvm.dbg.value(metadata !{%class.APFloat* %this}, i64 0, metadata !29), !dbg !51
+ tail call void @llvm.dbg.value(metadata !{%class.APFloat* %rhs}, i64 0, metadata !30), !dbg !52
+ tail call void @llvm.dbg.value(metadata !{%class.APFloat* %this}, i64 0, metadata !53), !dbg !55
+ %prec.i = getelementptr inbounds %class.APFloat* %this, i64 0, i32 0, !dbg !56
+;CHECK: DW_TAG_inlined_subroutine
+;CHECK: DW_AT_abstract_origin
+;CHECK: DW_AT_ranges
+ %tmp.i = load i32* %prec.i, align 4, !dbg !56, !tbaa !44
+ tail call void @llvm.dbg.value(metadata !{i32 %tmp.i}, i64 0, metadata !57), !dbg !58
+ %add.i.i = add i32 %tmp.i, 42, !dbg !59
+ tail call void @llvm.dbg.value(metadata !{i32 %add.i.i}, i64 0, metadata !31), !dbg !54
+ %call2 = tail call i64* @_ZNK7APFloat16significandPartsEv(%class.APFloat* %this) optsize, !dbg !60
+ tail call void @llvm.dbg.value(metadata !{i64* %call2}, i64 0, metadata !34), !dbg !60
+ %call3 = tail call i64* @_ZNK7APFloat16significandPartsEv(%class.APFloat* %rhs) optsize, !dbg !61
+ tail call void @llvm.dbg.value(metadata !{i64* %call3}, i64 0, metadata !37), !dbg !61
+ %tmp = zext i32 %add.i.i to i64
+ br label %for.cond, !dbg !62
+
+for.cond: ; preds = %for.inc, %entry
+ %indvar = phi i64 [ %indvar.next, %for.inc ], [ 0, %entry ]
+ %tmp13 = sub i64 %tmp, %indvar, !dbg !62
+ %i.0 = trunc i64 %tmp13 to i32, !dbg !62
+ %cmp = icmp sgt i32 %i.0, 0, !dbg !62
+ br i1 %cmp, label %for.body, label %return, !dbg !62
+
+for.body: ; preds = %for.cond
+ %p.0 = getelementptr i64* %call2, i64 %indvar, !dbg !63
+ %tmp6 = load i64* %p.0, align 8, !dbg !63, !tbaa !66
+ %tmp8 = load i64* %call3, align 8, !dbg !63, !tbaa !66
+ %cmp9 = icmp eq i64 %tmp6, %tmp8, !dbg !63
+ br i1 %cmp9, label %for.inc, label %return, !dbg !63
+
+for.inc: ; preds = %for.body
+ %indvar.next = add i64 %indvar, 1, !dbg !67
+ br label %for.cond, !dbg !67
+
+return: ; preds = %for.cond, %for.body
+ %retval.0 = phi i1 [ false, %for.body ], [ true, %for.cond ]
+ ret i1 %retval.0, !dbg !68
+}
+
+declare i64* @_ZNK7APFloat16significandPartsEv(%class.APFloat*) optsize
+
+declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
+
+!llvm.dbg.cu = !{!0}
+!llvm.dbg.sp = !{!1, !7, !12, !23, !24, !25}
+!llvm.dbg.lv._ZNK7APFloat9partCountEv = !{!28}
+!llvm.dbg.lv._ZNK7APFloat14bitwiseIsEqualERKS_ = !{!29, !30, !31, !34, !37}
+!llvm.dbg.lv._ZL16partCountForBitsj = !{!38}
+!llvm.dbg.gv = !{!39}
+
+!0 = metadata !{i32 655377, i32 0, i32 4, metadata !"/Volumes/Athwagate/R9747970/apf.cc", metadata !"/private/tmp", metadata !"clang version 3.0 (trunk 136149)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{i32 655406, i32 0, metadata !2, metadata !"bitwiseIsEqual", metadata !"bitwiseIsEqual", metadata !"_ZNK7APFloat14bitwiseIsEqualERKS_", metadata !3, i32 8, metadata !19, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null} ; [ DW_TAG_subprogram ]
+!2 = metadata !{i32 655362, metadata !0, metadata !"APFloat", metadata !3, i32 6, i64 32, i64 32, i32 0, i32 0, null, metadata !4, i32 0, null, null} ; [ DW_TAG_class_type ]
+!3 = metadata !{i32 655401, metadata !"/Volumes/Athwagate/R9747970/apf.cc", metadata !"/private/tmp", metadata !0} ; [ DW_TAG_file_type ]
+!4 = metadata !{metadata !5, metadata !1, metadata !7, metadata !12}
+!5 = metadata !{i32 655373, metadata !2, metadata !"prec", metadata !3, i32 13, i64 32, i64 32, i64 0, i32 0, metadata !6} ; [ DW_TAG_member ]
+!6 = metadata !{i32 655396, metadata !0, metadata !"unsigned int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
+!7 = metadata !{i32 655406, i32 0, metadata !2, metadata !"partCount", metadata !"partCount", metadata !"_ZNK7APFloat9partCountEv", metadata !3, i32 9, metadata !8, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null} ; [ DW_TAG_subprogram ]
+!8 = metadata !{i32 655381, metadata !3, metadata !"", metadata !3, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !9, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!9 = metadata !{metadata !6, metadata !10}
+!10 = metadata !{i32 655375, metadata !0, metadata !"", i32 0, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !11} ; [ DW_TAG_pointer_type ]
+!11 = metadata !{i32 655398, metadata !0, metadata !"", null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !2} ; [ DW_TAG_const_type ]
+!12 = metadata !{i32 655406, i32 0, metadata !2, metadata !"significandParts", metadata !"significandParts", metadata !"_ZNK7APFloat16significandPartsEv", metadata !3, i32 11, metadata !13, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null} ; [ DW_TAG_subprogram ]
+!13 = metadata !{i32 655381, metadata !3, metadata !"", metadata !3, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !14, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!14 = metadata !{metadata !15, metadata !10}
+!15 = metadata !{i32 655375, metadata !0, metadata !"", null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !16} ; [ DW_TAG_pointer_type ]
+!16 = metadata !{i32 655382, metadata !0, metadata !"integerPart", metadata !3, i32 2, i64 0, i64 0, i64 0, i32 0, metadata !17} ; [ DW_TAG_typedef ]
+!17 = metadata !{i32 655382, metadata !0, metadata !"uint64_t", metadata !3, i32 1, i64 0, i64 0, i64 0, i32 0, metadata !18} ; [ DW_TAG_typedef ]
+!18 = metadata !{i32 655396, metadata !0, metadata !"long long unsigned int", null, i32 0, i64 64, i64 64, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
+!19 = metadata !{i32 655381, metadata !3, metadata !"", metadata !3, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !20, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!20 = metadata !{metadata !21, metadata !10, metadata !22}
+!21 = metadata !{i32 655396, metadata !0, metadata !"bool", null, i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ]
+!22 = metadata !{i32 655376, metadata !0, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !11} ; [ DW_TAG_reference_type ]
+!23 = metadata !{i32 655406, i32 0, metadata !0, metadata !"partCount", metadata !"partCount", metadata !"_ZNK7APFloat9partCountEv", metadata !3, i32 23, metadata !8, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (%class.APFloat*)* @_ZNK7APFloat9partCountEv, null, metadata !7} ; [ DW_TAG_subprogram ]
+!24 = metadata !{i32 655406, i32 0, metadata !0, metadata !"bitwiseIsEqual", metadata !"bitwiseIsEqual", metadata !"_ZNK7APFloat14bitwiseIsEqualERKS_", metadata !3, i32 28, metadata !19, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i1 (%class.APFloat*, %class.APFloat*)* @_ZNK7APFloat14bitwiseIsEqualERKS_, null, metadata !1} ; [ DW_TAG_subprogram ]
+!25 = metadata !{i32 655406, i32 0, metadata !3, metadata !"partCountForBits", metadata !"partCountForBits", metadata !"", metadata !3, i32 17, metadata !26, i1 true, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, null, null, null} ; [ DW_TAG_subprogram ]
+!26 = metadata !{i32 655381, metadata !3, metadata !"", metadata !3, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !27, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!27 = metadata !{metadata !6}
+!28 = metadata !{i32 655617, metadata !23, metadata !"this", metadata !3, i32 16777238, metadata !10, i32 64, i32 0} ; [ DW_TAG_arg_variable ]
+!29 = metadata !{i32 655617, metadata !24, metadata !"this", metadata !3, i32 16777244, metadata !10, i32 64, i32 0} ; [ DW_TAG_arg_variable ]
+!30 = metadata !{i32 655617, metadata !24, metadata !"rhs", metadata !3, i32 33554460, metadata !22, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
+!31 = metadata !{i32 655616, metadata !32, metadata !"i", metadata !3, i32 29, metadata !33, i32 0, i32 0} ; [ DW_TAG_auto_variable ]
+!32 = metadata !{i32 655371, metadata !24, i32 28, i32 56, metadata !3, i32 1} ; [ DW_TAG_lexical_block ]
+!33 = metadata !{i32 655396, metadata !0, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!34 = metadata !{i32 655616, metadata !32, metadata !"p", metadata !3, i32 30, metadata !35, i32 0, i32 0} ; [ DW_TAG_auto_variable ]
+!35 = metadata !{i32 655375, metadata !0, metadata !"", null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !36} ; [ DW_TAG_pointer_type ]
+!36 = metadata !{i32 655398, metadata !0, metadata !"", null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !16} ; [ DW_TAG_const_type ]
+!37 = metadata !{i32 655616, metadata !32, metadata !"q", metadata !3, i32 31, metadata !35, i32 0, i32 0} ; [ DW_TAG_auto_variable ]
+!38 = metadata !{i32 655617, metadata !25, metadata !"bits", metadata !3, i32 16777232, metadata !6, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
+!39 = metadata !{i32 655412, i32 0, metadata !3, metadata !"integerPartWidth", metadata !"integerPartWidth", metadata !"integerPartWidth", metadata !3, i32 3, metadata !40, i32 1, i32 1, i32 42} ; [ DW_TAG_variable ]
+!40 = metadata !{i32 655398, metadata !0, metadata !"", null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !6} ; [ DW_TAG_const_type ]
+!41 = metadata !{i32 22, i32 23, metadata !23, null}
+!42 = metadata !{i32 24, i32 10, metadata !43, null}
+!43 = metadata !{i32 655371, metadata !23, i32 23, i32 1, metadata !3, i32 0} ; [ DW_TAG_lexical_block ]
+!44 = metadata !{metadata !"int", metadata !45}
+!45 = metadata !{metadata !"omnipotent char", metadata !46}
+!46 = metadata !{metadata !"Simple C/C++ TBAA", null}
+!47 = metadata !{i32 655617, metadata !25, metadata !"bits", metadata !3, i32 16777232, metadata !6, i32 0, metadata !42} ; [ DW_TAG_arg_variable ]
+!48 = metadata !{i32 16, i32 58, metadata !25, metadata !42}
+!49 = metadata !{i32 18, i32 3, metadata !50, metadata !42}
+!50 = metadata !{i32 655371, metadata !25, i32 17, i32 1, metadata !3, i32 4} ; [ DW_TAG_lexical_block ]
+!51 = metadata !{i32 28, i32 15, metadata !24, null}
+!52 = metadata !{i32 28, i32 45, metadata !24, null}
+!53 = metadata !{i32 655617, metadata !23, metadata !"this", metadata !3, i32 16777238, metadata !10, i32 64, metadata !54} ; [ DW_TAG_arg_variable ]
+!54 = metadata !{i32 29, i32 10, metadata !32, null}
+!55 = metadata !{i32 22, i32 23, metadata !23, metadata !54}
+!56 = metadata !{i32 24, i32 10, metadata !43, metadata !54}
+!57 = metadata !{i32 655617, metadata !25, metadata !"bits", metadata !3, i32 16777232, metadata !6, i32 0, metadata !56} ; [ DW_TAG_arg_variable ]
+!58 = metadata !{i32 16, i32 58, metadata !25, metadata !56}
+!59 = metadata !{i32 18, i32 3, metadata !50, metadata !56}
+!60 = metadata !{i32 30, i32 24, metadata !32, null}
+!61 = metadata !{i32 31, i32 24, metadata !32, null}
+!62 = metadata !{i32 32, i32 3, metadata !32, null}
+!63 = metadata !{i32 33, i32 5, metadata !64, null}
+!64 = metadata !{i32 655371, metadata !65, i32 32, i32 25, metadata !3, i32 3} ; [ DW_TAG_lexical_block ]
+!65 = metadata !{i32 655371, metadata !32, i32 32, i32 3, metadata !3, i32 2} ; [ DW_TAG_lexical_block ]
+!66 = metadata !{metadata !"long long", metadata !45}
+!67 = metadata !{i32 32, i32 15, metadata !65, null}
+!68 = metadata !{i32 37, i32 1, metadata !32, null}
diff --git a/test/CodeGen/X86/dbg-large-unsigned-const.ll b/test/CodeGen/X86/dbg-large-unsigned-const.ll
new file mode 100644
index 0000000..fc295c6
--- /dev/null
+++ b/test/CodeGen/X86/dbg-large-unsigned-const.ll
@@ -0,0 +1,61 @@
+; RUN: llc -filetype=obj %s -o /dev/null
+; Handle large unsigned constant values.
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
+target triple = "i386-apple-macosx10.7.0"
+
+define zeroext i1 @_Z3iseRKxS0_(i64* nocapture %LHS, i64* nocapture %RHS) nounwind readonly optsize ssp {
+entry:
+ tail call void @llvm.dbg.value(metadata !{i64* %LHS}, i64 0, metadata !7), !dbg !13
+ tail call void @llvm.dbg.value(metadata !{i64* %RHS}, i64 0, metadata !11), !dbg !14
+ %tmp1 = load i64* %LHS, align 4, !dbg !15, !tbaa !17
+ %tmp3 = load i64* %RHS, align 4, !dbg !15, !tbaa !17
+ %cmp = icmp eq i64 %tmp1, %tmp3, !dbg !15
+ ret i1 %cmp, !dbg !15
+}
+
+define zeroext i1 @_Z2fnx(i64 %a) nounwind readnone optsize ssp {
+entry:
+ tail call void @llvm.dbg.value(metadata !{i64 %a}, i64 0, metadata !12), !dbg !20
+ tail call void @llvm.dbg.value(metadata !{i64 %a}, i64 0, metadata !12), !dbg !20
+ tail call void @llvm.dbg.value(metadata !{i64 %a}, i64 0, metadata !21), !dbg !24
+ tail call void @llvm.dbg.value(metadata !25, i64 0, metadata !26), !dbg !27
+ %cmp.i = icmp eq i64 %a, 9223372036854775807, !dbg !28
+ ret i1 %cmp.i, !dbg !22
+}
+
+declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
+
+!llvm.dbg.cu = !{!0}
+!llvm.dbg.sp = !{!1, !6}
+!llvm.dbg.lv._Z3iseRKxS0_ = !{!7, !11}
+!llvm.dbg.lv._Z2fnx = !{!12}
+
+!0 = metadata !{i32 655377, i32 0, i32 4, metadata !"lli.cc", metadata !"/private/tmp", metadata !"clang version 3.0 (trunk 135593)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{i32 655406, i32 0, metadata !2, metadata !"ise", metadata !"ise", metadata !"_Z3iseRKxS0_", metadata !2, i32 2, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i1 (i64*, i64*)* @_Z3iseRKxS0_, null, null} ; [ DW_TAG_subprogram ]
+!2 = metadata !{i32 655401, metadata !"lli.cc", metadata !"/private/tmp", metadata !0} ; [ DW_TAG_file_type ]
+!3 = metadata !{i32 655381, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!4 = metadata !{metadata !5}
+!5 = metadata !{i32 655396, metadata !0, metadata !"bool", null, i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ]
+!6 = metadata !{i32 655406, i32 0, metadata !2, metadata !"fn", metadata !"fn", metadata !"_Z2fnx", metadata !2, i32 6, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i1 (i64)* @_Z2fnx, null, null} ; [ DW_TAG_subprogram ]
+!7 = metadata !{i32 655617, metadata !1, metadata !"LHS", metadata !2, i32 16777218, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
+!8 = metadata !{i32 655376, metadata !0, null, null, i32 0, i64 32, i64 32, i64 0, i32 0, metadata !9} ; [ DW_TAG_reference_type ]
+!9 = metadata !{i32 655398, metadata !0, metadata !"", null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !10} ; [ DW_TAG_const_type ]
+!10 = metadata !{i32 655396, metadata !0, metadata !"long long int", null, i32 0, i64 64, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!11 = metadata !{i32 655617, metadata !1, metadata !"RHS", metadata !2, i32 33554434, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
+!12 = metadata !{i32 655617, metadata !6, metadata !"a", metadata !2, i32 16777222, metadata !10, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
+!13 = metadata !{i32 2, i32 27, metadata !1, null}
+!14 = metadata !{i32 2, i32 49, metadata !1, null}
+!15 = metadata !{i32 3, i32 3, metadata !16, null}
+!16 = metadata !{i32 655371, metadata !1, i32 2, i32 54, metadata !2, i32 0} ; [ DW_TAG_lexical_block ]
+!17 = metadata !{metadata !"long long", metadata !18}
+!18 = metadata !{metadata !"omnipotent char", metadata !19}
+!19 = metadata !{metadata !"Simple C/C++ TBAA", null}
+!20 = metadata !{i32 6, i32 19, metadata !6, null}
+!21 = metadata !{i32 655617, metadata !1, metadata !"LHS", metadata !2, i32 16777218, metadata !8, i32 0, metadata !22} ; [ DW_TAG_arg_variable ]
+!22 = metadata !{i32 7, i32 10, metadata !23, null}
+!23 = metadata !{i32 655371, metadata !6, i32 6, i32 22, metadata !2, i32 1} ; [ DW_TAG_lexical_block ]
+!24 = metadata !{i32 2, i32 27, metadata !1, metadata !22}
+!25 = metadata !{i64 9223372036854775807}
+!26 = metadata !{i32 655617, metadata !1, metadata !"RHS", metadata !2, i32 33554434, metadata !8, i32 0, metadata !22} ; [ DW_TAG_arg_variable ]
+!27 = metadata !{i32 2, i32 49, metadata !1, metadata !22}
+!28 = metadata !{i32 3, i32 3, metadata !16, metadata !22}
diff --git a/test/CodeGen/X86/dbg-value-isel.ll b/test/CodeGen/X86/dbg-value-isel.ll
index d1a9e57..f1101e6 100644
--- a/test/CodeGen/X86/dbg-value-isel.ll
+++ b/test/CodeGen/X86/dbg-value-isel.ll
@@ -29,8 +29,8 @@ entry:
get_local_id.exit: ; preds = %4
%6 = phi i32 [ %5, %4 ]
call void @llvm.dbg.value(metadata !{i32 %6}, i64 0, metadata !10), !dbg !12
- %7 = call <4 x i32> @__amdil_get_global_id_int() nounwind
- %8 = extractelement <4 x i32> %7, i32 0
+ %7 = call <4 x i32> @__amdil_get_global_id_int() nounwind, !dbg !12
+ %8 = extractelement <4 x i32> %7, i32 0, !dbg !12
br label %9
; <label>:9 ; preds = %get_local_id.exit
diff --git a/test/CodeGen/X86/extractelement-load.ll b/test/CodeGen/X86/extractelement-load.ll
index ee57d9b..06d739c 100644
--- a/test/CodeGen/X86/extractelement-load.ll
+++ b/test/CodeGen/X86/extractelement-load.ll
@@ -1,9 +1,25 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | not grep movd
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | not grep movd
+; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | FileCheck %s
define i32 @t(<2 x i64>* %val) nounwind {
+; CHECK: t:
+; CHECK-NOT: movd
+; CHECK: movl 8(
+; CHECK-NEXT: ret
%tmp2 = load <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1]
%tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
ret i32 %tmp4
}
+
+; Case where extractelement of load ends up as undef.
+; (Making sure this doesn't crash.)
+define i32 @t2(<8 x i32>* %xp) {
+; CHECK: t2:
+; CHECK: ret
+ %x = load <8 x i32>* %xp
+ %Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32
+undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
+ %y = extractelement <8 x i32> %Shuff68, i32 0
+ ret i32 %y
+}
diff --git a/test/CodeGen/X86/fast-isel-atomic.ll b/test/CodeGen/X86/fast-isel-atomic.ll
index 74c5868..5f761dd 100644
--- a/test/CodeGen/X86/fast-isel-atomic.ll
+++ b/test/CodeGen/X86/fast-isel-atomic.ll
@@ -5,13 +5,11 @@
@sc = external global i8
@uc = external global i8
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8* nocapture, i8) nounwind
-
define void @test_fetch_and_op() nounwind {
entry:
- %tmp40 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
+ %tmp40 = atomicrmw and i8* @sc, i8 11 monotonic
store i8 %tmp40, i8* @sc
- %tmp41 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
+ %tmp41 = atomicrmw and i8* @uc, i8 11 monotonic
store i8 %tmp41, i8* @uc
ret void
}
diff --git a/test/CodeGen/X86/fast-isel-cmp-branch.ll b/test/CodeGen/X86/fast-isel-cmp-branch.ll
index 12312e8..6e408f8 100644
--- a/test/CodeGen/X86/fast-isel-cmp-branch.ll
+++ b/test/CodeGen/X86/fast-isel-cmp-branch.ll
@@ -26,5 +26,9 @@ true:
return:
ret void
unw:
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/fast-isel-gep.ll b/test/CodeGen/X86/fast-isel-gep.ll
index 1a2e34e..91d1f5d 100644
--- a/test/CodeGen/X86/fast-isel-gep.ll
+++ b/test/CodeGen/X86/fast-isel-gep.ll
@@ -104,6 +104,36 @@ invoke.cont16: ; preds = %if.then14
unreachable
lpad: ; preds = %if.end19, %if.then14, %if.end, %entry
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
}
declare i8* @_ZNK18G__FastAllocString4dataEv() nounwind
+
+
+; PR10605 / rdar://9930964 - Don't fold loads incorrectly. The load should
+; happen before the store.
+define i32 @test7({i32,i32,i32}* %tmp1, i32 %tmp71, i32 %tmp63) nounwind {
+; X64: test7:
+; X64: movl 8({{%rdi|%rcx}}), %eax
+; X64: movl $4, 8({{%rdi|%rcx}})
+
+
+ %tmp29 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
+ %tmp30 = load i32* %tmp29, align 4
+
+ %p2 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
+ store i32 4, i32* %p2
+
+ %tmp72 = or i32 %tmp71, %tmp30
+ %tmp73 = icmp ne i32 %tmp63, 32
+ br i1 %tmp73, label %T, label %F
+
+T:
+ ret i32 %tmp72
+
+F:
+ ret i32 4
+}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/fast-isel-tls.ll b/test/CodeGen/X86/fast-isel-tls.ll
index a5e6642..0963c52 100644
--- a/test/CodeGen/X86/fast-isel-tls.ll
+++ b/test/CodeGen/X86/fast-isel-tls.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -relocation-model=pic -mtriple=i686-unknown-linux-gnu -fast-isel | grep __tls_get_addr
+; RUN: llc < %s -march=x86 -relocation-model=pic -mtriple=i686-unknown-linux-gnu -fast-isel | FileCheck %s
; PR3654
@v = thread_local global i32 0
@@ -8,3 +8,19 @@ entry:
%s = add i32 %t, 1
ret i32 %s
}
+
+; CHECK: f:
+; CHECK: leal v@TLSGD
+; CHECK: __tls_get_addr
+
+@alias = alias internal i32* @v
+define i32 @f_alias() nounwind {
+entry:
+ %t = load i32* @v
+ %s = add i32 %t, 1
+ ret i32 %s
+}
+
+; CHECK: f_alias:
+; CHECK: leal v@TLSGD
+; CHECK: __tls_get_addr
diff --git a/test/CodeGen/X86/fast-isel-x86-64.ll b/test/CodeGen/X86/fast-isel-x86-64.ll
index c4afc10..6a5a102 100644
--- a/test/CodeGen/X86/fast-isel-x86-64.ll
+++ b/test/CodeGen/X86/fast-isel-x86-64.ll
@@ -259,4 +259,27 @@ define void @test21(double* %p1) {
; CHECK: test21:
; CHECK-NOT: pxor
; CHECK: movsd LCPI
-} \ No newline at end of file
+}
+
+; Check that immediate arguments to a function
+; do not cause massive spilling and are used
+; as immediates just before the call.
+define void @test22() nounwind {
+entry:
+ call void @foo22(i32 0)
+ call void @foo22(i32 1)
+ call void @foo22(i32 2)
+ call void @foo22(i32 3)
+ ret void
+; CHECK: test22:
+; CHECK: movl $0, %edi
+; CHECK: callq _foo22
+; CHECK: movl $1, %edi
+; CHECK: callq _foo22
+; CHECK: movl $2, %edi
+; CHECK: callq _foo22
+; CHECK: movl $3, %edi
+; CHECK: callq _foo22
+}
+
+declare void @foo22(i32)
diff --git a/test/CodeGen/X86/fp-stack-O0-crash.ll b/test/CodeGen/X86/fp-stack-O0-crash.ll
index 9b629c0..ae83a02 100644
--- a/test/CodeGen/X86/fp-stack-O0-crash.ll
+++ b/test/CodeGen/X86/fp-stack-O0-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc %s -O0 -fast-isel -regalloc=fast -o -
+; RUN: llc %s -O0 -fast-isel -regalloc=fast -mcpu=i386 -o -
; PR4767
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@@ -28,3 +28,22 @@ if.then: ; preds = %cond.false, %cond.t
if.end: ; preds = %if.then, %cond.false, %cond.true
ret void
}
+
+; PR10575
+; This produces an FP0 = IMPLICIT_DEF instruction.
+define void @__m_rankmerge_MOD_dindexmerge_() nounwind {
+entry:
+ br label %"20"
+
+"20": ; preds = %"23", %entry
+ %0 = phi double [ undef, %entry ], [ %0, %"23" ]
+ %1 = phi double [ 0.000000e+00, %entry ], [ %2, %"23" ]
+ br i1 undef, label %"21", label %"23"
+
+"21": ; preds = %"20"
+ ret void
+
+"23": ; preds = %"20"
+ %2 = select i1 undef, double %0, double %1
+ br label %"20"
+}
diff --git a/test/CodeGen/X86/global-sections.ll b/test/CodeGen/X86/global-sections.ll
index d0a1b4d..194f597 100644
--- a/test/CodeGen/X86/global-sections.ll
+++ b/test/CodeGen/X86/global-sections.ll
@@ -43,7 +43,7 @@
; _Complex long long const G4 = 34;
@G4 = unnamed_addr constant {i64,i64} { i64 34, i64 0 }
-; DARWIN: .section __TEXT,__const
+; DARWIN: .section __TEXT,__literal16,16byte_literals
; DARWIN: _G4:
; DARWIN: .long 34
diff --git a/test/CodeGen/X86/haddsub.ll b/test/CodeGen/X86/haddsub.ll
new file mode 100644
index 0000000..91758ea
--- /dev/null
+++ b/test/CodeGen/X86/haddsub.ll
@@ -0,0 +1,194 @@
+; RUN: llc < %s -march=x86-64 -mattr=+sse3,-avx | FileCheck %s -check-prefix=SSE3
+; RUN: llc < %s -march=x86-64 -mattr=-sse3,+avx | FileCheck %s -check-prefix=AVX
+
+; SSE3: haddpd1:
+; SSE3-NOT: vhaddpd
+; SSE3: haddpd
+; AVX: haddpd1:
+; AVX: vhaddpd
+define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) {
+ %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
+ %b = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 3>
+ %r = fadd <2 x double> %a, %b
+ ret <2 x double> %r
+}
+
+; SSE3: haddpd2:
+; SSE3-NOT: vhaddpd
+; SSE3: haddpd
+; AVX: haddpd2:
+; AVX: vhaddpd
+define <2 x double> @haddpd2(<2 x double> %x, <2 x double> %y) {
+ %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 2>
+ %b = shufflevector <2 x double> %y, <2 x double> %x, <2 x i32> <i32 2, i32 1>
+ %r = fadd <2 x double> %a, %b
+ ret <2 x double> %r
+}
+
+; SSE3: haddpd3:
+; SSE3-NOT: vhaddpd
+; SSE3: haddpd
+; AVX: haddpd3:
+; AVX: vhaddpd
+define <2 x double> @haddpd3(<2 x double> %x) {
+ %a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
+ %b = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
+ %r = fadd <2 x double> %a, %b
+ ret <2 x double> %r
+}
+
+; SSE3: haddps1:
+; SSE3-NOT: vhaddps
+; SSE3: haddps
+; AVX: haddps1:
+; AVX: vhaddps
+define <4 x float> @haddps1(<4 x float> %x, <4 x float> %y) {
+ %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %b = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %r = fadd <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+; SSE3: haddps2:
+; SSE3-NOT: vhaddps
+; SSE3: haddps
+; AVX: haddps2:
+; AVX: vhaddps
+define <4 x float> @haddps2(<4 x float> %x, <4 x float> %y) {
+ %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 2, i32 5, i32 6>
+ %b = shufflevector <4 x float> %y, <4 x float> %x, <4 x i32> <i32 4, i32 7, i32 0, i32 3>
+ %r = fadd <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+; SSE3: haddps3:
+; SSE3-NOT: vhaddps
+; SSE3: haddps
+; AVX: haddps3:
+; AVX: vhaddps
+define <4 x float> @haddps3(<4 x float> %x) {
+ %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
+ %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 5, i32 7>
+ %r = fadd <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+; SSE3: haddps4:
+; SSE3-NOT: vhaddps
+; SSE3: haddps
+; AVX: haddps4:
+; AVX: vhaddps
+define <4 x float> @haddps4(<4 x float> %x) {
+ %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
+ %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
+ %r = fadd <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+; SSE3: haddps5:
+; SSE3-NOT: vhaddps
+; SSE3: haddps
+; AVX: haddps5:
+; AVX: vhaddps
+define <4 x float> @haddps5(<4 x float> %x) {
+ %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 undef, i32 undef>
+ %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 undef, i32 undef>
+ %r = fadd <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+; SSE3: haddps6:
+; SSE3-NOT: vhaddps
+; SSE3: haddps
+; AVX: haddps6:
+; AVX: vhaddps
+define <4 x float> @haddps6(<4 x float> %x) {
+ %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
+ %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %r = fadd <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+; SSE3: haddps7:
+; SSE3-NOT: vhaddps
+; SSE3: haddps
+; AVX: haddps7:
+; AVX: vhaddps
+define <4 x float> @haddps7(<4 x float> %x) {
+ %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 undef>
+ %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 undef, i32 undef>
+ %r = fadd <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+; SSE3: hsubpd1:
+; SSE3-NOT: vhsubpd
+; SSE3: hsubpd
+; AVX: hsubpd1:
+; AVX: vhsubpd
+define <2 x double> @hsubpd1(<2 x double> %x, <2 x double> %y) {
+ %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
+ %b = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 3>
+ %r = fsub <2 x double> %a, %b
+ ret <2 x double> %r
+}
+
+; SSE3: hsubpd2:
+; SSE3-NOT: vhsubpd
+; SSE3: hsubpd
+; AVX: hsubpd2:
+; AVX: vhsubpd
+define <2 x double> @hsubpd2(<2 x double> %x) {
+ %a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
+ %b = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
+ %r = fsub <2 x double> %a, %b
+ ret <2 x double> %r
+}
+
+; SSE3: hsubps1:
+; SSE3-NOT: vhsubps
+; SSE3: hsubps
+; AVX: hsubps1:
+; AVX: vhsubps
+define <4 x float> @hsubps1(<4 x float> %x, <4 x float> %y) {
+ %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %b = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %r = fsub <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+; SSE3: hsubps2:
+; SSE3-NOT: vhsubps
+; SSE3: hsubps
+; AVX: hsubps2:
+; AVX: vhsubps
+define <4 x float> @hsubps2(<4 x float> %x) {
+ %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
+ %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 5, i32 7>
+ %r = fsub <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+; SSE3: hsubps3:
+; SSE3-NOT: vhsubps
+; SSE3: hsubps
+; AVX: hsubps3:
+; AVX: vhsubps
+define <4 x float> @hsubps3(<4 x float> %x) {
+ %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
+ %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
+ %r = fsub <4 x float> %a, %b
+ ret <4 x float> %r
+}
+
+; SSE3: hsubps4:
+; SSE3-NOT: vhsubps
+; SSE3: hsubps
+; AVX: hsubps4:
+; AVX: vhsubps
+define <4 x float> @hsubps4(<4 x float> %x) {
+ %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
+ %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %r = fsub <4 x float> %a, %b
+ ret <4 x float> %r
+}
diff --git a/test/CodeGen/X86/hidden-vis.ll b/test/CodeGen/X86/hidden-vis.ll
index a948bdf..fcb74fc 100644
--- a/test/CodeGen/X86/hidden-vis.ll
+++ b/test/CodeGen/X86/hidden-vis.ll
@@ -1,8 +1,11 @@
; RUN: llc < %s -mtriple=i686-pc-linux-gnu | FileCheck %s -check-prefix=LINUX
; RUN: llc < %s -mtriple=i686-apple-darwin8 | FileCheck %s -check-prefix=DARWIN
+; RUN: llc < %s -mtriple=x86_64-w64-mingw32 | FileCheck %s -check-prefix=WINDOWS
+
@a = hidden global i32 0
-@b = external global i32
+@b = external hidden global i32
+@c = global i32* @b
define weak hidden void @t1() nounwind {
; LINUX: .hidden t1
@@ -10,15 +13,19 @@ define weak hidden void @t1() nounwind {
; DARWIN: .private_extern _t1
; DARWIN: t1:
+
+; WINDOWS: t1:
+; WINDOWS-NOT: hidden
ret void
}
define weak void @t2() nounwind {
-; LINUX: t2:
-; LINUX: .hidden a
-
-; DARWIN: t2:
-; DARWIN: .private_extern _a
+; DARWIN: .weak_definition _t2
ret void
}
+; LINUX: .hidden a
+; LINUX: .hidden b
+
+; DARWIN: .private_extern _a
+; DARWIN-NOT: private_extern
diff --git a/test/CodeGen/X86/inline-asm-fpstack.ll b/test/CodeGen/X86/inline-asm-fpstack.ll
index 8e48bbe..c9a1c1c 100644
--- a/test/CodeGen/X86/inline-asm-fpstack.ll
+++ b/test/CodeGen/X86/inline-asm-fpstack.ll
@@ -329,3 +329,14 @@ entry:
%asmresult = extractvalue %complex %0, 0
ret float %asmresult
}
+
+; Pass the same value in two fixed stack slots.
+; CHECK: PR10602
+; CHECK: flds LCPI
+; CHECK: fld %st(0)
+; CHECK: fcomi %st(1), %st(0)
+define i32 @PR10602() nounwind ssp {
+entry:
+ %0 = tail call i32 asm "fcomi $2, $1; pushf; pop $0", "=r,{st},{st(1)},~{dirflag},~{fpsr},~{flags}"(double 2.000000e+00, double 2.000000e+00) nounwind
+ ret i32 %0
+}
diff --git a/test/CodeGen/X86/iv-users-in-other-loops.ll b/test/CodeGen/X86/iv-users-in-other-loops.ll
index 8385a29..8f79fb8 100644
--- a/test/CodeGen/X86/iv-users-in-other-loops.ll
+++ b/test/CodeGen/X86/iv-users-in-other-loops.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64 -o %t
+; RUN: llc < %s -march=x86-64 -enable-lsr-nested -o %t
; RUN: not grep inc %t
; RUN: grep dec %t | count 2
; RUN: grep addq %t | count 12
@@ -11,6 +11,10 @@
; to insert new induction variables. Previously it would create a
; flood of new induction variables.
; Also, the loop reversal should kick in once.
+;
+; In this example, performing LSR on the entire loop nest,
+; as opposed to only the inner loop, can further reduce induction variables
+; and their related instructions and registers.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/CodeGen/X86/lfence.ll b/test/CodeGen/X86/lfence.ll
index 7a96ca3..1903a1e 100644
--- a/test/CodeGen/X86/lfence.ll
+++ b/test/CodeGen/X86/lfence.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=x86 -mattr=+sse2 | grep lfence
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
+declare void @llvm.x86.sse2.lfence() nounwind
define void @test() {
- call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 false, i1 true)
- ret void
+ call void @llvm.x86.sse2.lfence()
+ ret void
}
diff --git a/test/CodeGen/X86/licm-dominance.ll b/test/CodeGen/X86/licm-dominance.ll
new file mode 100644
index 0000000..8a0958d
--- /dev/null
+++ b/test/CodeGen/X86/licm-dominance.ll
@@ -0,0 +1,36 @@
+; RUN: llc -asm-verbose=false < %s | FileCheck %s
+
+; MachineLICM should check dominance before hoisting instructions.
+; CHECK: jne LBB0_3
+; CHECK-NEXT: xorb %al, %al
+; CHECK-NEXT: testb %al, %al
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-macosx10.7.2"
+
+define void @CMSColorWorldCreateParametricData() nounwind uwtable optsize ssp {
+entry:
+ br label %for.body.i
+
+for.body.i:
+ br i1 undef, label %for.inc.i, label %if.then26.i
+
+if.then26.i:
+ br i1 undef, label %if.else.i.i, label %lor.lhs.false.i.i
+
+if.else.i.i:
+ br i1 undef, label %lor.lhs.false.i.i, label %if.then116.i.i
+
+lor.lhs.false.i.i:
+ br i1 undef, label %for.inc.i, label %if.then116.i.i
+
+if.then116.i.i:
+ unreachable
+
+for.inc.i:
+ %cmp17.i = icmp ult i64 undef, undef
+ br i1 %cmp17.i, label %for.body.i, label %if.end28.i
+
+if.end28.i:
+ ret void
+}
diff --git a/test/CodeGen/X86/licm-nested.ll b/test/CodeGen/X86/licm-nested.ll
index b0105ac..c3f991d 100644
--- a/test/CodeGen/X86/licm-nested.ll
+++ b/test/CodeGen/X86/licm-nested.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=x86_64-apple-darwin -march=x86-64 < %s -o /dev/null -stats -info-output-file - | grep machine-licm | grep 3
+; RUN: llc -mtriple=x86_64-apple-darwin -march=x86-64 < %s -o /dev/null -stats -info-output-file - | grep "hoisted out of loops" | grep 3
; MachineLICM should be able to hoist the symbolic addresses out of
; the inner loops.
diff --git a/test/CodeGen/X86/lock-inst-encoding.ll b/test/CodeGen/X86/lock-inst-encoding.ll
index 2d10fbc..9765fae 100644
--- a/test/CodeGen/X86/lock-inst-encoding.ll
+++ b/test/CodeGen/X86/lock-inst-encoding.ll
@@ -3,19 +3,42 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"
-; CHECK: f0:
-; CHECK: addq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,
+; CHECK: f1:
+; CHECK: addq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x01,0x37]
; CHECK: ret
-define void @f0(i64* %a0) nounwind {
- %t0 = and i64 1, 1
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) nounwind
- %1 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %a0, i64 %t0) nounwind
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) nounwind
+define void @f1(i64* %a, i64 %b) nounwind {
+ %1 = atomicrmw add i64* %a, i64 %b monotonic
ret void
}
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
+; CHECK: f2:
+; CHECK: subq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x29,0x37]
+; CHECK: ret
+define void @f2(i64* %a, i64 %b) nounwind {
+ %1 = atomicrmw sub i64* %a, i64 %b monotonic
+ ret void
+}
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32* nocapture, i32) nounwind
+; CHECK: f3:
+; CHECK: andq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x21,0x37]
+; CHECK: ret
+define void @f3(i64* %a, i64 %b) nounwind {
+ %1 = atomicrmw and i64* %a, i64 %b monotonic
+ ret void
+}
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
+; CHECK: f4:
+; CHECK: orq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x09,0x37]
+; CHECK: ret
+define void @f4(i64* %a, i64 %b) nounwind {
+ %1 = atomicrmw or i64* %a, i64 %b monotonic
+ ret void
+}
+
+; CHECK: f5:
+; CHECK: xorq %{{.*}}, ({{.*}}){{.*}}encoding: [0xf0,0x48,0x31,0x37]
+; CHECK: ret
+define void @f5(i64* %a, i64 %b) nounwind {
+ %1 = atomicrmw xor i64* %a, i64 %b monotonic
+ ret void
+}
diff --git a/test/CodeGen/X86/loop-strength-reduce3.ll b/test/CodeGen/X86/loop-strength-reduce3.ll
index c45a374..d6c265f 100644
--- a/test/CodeGen/X86/loop-strength-reduce3.ll
+++ b/test/CodeGen/X86/loop-strength-reduce3.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=x86 | grep cmp | grep 240
-; RUN: llc < %s -march=x86 | grep inc | count 1
+; RUN: llc < %s -march=x86 -enable-lsr-nested | grep cmp | grep 240
+; RUN: llc < %s -march=x86 -enable-lsr-nested | grep inc | count 1
define i32 @foo(i32 %A, i32 %B, i32 %C, i32 %D) nounwind {
entry:
diff --git a/test/CodeGen/X86/lzcnt.ll b/test/CodeGen/X86/lzcnt.ll
new file mode 100644
index 0000000..e5a55ab
--- /dev/null
+++ b/test/CodeGen/X86/lzcnt.ll
@@ -0,0 +1,38 @@
+; RUN: llc < %s -march=x86-64 -mattr=+lzcnt | FileCheck %s
+
+define i32 @t1(i32 %x) nounwind {
+ %tmp = tail call i32 @llvm.ctlz.i32( i32 %x )
+ ret i32 %tmp
+; CHECK: t1:
+; CHECK: lzcntl
+}
+
+declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+
+define i16 @t2(i16 %x) nounwind {
+ %tmp = tail call i16 @llvm.ctlz.i16( i16 %x )
+ ret i16 %tmp
+; CHECK: t2:
+; CHECK: lzcntw
+}
+
+declare i16 @llvm.ctlz.i16(i16) nounwind readnone
+
+define i64 @t3(i64 %x) nounwind {
+ %tmp = tail call i64 @llvm.ctlz.i64( i64 %x )
+ ret i64 %tmp
+; CHECK: t3:
+; CHECK: lzcntq
+}
+
+declare i64 @llvm.ctlz.i64(i64) nounwind readnone
+
+define i8 @t4(i8 %x) nounwind {
+ %tmp = tail call i8 @llvm.ctlz.i8( i8 %x )
+ ret i8 %tmp
+; CHECK: t4:
+; CHECK: lzcntw
+}
+
+declare i8 @llvm.ctlz.i8(i8) nounwind readnone
+
diff --git a/test/CodeGen/X86/membarrier.ll b/test/CodeGen/X86/membarrier.ll
index 42f8ef5..5e569aa 100644
--- a/test/CodeGen/X86/membarrier.ll
+++ b/test/CodeGen/X86/membarrier.ll
@@ -5,11 +5,8 @@ define i32 @t() {
entry:
%i = alloca i32, align 4
store i32 1, i32* %i, align 4
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- %0 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %i, i32 1)
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ fence seq_cst
+ %0 = atomicrmw sub i32* %i, i32 1 monotonic
+ fence seq_cst
ret i32 0
}
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32* nocapture, i32) nounwind
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/test/CodeGen/X86/mfence.ll b/test/CodeGen/X86/mfence.ll
index a1b2283..6056add 100644
--- a/test/CodeGen/X86/mfence.ll
+++ b/test/CodeGen/X86/mfence.ll
@@ -2,19 +2,7 @@
; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep lfence
; RUN: llc < %s -march=x86 -mattr=+sse2 | grep mfence
-
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
define void @test() {
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 false, i1 true)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 false, i1 true)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 true, i1 true)
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 false, i1 true)
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 true, i1 true)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 true, i1 true)
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true , i1 true)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 false , i1 true)
- ret void
+ fence seq_cst
+ ret void
}
diff --git a/test/CodeGen/X86/movbe.ll b/test/CodeGen/X86/movbe.ll
new file mode 100644
index 0000000..3d3d8cf
--- /dev/null
+++ b/test/CodeGen/X86/movbe.ll
@@ -0,0 +1,36 @@
+; RUN: llc -mtriple=x86_64-linux -mcpu=atom < %s | FileCheck %s
+
+declare i32 @llvm.bswap.i32(i32) nounwind readnone
+declare i64 @llvm.bswap.i64(i64) nounwind readnone
+
+define void @test1(i32* nocapture %x, i32 %y) nounwind {
+ %bswap = call i32 @llvm.bswap.i32(i32 %y)
+ store i32 %bswap, i32* %x, align 4
+ ret void
+; CHECK: test1:
+; CHECK: movbel %esi, (%rdi)
+}
+
+define i32 @test2(i32* %x) nounwind {
+ %load = load i32* %x, align 4
+ %bswap = call i32 @llvm.bswap.i32(i32 %load)
+ ret i32 %bswap
+; CHECK: test2:
+; CHECK: movbel (%rdi), %eax
+}
+
+define void @test3(i64* %x, i64 %y) nounwind {
+ %bswap = call i64 @llvm.bswap.i64(i64 %y)
+ store i64 %bswap, i64* %x, align 8
+ ret void
+; CHECK: test3:
+; CHECK: movbeq %rsi, (%rdi)
+}
+
+define i64 @test4(i64* %x) nounwind {
+ %load = load i64* %x, align 8
+ %bswap = call i64 @llvm.bswap.i64(i64 %load)
+ ret i64 %bswap
+; CHECK: test4:
+; CHECK: movbeq (%rdi), %rax
+}
diff --git a/test/CodeGen/X86/movgs.ll b/test/CodeGen/X86/movgs.ll
index 97b7fe7..aeb540f 100644
--- a/test/CodeGen/X86/movgs.ll
+++ b/test/CodeGen/X86/movgs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=sse41 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-linux -mattr=sse41 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-win32 -mattr=sse41 | FileCheck %s --check-prefix=X64
diff --git a/test/CodeGen/X86/2011-05-31-movmsk.ll b/test/CodeGen/X86/movmsk.ll
index 2b54d5c..2368548 100644
--- a/test/CodeGen/X86/2011-05-31-movmsk.ll
+++ b/test/CodeGen/X86/movmsk.ll
@@ -77,3 +77,34 @@ entry:
%shr.i = lshr i32 %2, 31
ret i32 %shr.i
}
+
+; rdar://10247336
+; movmskp{s|d} only sets the low 4/2 bits; the high bits are known zero.
+
+define i32 @t1(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
+entry:
+; CHECK: t1:
+; CHECK: movmskps
+; CHECK-NOT: movslq
+ %0 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %x) nounwind
+ %idxprom = sext i32 %0 to i64
+ %arrayidx = getelementptr inbounds i32* %indexTable, i64 %idxprom
+ %1 = load i32* %arrayidx, align 4
+ ret i32 %1
+}
+
+define i32 @t2(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
+entry:
+; CHECK: t2:
+; CHECK: movmskpd
+; CHECK-NOT: movslq
+ %0 = bitcast <4 x float> %x to <2 x double>
+ %1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %0) nounwind
+ %idxprom = sext i32 %1 to i64
+ %arrayidx = getelementptr inbounds i32* %indexTable, i64 %idxprom
+ %2 = load i32* %arrayidx, align 4
+ ret i32 %2
+}
+
+declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
+declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
diff --git a/test/CodeGen/X86/nofence.ll b/test/CodeGen/X86/nofence.ll
deleted file mode 100644
index 244d2e9..0000000
--- a/test/CodeGen/X86/nofence.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep fence
-
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
-define void @test() {
- call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 true, i1 false, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 true, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 true, i1 false)
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 true, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 true, i1 true, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 true, i1 false, i1 true, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 true, i1 true, i1 false)
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 true, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 true, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 true, i1 true, i1 true, i1 false)
-
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true , i1 false)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 false , i1 false)
- ret void
-}
diff --git a/test/CodeGen/X86/norex-subreg.ll b/test/CodeGen/X86/norex-subreg.ll
new file mode 100644
index 0000000..2c529fd
--- /dev/null
+++ b/test/CodeGen/X86/norex-subreg.ll
@@ -0,0 +1,80 @@
+; RUN: llc -O0 < %s
+; RUN: llc < %s
+target triple = "x86_64-apple-macosx10.7"
+
+; This test case extracts a sub_8bit_hi sub-register:
+;
+; %R8B<def> = COPY %BH, %EBX<imp-use,kill>
+; %ESI<def> = MOVZX32_NOREXrr8 %R8B<kill>
+;
+; The register allocation above is invalid: %BH can only be encoded without a
+; REX prefix, so the destination register must be GR8_NOREX. The code above
+; triggers an assertion in copyPhysReg.
+;
+; <rdar://problem/10248099>
+
+define void @f() nounwind uwtable ssp {
+entry:
+ %0 = load i32* undef, align 4
+ %add = add i32 0, %0
+ %conv1 = trunc i32 %add to i16
+ %bf.value = and i16 %conv1, 255
+ %1 = and i16 %bf.value, 255
+ %2 = shl i16 %1, 8
+ %3 = load i16* undef, align 1
+ %4 = and i16 %3, 255
+ %5 = or i16 %4, %2
+ store i16 %5, i16* undef, align 1
+ %6 = load i16* undef, align 1
+ %7 = lshr i16 %6, 8
+ %bf.clear2 = and i16 %7, 255
+ %conv3 = zext i16 %bf.clear2 to i32
+ %rem = srem i32 %conv3, 15
+ %conv4 = trunc i32 %rem to i16
+ %bf.value5 = and i16 %conv4, 255
+ %8 = and i16 %bf.value5, 255
+ %9 = shl i16 %8, 8
+ %10 = or i16 undef, %9
+ store i16 %10, i16* undef, align 1
+ ret void
+}
+
+; This test case extracts a sub_8bit_hi sub-register:
+;
+; %vreg2<def> = COPY %vreg1:sub_8bit_hi; GR8:%vreg2 GR64_ABCD:%vreg1
+; TEST8ri %vreg2, 1, %EFLAGS<imp-def>; GR8:%vreg2
+;
+; %vreg2 must be constrained to GR8_NOREX, or the COPY could become impossible.
+;
+; PR11088
+
+define fastcc i32 @g(i64 %FB) nounwind uwtable readnone align 2 {
+entry:
+ %and32 = and i64 %FB, 256
+ %cmp33 = icmp eq i64 %and32, 0
+ %Features.6.or35 = select i1 %cmp33, i32 0, i32 undef
+ %cmp38 = icmp eq i64 undef, 0
+ %or40 = or i32 %Features.6.or35, 4
+ %Features.8 = select i1 %cmp38, i32 %Features.6.or35, i32 %or40
+ %and42 = and i64 %FB, 32
+ %or45 = or i32 %Features.8, 2
+ %cmp43 = icmp eq i64 %and42, 0
+ %Features.8.or45 = select i1 %cmp43, i32 %Features.8, i32 %or45
+ %and47 = and i64 %FB, 8192
+ %cmp48 = icmp eq i64 %and47, 0
+ %or50 = or i32 %Features.8.or45, 32
+ %Features.10 = select i1 %cmp48, i32 %Features.8.or45, i32 %or50
+ %or55 = or i32 %Features.10, 64
+ %Features.10.or55 = select i1 undef, i32 %Features.10, i32 %or55
+ %and57 = lshr i64 %FB, 2
+ %and57.tr = trunc i64 %and57 to i32
+ %or60 = and i32 %and57.tr, 1
+ %Features.12 = or i32 %Features.10.or55, %or60
+ %and62 = and i64 %FB, 128
+ %or65 = or i32 %Features.12, 8
+ %cmp63 = icmp eq i64 %and62, 0
+ %Features.12.or65 = select i1 %cmp63, i32 %Features.12, i32 %or65
+ %Features.14 = select i1 undef, i32 undef, i32 %Features.12.or65
+ %Features.16 = select i1 undef, i32 undef, i32 %Features.14
+ ret i32 %Features.16
+}
diff --git a/test/CodeGen/X86/opt-shuff-tstore.ll b/test/CodeGen/X86/opt-shuff-tstore.ll
new file mode 100644
index 0000000..fc24913
--- /dev/null
+++ b/test/CodeGen/X86/opt-shuff-tstore.ll
@@ -0,0 +1,39 @@
+; RUN: llc -mcpu=corei7 -mtriple=x86_64-linux < %s -promote-elements -mattr=+sse2,+sse41 | FileCheck %s
+
+; CHECK: func_4_8
+; A single memory write
+; CHECK: movd
+; CHECK-NEXT: ret
+define void @func_4_8(<4 x i8> %param, <4 x i8>* %p) {
+ %r = add <4 x i8> %param, <i8 1, i8 2, i8 3, i8 4>
+ store <4 x i8> %r, <4 x i8>* %p
+ ret void
+}
+
+; CHECK: func_4_16
+; CHECK: movq
+; CHECK-NEXT: ret
+define void @func_4_16(<4 x i16> %param, <4 x i16>* %p) {
+ %r = add <4 x i16> %param, <i16 1, i16 2, i16 3, i16 4>
+ store <4 x i16> %r, <4 x i16>* %p
+ ret void
+}
+
+; CHECK: func_8_8
+; CHECK: movq
+; CHECK-NEXT: ret
+define void @func_8_8(<8 x i8> %param, <8 x i8>* %p) {
+ %r = add <8 x i8> %param, <i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4>
+ store <8 x i8> %r, <8 x i8>* %p
+ ret void
+}
+
+; CHECK: func_2_32
+; CHECK: movq
+; CHECK-NEXT: ret
+define void @func_2_32(<2 x i32> %param, <2 x i32>* %p) {
+ %r = add <2 x i32> %param, <i32 1, i32 2>
+ store <2 x i32> %r, <2 x i32>* %p
+ ret void
+}
+
diff --git a/test/CodeGen/X86/or-address.ll b/test/CodeGen/X86/or-address.ll
index b3fc627..f866e41 100644
--- a/test/CodeGen/X86/or-address.ll
+++ b/test/CodeGen/X86/or-address.ll
@@ -47,10 +47,10 @@ return: ; preds = %bb
}
; CHECK: test1:
-; CHECK: movl %{{.*}}, (%rdi,%rcx,4)
-; CHECK: movl %{{.*}}, 8(%rdi,%rcx,4)
-; CHECK: movl %{{.*}}, 4(%rdi,%rcx,4)
-; CHECK: movl %{{.*}}, 12(%rdi,%rcx,4)
+; CHECK: movl %{{.*}}, (%[[RDI:...]],%[[RCX:...]],4)
+; CHECK: movl %{{.*}}, 8(%[[RDI]],%[[RCX]],4)
+; CHECK: movl %{{.*}}, 4(%[[RDI]],%[[RCX]],4)
+; CHECK: movl %{{.*}}, 12(%[[RDI]],%[[RCX]],4)
define void @test1(i32* nocapture %array, i32 %r0, i8 signext %k, i8 signext %i0) nounwind {
bb.nph:
diff --git a/test/CodeGen/X86/palignr.ll b/test/CodeGen/X86/palignr.ll
index 3812c72..6875fb3 100644
--- a/test/CodeGen/X86/palignr.ll
+++ b/test/CodeGen/X86/palignr.ll
@@ -1,7 +1,8 @@
-; RUN: llc < %s -march=x86 -mcpu=core2 | FileCheck %s
+; RUN: llc < %s -march=x86 -mcpu=core2 -mattr=+ssse3 | FileCheck %s
; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck --check-prefix=YONAH %s
define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
+; CHECK: test1:
; CHECK: pshufd
; CHECK-YONAH: pshufd
%C = shufflevector <4 x i32> %A, <4 x i32> undef, <4 x i32> < i32 1, i32 2, i32 3, i32 0 >
@@ -9,6 +10,7 @@ define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
}
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind {
+; CHECK: test2:
; CHECK: palignr
; CHECK-YONAH: shufps
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 3, i32 4 >
@@ -16,43 +18,56 @@ define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind {
}
define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) nounwind {
+; CHECK: test3:
; CHECK: palignr
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 undef, i32 4 >
ret <4 x i32> %C
}
define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind {
+; CHECK: test4:
; CHECK: palignr
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 >
ret <4 x i32> %C
}
define <4 x float> @test5(<4 x float> %A, <4 x float> %B) nounwind {
+; CHECK: test5:
; CHECK: palignr
%C = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 >
ret <4 x float> %C
}
define <8 x i16> @test6(<8 x i16> %A, <8 x i16> %B) nounwind {
+; CHECK: test6:
; CHECK: palignr
%C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 3, i32 4, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10 >
ret <8 x i16> %C
}
define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) nounwind {
+; CHECK: test7:
; CHECK: palignr
%C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 undef, i32 6, i32 undef, i32 8, i32 9, i32 10, i32 11, i32 12 >
ret <8 x i16> %C
}
-define <8 x i16> @test8(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK: palignr
- %C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 undef, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0 >
- ret <8 x i16> %C
-}
-
-define <16 x i8> @test9(<16 x i8> %A, <16 x i8> %B) nounwind {
+define <16 x i8> @test8(<16 x i8> %A, <16 x i8> %B) nounwind {
+; CHECK: test8:
; CHECK: palignr
%C = shufflevector <16 x i8> %A, <16 x i8> %B, <16 x i32> < i32 5, i32 6, i32 7, i32 undef, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20 >
ret <16 x i8> %C
}
+
+; Check that we don't do unary (circular on single operand) palignr incorrectly.
+; (It is possible, but before this testcase was committed, it was being done
+; incorrectly. In particular, one of the operands of the palignr node
+; was an UNDEF.)
+define <8 x i16> @test9(<8 x i16> %A, <8 x i16> %B) nounwind {
+; CHECK: test9:
+; CHECK-NOT: palignr
+; CHECK: pshufb
+ %C = shufflevector <8 x i16> %B, <8 x i16> %A, <8 x i32> < i32 undef, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0 >
+ ret <8 x i16> %C
+}
+
diff --git a/test/CodeGen/X86/personality.ll b/test/CodeGen/X86/personality.ll
index d3d8e3f..51be7bc 100644
--- a/test/CodeGen/X86/personality.ll
+++ b/test/CodeGen/X86/personality.ll
@@ -8,6 +8,8 @@ entry:
to label %return unwind label %unwind
unwind: ; preds = %entry
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
br i1 false, label %eh_then, label %cleanup20
eh_then: ; preds = %unwind
@@ -15,7 +17,9 @@ eh_then: ; preds = %unwind
to label %return unwind label %unwind10
unwind10: ; preds = %eh_then
- %upgraded.eh_select13 = tail call i32 (i8*, i8*, ...)* @llvm.eh.selector(i8* null, i8* bitcast (void ()* @__gxx_personality_v0 to i8*), i32 1)
+ %exn10 = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ %upgraded.eh_select13 = extractvalue { i8*, i32 } %exn10, 1
%upgraded.eh_select131 = sext i32 %upgraded.eh_select13 to i64
%tmp18 = icmp slt i64 %upgraded.eh_select131, 0
br i1 %tmp18, label %filter, label %cleanup20
@@ -33,11 +37,9 @@ return: ; preds = %eh_then, %entry
declare void @_Z1gv()
-declare void @__gxx_personality_v0()
-
declare void @__cxa_end_catch()
-declare i32 @llvm.eh.selector(i8*, i8*, ...) nounwind
+declare i32 @__gxx_personality_v0(...)
; X64: zPLR
; X64: .byte 155
diff --git a/test/CodeGen/X86/pr10420.ll b/test/CodeGen/X86/pr10420.ll
new file mode 100644
index 0000000..3993f24
--- /dev/null
+++ b/test/CodeGen/X86/pr10420.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx -disable-cfi | FileCheck %s
+
+define private void @foo() {
+ ret void
+}
+
+define void @bar() {
+ call void @foo()
+ ret void;
+}
+
+; CHECK: _bar: ## @bar
+; CHECK-NEXT: Ltmp2:
+
+; CHECK: Ltmp12:
+; CHECK-NEXT: Ltmp13 = L_foo-Ltmp12 ## FDE initial location
+; CHECK-NEXT: .quad Ltmp13
+
+; CHECK: Ltmp19:
+; CHECK-NEXT: Ltmp20 = Ltmp2-Ltmp19 ## FDE initial location
+; CHECK-NEXT: .quad Ltmp20
diff --git a/test/CodeGen/X86/pr3495.ll b/test/CodeGen/X86/pr3495.ll
index c612a6e..7efd35b 100644
--- a/test/CodeGen/X86/pr3495.ll
+++ b/test/CodeGen/X86/pr3495.ll
@@ -1,7 +1,9 @@
-; RUN: llc < %s -march=x86 -stats -regalloc=linearscan |& grep {Number of loads added} | grep 2
-; RUN: llc < %s -march=x86 -stats -regalloc=linearscan |& grep {Number of register spills} | grep 1
-; RUN: llc < %s -march=x86 -stats -regalloc=linearscan |& grep {Number of machine instrs printed} | grep 34
+; RUN: llc < %s -march=x86 -stats -regalloc=linearscan -enable-lsr-nested |& grep {Number of loads added} | grep 2
+; RUN: llc < %s -march=x86 -stats -regalloc=linearscan -enable-lsr-nested |& grep {Number of spill slots allocated} | grep 1
+; RUN: llc < %s -march=x86 -stats -regalloc=linearscan -enable-lsr-nested |& grep {Number of machine instrs printed} | grep 34
; PR3495
+;
+; Note: this should not spill at all with either good LSR or good regalloc.
target triple = "i386-pc-linux-gnu"
@x = external global [8 x i32], align 32 ; <[8 x i32]*> [#uses=1]
diff --git a/test/CodeGen/X86/pr3522.ll b/test/CodeGen/X86/pr3522.ll
index da16237..1122530 100644
--- a/test/CodeGen/X86/pr3522.ll
+++ b/test/CodeGen/X86/pr3522.ll
@@ -21,6 +21,8 @@ return: ; preds = %lpad
ret void
lpad: ; preds = %entry
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
%2 = icmp eq i8 %1, 90 ; <i1> [#uses=1]
br i1 %2, label %return, label %bb22
}
@@ -28,3 +30,5 @@ lpad: ; preds = %entry
declare void @__gnat_rcheck_12(i8*, i32) noreturn
declare i32 @report__ident_int(i32)
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/ptr-rotate.ll b/test/CodeGen/X86/ptr-rotate.ll
new file mode 100644
index 0000000..6debd16
--- /dev/null
+++ b/test/CodeGen/X86/ptr-rotate.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mtriple=i386-apple-darwin -o - < %s | FileCheck %s
+
+define i32 @func(i8* %A) nounwind readnone {
+entry:
+ %tmp = ptrtoint i8* %A to i32
+ %shr = lshr i32 %tmp, 5
+ %shl = shl i32 %tmp, 27
+ %or = or i32 %shr, %shl
+; CHECK: roll $27
+ ret i32 %or
+}
diff --git a/test/CodeGen/X86/scev-interchange.ll b/test/CodeGen/X86/scev-interchange.ll
index 81c919f..71a4d21 100644
--- a/test/CodeGen/X86/scev-interchange.ll
+++ b/test/CodeGen/X86/scev-interchange.ll
@@ -149,6 +149,8 @@ bb71.i: ; preds = %bb.i.i.i262.i, %bb66.i
to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i.i unwind label %lpad.i.i.i.i.i.i ; <i8*> [#uses=0]
lpad.i.i.i.i.i.i: ; preds = %bb71.i
+ %exn.i.i.i.i.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i.i: ; preds = %bb71.i
@@ -162,6 +164,8 @@ _ZNSt6vectorIjSaIjEED1Ev.exit.i.i: ; preds = %_ZNSt12_Vector_baseIjSaIjEEC2EmRK
to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i12.i.i unwind label %lpad.i.i.i.i8.i.i ; <i8*> [#uses=0]
lpad.i.i.i.i8.i.i: ; preds = %_ZNSt6vectorIjSaIjEED1Ev.exit.i.i
+ %exn.i.i.i.i8.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
invoke void @_Unwind_Resume(i8* undef)
to label %.noexc.i9.i.i unwind label %lpad.i19.i.i
@@ -179,6 +183,8 @@ bb50.i.i.i: ; preds = %bb.i.i.i.i.i.i.i.i.i.i, %_ZNSt12_Vector_baseIjSaIjEEC2Em
to label %bb83.i unwind label %lpad188.i
lpad.i19.i.i: ; preds = %lpad.i.i.i.i8.i.i
+ %exn.i19.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
bb83.i: ; preds = %bb50.i.i.i
@@ -192,6 +198,8 @@ invcont84.i: ; preds = %bb83.i
to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i unwind label %lpad.i.i.i.i315.i ; <i8*> [#uses=0]
lpad.i.i.i.i315.i: ; preds = %invcont84.i
+ %exn.i.i.i.i315.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
invoke void @_Unwind_Resume(i8* undef)
to label %.noexc.i316.i unwind label %lpad.i352.i
@@ -209,6 +217,8 @@ bb50.i.i: ; preds = %bb.i.i.i.i.i.i.i.i320.i, %_ZNSt12_Vector_baseIjSaIjEEC2EmR
to label %invcont86.i unwind label %lpad200.i
lpad.i352.i: ; preds = %lpad.i.i.i.i315.i
+ %exn.i352.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
invcont86.i: ; preds = %bb50.i.i
@@ -232,6 +242,8 @@ invcont101.i: ; preds = %bb100.i
to label %_ZN10FullMatrixIdEC1Ejj.exit.i.i unwind label %lpad.i.i.i.i.i
lpad.i.i.i.i.i: ; preds = %invcont101.i
+ %exn.i.i.i.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
_ZN10FullMatrixIdEC1Ejj.exit.i.i: ; preds = %invcont101.i
@@ -239,6 +251,8 @@ _ZN10FullMatrixIdEC1Ejj.exit.i.i: ; preds = %invcont101.i
to label %_ZN10FullMatrixIdEC1Ejj.exit28.i.i unwind label %lpad.i.i.i27.i.i
lpad.i.i.i27.i.i: ; preds = %_ZN10FullMatrixIdEC1Ejj.exit.i.i
+ %exn.i.i.i27.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
invoke void @_Unwind_Resume(i8* undef)
to label %.noexc.i.i unwind label %lpad.i.i
@@ -258,6 +272,8 @@ bb.i.i.i297.i.i: ; preds = %bb58.i.i
unreachable
lpad.i.i: ; preds = %lpad.i.i.i27.i.i
+ %exn.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
bb.i191.i: ; preds = %.noexc232.i, %bb58.i.i
@@ -296,43 +312,71 @@ bb29.loopexit.i.i: ; preds = %.noexc232.i
br label %bb9.i216.i
lpad.i: ; preds = %entry
+ %exn.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad120.i: ; preds = %invcont.i
+ %exn120.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad124.i: ; preds = %invcont1.i
+ %exn124.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad128.i: ; preds = %invcont3.i
+ %exn128.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad132.i: ; preds = %invcont4.i
+ %exn132.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad136.i: ; preds = %invcont6.i
+ %exn136.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad140.i: ; preds = %bb21.i, %invcont7.i
+ %exn140.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad144.i: ; preds = %bb10.i168.i, %invcont9.i
+ %exn144.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad148.i: ; preds = %invcont10.i
+ %exn148.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad188.i: ; preds = %bb50.i.i.i
+ %exn188.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad196.i: ; preds = %bb.i191.i
+ %exn196 = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad200.i: ; preds = %bb50.i.i
+ %exn200.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
lpad204.i: ; preds = %invcont86.i
+ %exn204.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
}
declare fastcc void @_ZN11Polynomials19LagrangeEquidistant23generate_complete_basisEj(%"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >"* noalias nocapture sret, i32)
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/segmented-stacks.ll b/test/CodeGen/X86/segmented-stacks.ll
new file mode 100644
index 0000000..ecdb00d
--- /dev/null
+++ b/test/CodeGen/X86/segmented-stacks.ll
@@ -0,0 +1,87 @@
+; RUN: llc < %s -mtriple=i686-linux -segmented-stacks | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-linux -segmented-stacks | FileCheck %s -check-prefix=X64
+
+; Just to prevent the alloca from being optimized away
+declare void @dummy_use(i32*, i32)
+
+define i32 @test_basic(i32 %l) {
+ %mem = alloca i32, i32 %l
+ call void @dummy_use (i32* %mem, i32 %l)
+ %terminate = icmp eq i32 %l, 0
+ br i1 %terminate, label %true, label %false
+
+true:
+ ret i32 0
+
+false:
+ %newlen = sub i32 %l, 1
+ %retvalue = call i32 @test_basic(i32 %newlen)
+ ret i32 %retvalue
+
+; X32: test_basic:
+
+; X32: leal -12(%esp), %ecx
+; X32-NEXT: cmpl %gs:48, %ecx
+
+; X32: subl $8, %esp
+; X32-NEXT: pushl $4
+; X32-NEXT: pushl $12
+; X32-NEXT: calll __morestack
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: ret
+
+; X32: movl %eax, %esp
+
+; X32: subl $12, %esp
+; X32-NEXT: pushl %ecx
+; X32-NEXT: calll __morestack_allocate_stack_space
+; X32-NEXT: addl $16, %esp
+
+; X64: test_basic:
+
+; X64: leaq -24(%rsp), %r11
+; X64-NEXT: cmpq %fs:112, %r11
+
+; X64: movabsq $24, %r10
+; X64-NEXT: movabsq $0, %r11
+; X64-NEXT: callq __morestack
+; X64-NEXT: ret
+
+; X64: movq %rsp, %rax
+; X64-NEXT: subq %rcx, %rax
+; X64-NEXT: cmpq %rax, %fs:112
+
+; X64: movq %rax, %rsp
+
+; X64: movq %rcx, %rdi
+; X64-NEXT: callq __morestack_allocate_stack_space
+
+}
+
+define i32 @test_nested(i32 * nest %closure, i32 %other) {
+ %addend = load i32 * %closure
+ %result = add i32 %other, %addend
+ ret i32 %result
+
+; X32: leal (%esp), %edx
+; X32-NEXT: cmpl %gs:48, %edx
+
+
+; X32: subl $8, %esp
+; X32-NEXT: pushl $4
+; X32-NEXT: pushl $0
+; X32-NEXT: calll __morestack
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: ret
+
+; X64: leaq (%rsp), %r11
+; X64-NEXT: cmpq %fs:112, %r11
+
+; X64: movq %r10, %rax
+; X64-NEXT: movabsq $0, %r10
+; X64-NEXT: movabsq $0, %r11
+; X64-NEXT: callq __morestack
+; X64-NEXT: ret
+; X64: movq %rax, %r10
+
+}
diff --git a/test/CodeGen/X86/sfence.ll b/test/CodeGen/X86/sfence.ll
index 4782879..0c28407 100644
--- a/test/CodeGen/X86/sfence.ll
+++ b/test/CodeGen/X86/sfence.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=x86 -mattr=+sse2 | grep sfence
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
+declare void @llvm.x86.sse.sfence() nounwind
define void @test() {
- call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 true, i1 true)
- ret void
+ call void @llvm.x86.sse.sfence()
+ ret void
}
diff --git a/test/CodeGen/X86/sink-hoist.ll b/test/CodeGen/X86/sink-hoist.ll
index 31f41ee..e13a817 100644
--- a/test/CodeGen/X86/sink-hoist.ll
+++ b/test/CodeGen/X86/sink-hoist.ll
@@ -102,6 +102,7 @@ entry:
br label %bb60
bb: ; preds = %bb60
+ %i.0 = phi i32 [ 0, %bb60 ] ; <i32> [#uses=2]
%0 = bitcast float* %x_addr.0 to <4 x float>* ; <<4 x float>*> [#uses=1]
%1 = load <4 x float>* %0, align 16 ; <<4 x float>> [#uses=4]
%tmp20 = bitcast <4 x float> %1 to <4 x i32> ; <<4 x i32>> [#uses=1]
@@ -129,15 +130,14 @@ bb: ; preds = %bb60
%5 = getelementptr float* %x_addr.0, i64 4 ; <float*> [#uses=1]
%6 = getelementptr float* %y_addr.0, i64 4 ; <float*> [#uses=1]
%7 = add i32 %i.0, 4 ; <i32> [#uses=1]
- br label %bb60
+ %8 = load i32* %n, align 4 ; <i32> [#uses=1]
+ %9 = icmp sgt i32 %8, %7 ; <i1> [#uses=1]
+ br i1 %9, label %bb60, label %return
bb60: ; preds = %bb, %entry
- %i.0 = phi i32 [ 0, %entry ], [ %7, %bb ] ; <i32> [#uses=2]
%x_addr.0 = phi float* [ %x, %entry ], [ %5, %bb ] ; <float*> [#uses=2]
%y_addr.0 = phi float* [ %y, %entry ], [ %6, %bb ] ; <float*> [#uses=2]
- %8 = load i32* %n, align 4 ; <i32> [#uses=1]
- %9 = icmp sgt i32 %8, %i.0 ; <i1> [#uses=1]
- br i1 %9, label %bb, label %return
+ br label %bb
return: ; preds = %bb60
ret void
diff --git a/test/CodeGen/X86/split-eh-lpad-edges.ll b/test/CodeGen/X86/split-eh-lpad-edges.ll
index fd40a7f..756a3dd 100644
--- a/test/CodeGen/X86/split-eh-lpad-edges.ll
+++ b/test/CodeGen/X86/split-eh-lpad-edges.ll
@@ -28,7 +28,11 @@ invcont27: ; preds = %invcont26
lpad: ; preds = %invcont26, %invcont, %entry
%pool.1 = phi %struct.NSAutoreleasePool* [ null, %entry ], [ null, %invcont ], [ null, %invcont26 ] ; <%struct.NSAutoreleasePool*> [#uses=0]
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
unreachable
}
declare %struct.NSObject* @objc_msgSend(%struct.NSObject*, %struct.objc_selector*, ...)
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/split-vector-bitcast.ll b/test/CodeGen/X86/split-vector-bitcast.ll
new file mode 100644
index 0000000..fae15cf
--- /dev/null
+++ b/test/CodeGen/X86/split-vector-bitcast.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -march=x86 -mattr=-sse2,+sse | grep addps
+
+; PR10497 + another isel issue with sse2 disabled
+; (This is primarily checking that this construct doesn't crash.)
+define void @a(<2 x float>* %a, <2 x i32>* %b) {
+ %cc = load <2 x float>* %a
+ %c = fadd <2 x float> %cc, %cc
+ %dd = bitcast <2 x float> %c to <2 x i32>
+ %d = add <2 x i32> %dd, %dd
+ store <2 x i32> %d, <2 x i32>* %b
+ ret void
+}
diff --git a/test/CodeGen/X86/sse-minmax.ll b/test/CodeGen/X86/sse-minmax.ll
index ff0af25..af1a73b 100644
--- a/test/CodeGen/X86/sse-minmax.ll
+++ b/test/CodeGen/X86/sse-minmax.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs | FileCheck %s
-; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck -check-prefix=UNSAFE %s
-; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs -enable-no-nans-fp-math | FileCheck -check-prefix=FINITE %s
+; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs -promote-elements | FileCheck %s
+; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs -enable-unsafe-fp-math -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=UNSAFE %s
+; RUN: llc < %s -march=x86-64 -asm-verbose=false -join-physregs -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=FINITE %s
; Some of these patterns can be matched as SSE min or max. Some of
; them can be matched provided that the operands are swapped.
@@ -933,3 +933,35 @@ entry:
%x_addr.0 = select i1 %0, double 3.000000e+03, double %x ; <double> [#uses=1]
ret double %x_addr.0
}
+
+; UNSAFE: maxpd:
+; UNSAFE: maxpd
+define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) {
+ %max_is_x = fcmp oge <2 x double> %x, %y
+ %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %max
+}
+
+; UNSAFE: minpd:
+; UNSAFE: minpd
+define <2 x double> @minpd(<2 x double> %x, <2 x double> %y) {
+ %min_is_x = fcmp ole <2 x double> %x, %y
+ %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %min
+}
+
+; UNSAFE: maxps:
+; UNSAFE: maxps
+define <4 x float> @maxps(<4 x float> %x, <4 x float> %y) {
+ %max_is_x = fcmp oge <4 x float> %x, %y
+ %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %max
+}
+
+; UNSAFE: minps:
+; UNSAFE: minps
+define <4 x float> @minps(<4 x float> %x, <4 x float> %y) {
+ %min_is_x = fcmp ole <4 x float> %x, %y
+ %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %min
+}
diff --git a/test/CodeGen/X86/sse2-blend.ll b/test/CodeGen/X86/sse2-blend.ll
new file mode 100644
index 0000000..56b099e
--- /dev/null
+++ b/test/CodeGen/X86/sse2-blend.ll
@@ -0,0 +1,55 @@
+; RUN: llc < %s -march=x86 -mcpu=yonah -promote-elements -mattr=+sse2,-sse41 | FileCheck %s
+
+
+; currently (xor v4i32) is defined as illegal, so we scalarize the code.
+
+define void@vsel_float(<4 x float>* %v1, <4 x float>* %v2) {
+ %A = load <4 x float>* %v1
+ %B = load <4 x float>* %v2
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %A, <4 x float> %B
+ store <4 x float > %vsel, <4 x float>* %v1
+ ret void
+}
+
+; currently (xor v4i32) is defined as illegal, so we scalarize the code.
+
+define void @vsel_i32(<4 x i32>* %v1, <4 x i32>* %v2) {
+ %A = load <4 x i32>* %v1
+ %B = load <4 x i32>* %v2
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %A, <4 x i32> %B
+ store <4 x i32 > %vsel, <4 x i32>* %v1
+ ret void
+}
+
+; CHECK: vsel_i64
+; CHECK: pxor
+; CHECK: pand
+; CHECK: andnps
+; CHECK: orps
+; CHECK: ret
+
+define void @vsel_i64(<4 x i64>* %v1, <4 x i64>* %v2) {
+ %A = load <4 x i64>* %v1
+ %B = load <4 x i64>* %v2
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> %A, <4 x i64> %B
+ store <4 x i64 > %vsel, <4 x i64>* %v1
+ ret void
+}
+
+; CHECK: vsel_double
+; CHECK: pxor
+; CHECK: pand
+; CHECK: andnps
+; CHECK: orps
+; CHECK: ret
+
+
+define void @vsel_double(<4 x double>* %v1, <4 x double>* %v2) {
+ %A = load <4 x double>* %v1
+ %B = load <4 x double>* %v2
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> %A, <4 x double> %B
+ store <4 x double > %vsel, <4 x double>* %v1
+ ret void
+}
+
+
diff --git a/test/CodeGen/X86/sse41-blend.ll b/test/CodeGen/X86/sse41-blend.ll
new file mode 100644
index 0000000..78604a0
--- /dev/null
+++ b/test/CodeGen/X86/sse41-blend.ll
@@ -0,0 +1,82 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -promote-elements -mattr=+sse41 | FileCheck %s
+
+;CHECK: vsel_float
+;CHECK: blendvps
+;CHECK: ret
+define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2
+ ret <4 x float> %vsel
+}
+
+
+;CHECK: vsel_4xi8
+;CHECK: blendvps
+;CHECK: ret
+define <4 x i8> @vsel_4xi8(<4 x i8> %v1, <4 x i8> %v2) {
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i8> %v1, <4 x i8> %v2
+ ret <4 x i8> %vsel
+}
+
+;CHECK: vsel_4xi16
+;CHECK: blendvps
+;CHECK: ret
+define <4 x i16> @vsel_4xi16(<4 x i16> %v1, <4 x i16> %v2) {
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i16> %v1, <4 x i16> %v2
+ ret <4 x i16> %vsel
+}
+
+
+;CHECK: vsel_i32
+;CHECK: blendvps
+;CHECK: ret
+define <4 x i32> @vsel_i32(<4 x i32> %v1, <4 x i32> %v2) {
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %v1, <4 x i32> %v2
+ ret <4 x i32> %vsel
+}
+
+
+;CHECK: vsel_double
+;CHECK: blendvpd
+;CHECK: ret
+define <4 x double> @vsel_double(<4 x double> %v1, <4 x double> %v2) {
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> %v1, <4 x double> %v2
+ ret <4 x double> %vsel
+}
+
+
+;CHECK: vsel_i64
+;CHECK: blendvpd
+;CHECK: ret
+define <4 x i64> @vsel_i64(<4 x i64> %v1, <4 x i64> %v2) {
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> %v1, <4 x i64> %v2
+ ret <4 x i64> %vsel
+}
+
+
+;CHECK: vsel_i8
+;CHECK: pblendvb
+;CHECK: ret
+define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
+ %vsel = select <16 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <16 x i8> %v1, <16 x i8> %v2
+ ret <16 x i8> %vsel
+}
+
+;; TEST blend + compares
+; CHECK: A
+define <2 x double> @A(<2 x double> %x, <2 x double> %y) {
+ ; CHECK: cmplepd
+ ; CHECK: blendvpd
+ %max_is_x = fcmp oge <2 x double> %x, %y
+ %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %max
+}
+
+; CHECK: B
+define <2 x double> @B(<2 x double> %x, <2 x double> %y) {
+ ; CHECK: cmpnlepd
+ ; CHECK: blendvpd
+ %min_is_x = fcmp ult <2 x double> %x, %y
+ %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %min
+}
+
diff --git a/test/CodeGen/X86/sub.ll b/test/CodeGen/X86/sub.ll
new file mode 100644
index 0000000..ee5ea1d
--- /dev/null
+++ b/test/CodeGen/X86/sub.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=x86 < %s | FileCheck %s
+
+define i32 @test1(i32 %x) {
+ %xor = xor i32 %x, 31
+ %sub = sub i32 32, %xor
+ ret i32 %sub
+; CHECK: test1:
+; CHECK: xorl $-32
+; CHECK-NEXT: addl $33
+; CHECK-NEXT: ret
+}
diff --git a/test/CodeGen/X86/tail-call-got.ll b/test/CodeGen/X86/tail-call-got.ll
new file mode 100644
index 0000000..1d7eb2e
--- /dev/null
+++ b/test/CodeGen/X86/tail-call-got.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -relocation-model=pic -mattr=+sse2 | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+target triple = "i386-unknown-freebsd9.0"
+
+define double @test1(double %x) nounwind readnone {
+; CHECK: test1:
+; CHECK: movl foo@GOT
+; CHECK-NEXT: jmpl
+ %1 = tail call double @foo(double %x) nounwind readnone
+ ret double %1
+}
+
+declare double @foo(double) readnone
+
+define double @test2(double %x) nounwind readnone {
+; CHECK: test2:
+; CHECK: movl sin@GOT
+; CHECK-NEXT: jmpl
+ %1 = tail call double @sin(double %x) nounwind readnone
+ ret double %1
+}
+
+declare double @sin(double) readnone
diff --git a/test/CodeGen/X86/tlv-1.ll b/test/CodeGen/X86/tlv-1.ll
index 5773260..92dac30 100644
--- a/test/CodeGen/X86/tlv-1.ll
+++ b/test/CodeGen/X86/tlv-1.ll
@@ -5,6 +5,7 @@
@c = external thread_local global %struct.A, align 4
define void @main() nounwind ssp {
+; CHECK: main:
entry:
call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds (%struct.A* @c, i32 0, i32 0, i32 0), i8 0, i64 60, i32 1, i1 false)
unreachable
@@ -14,6 +15,22 @@ entry:
; CHECK-NEXT: movq $0, 48(%rax)
}
+; rdar://10291355
+define i32 @test() nounwind readonly ssp {
+entry:
+; CHECK: test:
+; CHECK: movq _a@TLVP(%rip),
+; CHECK: callq *
+; CHECK: movl (%rax), [[REGISTER:%[a-z]+]]
+; CHECK: movq _b@TLVP(%rip),
+; CHECK: callq *
+; CHECK: subl (%rax), [[REGISTER]]
+ %0 = load i32* @a, align 4
+ %1 = load i32* @b, align 4
+ %sub = sub nsw i32 %0, %1
+ ret i32 %sub
+}
+
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
@a = thread_local global i32 0 ; <i32*> [#uses=0]
diff --git a/test/CodeGen/X86/trunc-ext-ld-st.ll b/test/CodeGen/X86/trunc-ext-ld-st.ll
new file mode 100644
index 0000000..57d6e97
--- /dev/null
+++ b/test/CodeGen/X86/trunc-ext-ld-st.ll
@@ -0,0 +1,82 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -promote-elements -mattr=+sse41 | FileCheck %s
+
+;CHECK: load_2_i8
+; A single 16-bit load
+;CHECK: movzwl
+;CHECK: pshufb
+;CHECK: paddq
+;CHECK: pshufb
+; A single 16-bit store
+;CHECK: movw
+;CHECK: ret
+
+define void @load_2_i8(<2 x i8>* %A) {
+ %T = load <2 x i8>* %A
+ %G = add <2 x i8> %T, <i8 9, i8 7>
+ store <2 x i8> %G, <2 x i8>* %A
+ ret void
+}
+
+;CHECK: load_2_i16
+; Read 32-bits
+;CHECK: movd
+;CHECK: pshufb
+;CHECK: paddq
+;CHECK: pshufb
+;CHECK: movd
+;CHECK: ret
+define void @load_2_i16(<2 x i16>* %A) {
+ %T = load <2 x i16>* %A
+ %G = add <2 x i16> %T, <i16 9, i16 7>
+ store <2 x i16> %G, <2 x i16>* %A
+ ret void
+}
+
+;CHECK: load_2_i32
+;CHECK: pshufd
+;CHECK: paddq
+;CHECK: pshufd
+;CHECK: ret
+define void @load_2_i32(<2 x i32>* %A) {
+ %T = load <2 x i32>* %A
+ %G = add <2 x i32> %T, <i32 9, i32 7>
+ store <2 x i32> %G, <2 x i32>* %A
+ ret void
+}
+
+;CHECK: load_4_i8
+;CHECK: movd
+;CHECK: pshufb
+;CHECK: paddd
+;CHECK: pshufb
+;CHECK: ret
+define void @load_4_i8(<4 x i8>* %A) {
+ %T = load <4 x i8>* %A
+ %G = add <4 x i8> %T, <i8 1, i8 4, i8 9, i8 7>
+ store <4 x i8> %G, <4 x i8>* %A
+ ret void
+}
+
+;CHECK: load_4_i16
+;CHECK: punpcklwd
+;CHECK: paddd
+;CHECK: pshufb
+;CHECK: ret
+define void @load_4_i16(<4 x i16>* %A) {
+ %T = load <4 x i16>* %A
+ %G = add <4 x i16> %T, <i16 1, i16 4, i16 9, i16 7>
+ store <4 x i16> %G, <4 x i16>* %A
+ ret void
+}
+
+;CHECK: load_8_i8
+;CHECK: punpcklbw
+;CHECK: paddw
+;CHECK: pshufb
+;CHECK: ret
+define void @load_8_i8(<8 x i8>* %A) {
+ %T = load <8 x i8>* %A
+ %G = add <8 x i8> %T, %T
+ store <8 x i8> %G, <8 x i8>* %A
+ ret void
+}
diff --git a/test/CodeGen/X86/twoaddr-sink-terminator.ll b/test/CodeGen/X86/twoaddr-sink-terminator.ll
new file mode 100644
index 0000000..209d474
--- /dev/null
+++ b/test/CodeGen/X86/twoaddr-sink-terminator.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -verify-coalescing
+; PR10998
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+target triple = "i386-unknown-freebsd8.2"
+
+define void @test(i32 %arg1) nounwind align 2 {
+bb11:
+ %tmp13 = and i32 %arg1, 7
+ %tmp14 = add i32 %tmp13, -5
+ switch i32 %tmp13, label %bb18 [
+ i32 0, label %bb21
+ i32 4, label %bb22
+ i32 3, label %bb21
+ i32 2, label %bb19
+ ]
+
+bb18:
+ %tmp202 = call i32 @f() nounwind
+ unreachable
+
+bb19:
+ %tmp20 = call i32 @f() nounwind
+ br label %bb24
+
+bb21:
+ %tmp203 = call i32 @f() nounwind
+ br label %bb24
+
+bb22:
+ %tmp23 = call i32 @f() nounwind
+ br label %bb24
+
+bb24:
+ %tmp15 = icmp ult i32 %tmp14, 2
+ %tmp55 = select i1 %tmp15, i32 45, i32 44
+ %tmp56 = call i32 @f2(i32 %tmp55)
+ unreachable
+}
+
+declare i32 @f()
+
+declare i32 @f2(i32)
diff --git a/test/CodeGen/X86/uint64-to-float.ll b/test/CodeGen/X86/uint64-to-float.ll
index d9f753c..1dbbdcf 100644
--- a/test/CodeGen/X86/uint64-to-float.ll
+++ b/test/CodeGen/X86/uint64-to-float.ll
@@ -6,12 +6,37 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"
-; CHECK: testq %rdi, %rdi
-; CHECK-NEXT: jns LBB0_2
+; FIXME: This test could generate this code:
+;
+; ## BB#0: ## %entry
+; testq %rdi, %rdi
+; jns LBB0_2
+; ## BB#1:
+; movq %rdi, %rax
+; shrq %rax
+; andq $1, %rdi
+; orq %rax, %rdi
+; cvtsi2ssq %rdi, %xmm0
+; addss %xmm0, %xmm0
+; ret
+; LBB0_2: ## %entry
+; cvtsi2ssq %rdi, %xmm0
+; ret
+;
+; The blocks come from lowering:
+;
+; %vreg7<def> = CMOV_FR32 %vreg6<kill>, %vreg5<kill>, 15, %EFLAGS<imp-use>; FR32:%vreg7,%vreg6,%vreg5
+;
+; If the instruction had an EFLAGS<kill> flag, it wouldn't need to mark EFLAGS
+; as live-in on the new blocks, and machine sinking would be able to sink
+; everything below the test.
+
; CHECK: shrq
-; CHECK-NEXT: andq
+; CHECK: andq
; CHECK-NEXT: orq
-; CHECK-NEXT: cvtsi2ss
+; CHECK: testq %rdi, %rdi
+; CHECK-NEXT: jns LBB0_2
+; CHECK: cvtsi2ss
; CHECK: LBB0_2
; CHECK-NEXT: cvtsi2ss
define float @test(i64 %a) {
diff --git a/test/CodeGen/X86/uint_to_fp-2.ll b/test/CodeGen/X86/uint_to_fp-2.ll
index da5105d..7536fb8 100644
--- a/test/CodeGen/X86/uint_to_fp-2.ll
+++ b/test/CodeGen/X86/uint_to_fp-2.ll
@@ -1,8 +1,33 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd | count 1
-; rdar://6504833
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
-define float @f(i32 %x) nounwind readnone {
+; rdar://6504833
+define float @test1(i32 %x) nounwind readnone {
+; CHECK: test1
+; CHECK: movd
+; CHECK: orpd
+; CHECK: subsd
+; CHECK: cvtsd2ss
+; CHECK: movss
+; CHECK: flds
+; CHECK: ret
entry:
%0 = uitofp i32 %x to float
ret float %0
}
+
+; PR10802
+define float @test2(<4 x i32> %x) nounwind readnone ssp {
+; CHECK: test2
+; CHECK: xorps [[ZERO:%xmm[0-9]+]]
+; CHECK: movss {{.*}}, [[ZERO]]
+; CHECK: orps
+; CHECK: subsd
+; CHECK: cvtsd2ss
+; CHECK: movss
+; CHECK: flds
+; CHECK: ret
+entry:
+ %vecext = extractelement <4 x i32> %x, i32 0
+ %conv = uitofp i32 %vecext to float
+ ret float %conv
+}
diff --git a/test/CodeGen/X86/v2f32.ll b/test/CodeGen/X86/v2f32.ll
index 6d14099..ba54833 100644
--- a/test/CodeGen/X86/v2f32.ll
+++ b/test/CodeGen/X86/v2f32.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -mtriple=x86_64-linux -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=W64
-; RUN: llc < %s -mcpu=yonah -march=x86 -asm-verbose=0 -o - | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mcpu=yonah -march=x86 -mtriple=i386-linux-gnu -asm-verbose=0 -o - | FileCheck %s -check-prefix=X32
; PR7518
define void @test1(<2 x float> %Q, float *%P2) nounwind {
diff --git a/test/CodeGen/X86/vec_compare-sse4.ll b/test/CodeGen/X86/vec_compare-sse4.ll
new file mode 100644
index 0000000..b4a4a4c
--- /dev/null
+++ b/test/CodeGen/X86/vec_compare-sse4.ll
@@ -0,0 +1,35 @@
+; RUN: llc < %s -march=x86 -mattr=-sse3,+sse2 | FileCheck %s -check-prefix=SSE2
+; RUN: llc < %s -march=x86 -mattr=-sse42,+sse41 | FileCheck %s -check-prefix=SSE41
+; RUN: llc < %s -march=x86 -mattr=+sse42 | FileCheck %s -check-prefix=SSE42
+
+define <2 x i64> @test1(<2 x i64> %A, <2 x i64> %B) nounwind {
+; SSE42: test1:
+; SSE42: pcmpgtq
+; SSE42: ret
+; SSE41: test1:
+; SSE41-NOT: pcmpgtq
+; SSE41: ret
+; SSE2: test1:
+; SSE2-NOT: pcmpgtq
+; SSE2: ret
+
+ %C = icmp sgt <2 x i64> %A, %B
+ %D = sext <2 x i1> %C to <2 x i64>
+ ret <2 x i64> %D
+}
+
+define <2 x i64> @test2(<2 x i64> %A, <2 x i64> %B) nounwind {
+; SSE42: test2:
+; SSE42: pcmpeqq
+; SSE42: ret
+; SSE41: test2:
+; SSE41: pcmpeqq
+; SSE41: ret
+; SSE2: test2:
+; SSE2-NOT: pcmpeqq
+; SSE2: ret
+
+ %C = icmp eq <2 x i64> %A, %B
+ %D = sext <2 x i1> %C to <2 x i64>
+ ret <2 x i64> %D
+}
diff --git a/test/CodeGen/X86/vec_set-C.ll b/test/CodeGen/X86/vec_set-C.ll
index 7636ac3..133f23b 100644
--- a/test/CodeGen/X86/vec_set-C.ll
+++ b/test/CodeGen/X86/vec_set-C.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movq
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep mov | count 1
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 | grep movd
+; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+sse2 | grep movq
+; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+sse2 | grep mov | count 1
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-pc-linux -mattr=+sse2 | grep movd
define <2 x i64> @t1(i64 %x) nounwind {
%tmp8 = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
diff --git a/test/CodeGen/X86/vec_shuffle-37.ll b/test/CodeGen/X86/vec_shuffle-37.ll
index 2efdb14..950040a 100644
--- a/test/CodeGen/X86/vec_shuffle-37.ll
+++ b/test/CodeGen/X86/vec_shuffle-37.ll
@@ -5,8 +5,8 @@
define <4 x i32> @t00(<4 x i32>* %a0) nounwind ssp {
entry:
; CHECK: movaps ({{%rdi|%rcx}}), %xmm0
-; CHECK-NEXT: movaps %xmm0, %xmm1
-; CHECK-NEXT: movlps (%rax), %xmm1
+; CHECK: movaps %xmm0, %xmm1
+; CHECK-NEXT: movss %xmm2, %xmm1
; CHECK-NEXT: shufps $36, %xmm1, %xmm0
%0 = load <4 x i32>* undef, align 16
%1 = load <4 x i32>* %a0, align 16
@@ -23,3 +23,23 @@ entry:
store <2 x double> %vecinit94, <2 x double>* undef
ret void
}
+
+define void @t02(<8 x i32>* %source, <2 x i32>* %dest) nounwind noinline {
+entry:
+; CHECK: movaps 32({{%rdi|%rcx}}), %xmm0
+; CHECK-NEXT: movaps 48({{%rdi|%rcx}}), %xmm1
+; CHECK-NEXT: movss %xmm1, %xmm0
+; CHECK-NEXT: movq %xmm0, ({{%rsi|%rdx}})
+ %0 = bitcast <8 x i32>* %source to <4 x i32>*
+ %arrayidx = getelementptr inbounds <4 x i32>* %0, i64 3
+ %tmp2 = load <4 x i32>* %arrayidx, align 16
+ %tmp3 = extractelement <4 x i32> %tmp2, i32 0
+ %tmp5 = insertelement <2 x i32> <i32 undef, i32 0>, i32 %tmp3, i32 0
+ %arrayidx7 = getelementptr inbounds <8 x i32>* %source, i64 1
+ %1 = bitcast <8 x i32>* %arrayidx7 to <4 x i32>*
+ %tmp8 = load <4 x i32>* %1, align 16
+ %tmp9 = extractelement <4 x i32> %tmp8, i32 1
+ %tmp11 = insertelement <2 x i32> %tmp5, i32 %tmp9, i32 1
+ store <2 x i32> %tmp11, <2 x i32>* %dest, align 8
+ ret void
+}
diff --git a/test/CodeGen/X86/vec_shuffle-38.ll b/test/CodeGen/X86/vec_shuffle-38.ll
new file mode 100644
index 0000000..69a2ede
--- /dev/null
+++ b/test/CodeGen/X86/vec_shuffle-38.ll
@@ -0,0 +1,59 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+
+define <2 x double> @ld(<2 x double> %p) nounwind optsize ssp {
+; CHECK: unpcklpd
+ %shuffle = shufflevector <2 x double> %p, <2 x double> undef, <2 x i32> zeroinitializer
+ ret <2 x double> %shuffle
+}
+
+define <2 x double> @hd(<2 x double> %p) nounwind optsize ssp {
+; CHECK: unpckhpd
+ %shuffle = shufflevector <2 x double> %p, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ ret <2 x double> %shuffle
+}
+
+define <2 x i64> @ldi(<2 x i64> %p) nounwind optsize ssp {
+; CHECK: punpcklqdq
+ %shuffle = shufflevector <2 x i64> %p, <2 x i64> undef, <2 x i32> zeroinitializer
+ ret <2 x i64> %shuffle
+}
+
+define <2 x i64> @hdi(<2 x i64> %p) nounwind optsize ssp {
+; CHECK: punpckhqdq
+ %shuffle = shufflevector <2 x i64> %p, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ ret <2 x i64> %shuffle
+}
+
+; rdar://10050549
+%struct.Float2 = type { float, float }
+
+define <4 x float> @loadhpi(%struct.Float2* %vPtr, <4 x float> %vecin1) nounwind readonly ssp {
+entry:
+; CHECK: loadhpi
+; CHECK-NOT: movq
+; CHECK: movhps (
+ %tmp1 = bitcast %struct.Float2* %vPtr to <1 x i64>*
+ %addptr7 = getelementptr inbounds <1 x i64>* %tmp1, i64 0
+ %tmp2 = bitcast <1 x i64>* %addptr7 to float*
+ %tmp3 = load float* %tmp2, align 4
+ %vec = insertelement <4 x float> undef, float %tmp3, i32 0
+ %addptr.i12 = getelementptr inbounds float* %tmp2, i64 1
+ %tmp4 = load float* %addptr.i12, align 4
+ %vecin2 = insertelement <4 x float> %vec, float %tmp4, i32 1
+ %shuffle = shufflevector <4 x float> %vecin1, <4 x float> %vecin2, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ ret <4 x float> %shuffle
+}
+
+; rdar://10119696
+; CHECK: f
+define <4 x float> @f(<4 x float> %x, double* nocapture %y) nounwind uwtable readonly ssp {
+entry:
+ ; CHECK: movsd (%
+ ; CHECK-NEXT: movsd %xmm
+ %u110.i = load double* %y, align 1
+ %tmp8.i = insertelement <2 x double> undef, double %u110.i, i32 0
+ %tmp9.i = bitcast <2 x double> %tmp8.i to <4 x float>
+ %shuffle.i = shufflevector <4 x float> %x, <4 x float> %tmp9.i, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+ ret <4 x float> %shuffle.i
+}
+
diff --git a/test/CodeGen/X86/widen_shuffle-1.ll b/test/CodeGen/X86/widen_shuffle-1.ll
index 034c42c..8e951b7 100644
--- a/test/CodeGen/X86/widen_shuffle-1.ll
+++ b/test/CodeGen/X86/widen_shuffle-1.ll
@@ -30,6 +30,7 @@ entry:
; opA with opB, the DAG will produce new operations with opA.
define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind {
entry:
+; CHECK: shuf3:
; CHECK: pshufd
%shuffle.i.i.i12 = shufflevector <4 x float> %tmp10, <4 x float> %vecinit15, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
%tmp25.i.i = shufflevector <4 x float> %shuffle.i.i.i12, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
@@ -46,3 +47,10 @@ entry:
ret void
}
+; PR10421: make sure we correctly handle extreme widening with CONCAT_VECTORS
+define <8 x i8> @shuf4(<4 x i8> %a, <4 x i8> %b) nounwind readnone {
+; CHECK: shuf4:
+; CHECK: punpckldq
+ %vshuf = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %vshuf
+}
diff --git a/test/CodeGen/XCore/2011-08-01-DynamicAllocBug.ll b/test/CodeGen/XCore/2011-08-01-DynamicAllocBug.ll
new file mode 100644
index 0000000..7d6d7ba
--- /dev/null
+++ b/test/CodeGen/XCore/2011-08-01-DynamicAllocBug.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=xcore | FileCheck %s
+
+declare void @g()
+declare i8* @llvm.stacksave() nounwind
+declare void @llvm.stackrestore(i8*) nounwind
+
+define void @f(i32** %p, i32 %size) {
+allocas:
+ %0 = call i8* @llvm.stacksave()
+ %a = alloca i32, i32 %size
+ store i32* %a, i32** %p
+ call void @g()
+ call void @llvm.stackrestore(i8* %0)
+ ret void
+}
+; CHECK: f:
+; CHECK: ldaw [[REGISTER:r[0-9]+]], {{r[0-9]+}}[-r1]
+; CHECK: set sp, [[REGISTER]]
+; CHECK: extsp 1
+; CHECK: bl g
diff --git a/test/CodeGen/XCore/2011-08-01-VarargsBug.ll b/test/CodeGen/XCore/2011-08-01-VarargsBug.ll
new file mode 100644
index 0000000..2076057
--- /dev/null
+++ b/test/CodeGen/XCore/2011-08-01-VarargsBug.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -march=xcore | FileCheck %s
+define void @_Z1fz(...) {
+entry:
+; CHECK: _Z1fz:
+; CHECK: extsp 3
+; CHECK: stw r[[REG:[0-3]{1,1}]]
+; CHECK: , sp{{\[}}[[REG]]{{\]}}
+; CHECK: stw r[[REG:[0-3]{1,1}]]
+; CHECK: , sp{{\[}}[[REG]]{{\]}}
+; CHECK: stw r[[REG:[0-3]{1,1}]]
+; CHECK: , sp{{\[}}[[REG]]{{\]}}
+; CHECK: stw r[[REG:[0-3]{1,1}]]
+; CHECK: , sp{{\[}}[[REG]]{{\]}}
+; CHECK: ldaw sp, sp[3]
+; CHECK: retsp 0
+ ret void
+}
diff --git a/test/CodeGen/XCore/licm-ldwcp.ll b/test/CodeGen/XCore/licm-ldwcp.ll
new file mode 100644
index 0000000..4884f70
--- /dev/null
+++ b/test/CodeGen/XCore/licm-ldwcp.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -march=xcore -asm-verbose=0 | FileCheck %s
+
+; MachineLICM should hoist the LDWCP out of the loop.
+
+; CHECK: f:
+; CHECK-NEXT: ldw [[REG:r[0-9]+]], cp[.LCPI0_0]
+; CHECK-NEXT: .LBB0_1:
+; CHECK-NEXT: stw [[REG]], r0[0]
+; CHECK-NEXT: bu .LBB0_1
+
+define void @f(i32* nocapture %p) noreturn nounwind {
+entry:
+ br label %bb
+
+bb: ; preds = %bb, %entry
+ volatile store i32 525509670, i32* %p, align 4
+ br label %bb
+}
diff --git a/test/CodeGen/XCore/misc-intrinsics.ll b/test/CodeGen/XCore/misc-intrinsics.ll
index f504a2e..6d39d77 100644
--- a/test/CodeGen/XCore/misc-intrinsics.ll
+++ b/test/CodeGen/XCore/misc-intrinsics.ll
@@ -4,6 +4,10 @@
declare i32 @llvm.xcore.bitrev(i32)
declare i32 @llvm.xcore.crc32(i32, i32, i32)
declare %0 @llvm.xcore.crc8(i32, i32, i32)
+declare i32 @llvm.xcore.zext(i32, i32)
+declare i32 @llvm.xcore.sext(i32, i32)
+declare i32 @llvm.xcore.geted()
+declare i32 @llvm.xcore.getet()
define i32 @bitrev(i32 %val) {
; CHECK: bitrev:
@@ -25,3 +29,47 @@ define %0 @crc8(i32 %crc, i32 %data, i32 %poly) {
%result = call %0 @llvm.xcore.crc8(i32 %crc, i32 %data, i32 %poly)
ret %0 %result
}
+
+define i32 @zext(i32 %a, i32 %b) {
+; CHECK: zext:
+; CHECK: zext r0, r1
+ %result = call i32 @llvm.xcore.zext(i32 %a, i32 %b)
+ ret i32 %result
+}
+
+define i32 @zexti(i32 %a) {
+; CHECK: zexti:
+; CHECK: zext r0, 4
+ %result = call i32 @llvm.xcore.zext(i32 %a, i32 4)
+ ret i32 %result
+}
+
+define i32 @sext(i32 %a, i32 %b) {
+; CHECK: sext:
+; CHECK: sext r0, r1
+ %result = call i32 @llvm.xcore.sext(i32 %a, i32 %b)
+ ret i32 %result
+}
+
+define i32 @sexti(i32 %a) {
+; CHECK: sexti:
+; CHECK: sext r0, 4
+ %result = call i32 @llvm.xcore.sext(i32 %a, i32 4)
+ ret i32 %result
+}
+
+define i32 @geted() {
+; CHECK: geted:
+; CHECK: get r11, ed
+; CHECK-NEXT: mov r0, r11
+ %result = call i32 @llvm.xcore.geted()
+ ret i32 %result
+}
+
+define i32 @getet() {
+; CHECK: getet:
+; CHECK: get r11, et
+; CHECK-NEXT: mov r0, r11
+ %result = call i32 @llvm.xcore.getet()
+ ret i32 %result
+}
diff --git a/test/CodeGen/XCore/resources.ll b/test/CodeGen/XCore/resources.ll
index bd0492c..8f00fed 100644
--- a/test/CodeGen/XCore/resources.ll
+++ b/test/CodeGen/XCore/resources.ll
@@ -9,6 +9,8 @@ declare void @llvm.xcore.out.p1i8(i8 addrspace(1)* %r, i32 %value)
declare void @llvm.xcore.outt.p1i8(i8 addrspace(1)* %r, i32 %value)
declare void @llvm.xcore.outct.p1i8(i8 addrspace(1)* %r, i32 %value)
declare void @llvm.xcore.chkct.p1i8(i8 addrspace(1)* %r, i32 %value)
+declare i32 @llvm.xcore.testct.p1i8(i8 addrspace(1)* %r)
+declare i32 @llvm.xcore.testwct.p1i8(i8 addrspace(1)* %r)
declare void @llvm.xcore.setd.p1i8(i8 addrspace(1)* %r, i32 %value)
declare void @llvm.xcore.setc.p1i8(i8 addrspace(1)* %r, i32 %value)
declare i32 @llvm.xcore.inshr.p1i8(i8 addrspace(1)* %r, i32 %value)
@@ -18,10 +20,13 @@ declare i32 @llvm.xcore.getts.p1i8(i8 addrspace(1)* %r)
declare void @llvm.xcore.syncr.p1i8(i8 addrspace(1)* %r)
declare void @llvm.xcore.settw.p1i8(i8 addrspace(1)* %r, i32 %value)
declare void @llvm.xcore.setv.p1i8(i8 addrspace(1)* %r, i8* %p)
+declare void @llvm.xcore.setev.p1i8(i8 addrspace(1)* %r, i8* %p)
declare void @llvm.xcore.eeu.p1i8(i8 addrspace(1)* %r)
declare void @llvm.xcore.setclk.p1i8.p1i8(i8 addrspace(1)* %a, i8 addrspace(1)* %b)
declare void @llvm.xcore.setrdy.p1i8.p1i8(i8 addrspace(1)* %a, i8 addrspace(1)* %b)
declare void @llvm.xcore.setpsc.p1i8(i8 addrspace(1)* %r, i32 %value)
+declare i32 @llvm.xcore.peek.p1i8(i8 addrspace(1)* %r)
+declare i32 @llvm.xcore.endin.p1i8(i8 addrspace(1)* %r)
define i8 addrspace(1)* @getr() {
; CHECK: getr:
@@ -171,6 +176,14 @@ define void @setv(i8 addrspace(1)* %r, i8* %p) {
ret void
}
+define void @setev(i8 addrspace(1)* %r, i8* %p) {
+; CHECK: setev:
+; CHECK: mov r11, r1
+; CHECK-NEXT: setev res[r0], r11
+ call void @llvm.xcore.setev.p1i8(i8 addrspace(1)* %r, i8* %p)
+ ret void
+}
+
define void @eeu(i8 addrspace(1)* %r) {
; CHECK: eeu:
; CHECK: eeu res[r0]
@@ -198,3 +211,31 @@ define void @setpsc(i8 addrspace(1)* %r, i32 %value) {
call void @llvm.xcore.setpsc.p1i8(i8 addrspace(1)* %r, i32 %value)
ret void
}
+
+define i32 @peek(i8 addrspace(1)* %r) {
+; CHECK: peek:
+; CHECK: peek r0, res[r0]
+ %result = call i32 @llvm.xcore.peek.p1i8(i8 addrspace(1)* %r)
+ ret i32 %result
+}
+
+define i32 @endin(i8 addrspace(1)* %r) {
+; CHECK: endin:
+; CHECK: endin r0, res[r0]
+ %result = call i32 @llvm.xcore.endin.p1i8(i8 addrspace(1)* %r)
+ ret i32 %result
+}
+
+define i32 @testct(i8 addrspace(1)* %r) {
+; CHECK: testct:
+; CHECK: testct r0, res[r0]
+ %result = call i32 @llvm.xcore.testct.p1i8(i8 addrspace(1)* %r)
+ ret i32 %result
+}
+
+define i32 @testwct(i8 addrspace(1)* %r) {
+; CHECK: testwct:
+; CHECK: testwct r0, res[r0]
+ %result = call i32 @llvm.xcore.testwct.p1i8(i8 addrspace(1)* %r)
+ ret i32 %result
+}
diff --git a/test/CodeGen/XCore/trampoline.ll b/test/CodeGen/XCore/trampoline.ll
index 4e1aba0..6b42134 100644
--- a/test/CodeGen/XCore/trampoline.ll
+++ b/test/CodeGen/XCore/trampoline.ll
@@ -11,7 +11,8 @@ entry:
%FRAME.0 = alloca %struct.FRAME.f, align 4
%TRAMP.23.sub = getelementptr inbounds [20 x i8]* %TRAMP.23, i32 0, i32 0
%FRAME.02 = bitcast %struct.FRAME.f* %FRAME.0 to i8*
- %tramp = call i8* @llvm.init.trampoline(i8* %TRAMP.23.sub, i8* bitcast (i32 (%struct.FRAME.f*)* @g.1101 to i8*), i8* %FRAME.02)
+ call void @llvm.init.trampoline(i8* %TRAMP.23.sub, i8* bitcast (i32 (%struct.FRAME.f*)* @g.1101 to i8*), i8* %FRAME.02)
+ %tramp = call i8* @llvm.adjust.trampoline(i8* %TRAMP.23.sub)
%0 = getelementptr inbounds %struct.FRAME.f* %FRAME.0, i32 0, i32 1
%1 = bitcast i8* %tramp to i32 ()*
store i32 ()* %1, i32 ()** %0, align 4
@@ -32,6 +33,7 @@ entry:
ret i32 %1
}
-declare i8* @llvm.init.trampoline(i8*, i8*, i8*) nounwind
+declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind
+declare i8* @llvm.adjust.trampoline(i8*) nounwind
declare void @h(i32 ()*)