author    | Dimitry Andric <dim@FreeBSD.org> | 2016-12-02 19:20:10 +0000
committer | Dimitry Andric <dim@FreeBSD.org> | 2016-12-02 19:20:10 +0000
commit    | 2cf3bd4601bbc6fc1f3ffe845eb57c2da2dff02c (patch)
tree      | 6c704a1a1c04abaaf72aa6d9a019103c822f0c3e
parent    | 6449741f4c1842221757c062f4abbae7bb524ba9 (diff)
Vendor import of llvm release_39 branch r288513 (tag: vendor/llvm/llvm-release_39-r288513)
Notes:
svn path=/vendor/llvm/dist/; revision=309427
svn path=/vendor/llvm/llvm-release_39-r288513/; revision=309428; tag=vendor/llvm/llvm-release_39-r288513
-rw-r--r-- | include/llvm/Support/Threading.h                          |   10
-rw-r--r-- | lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp         |   25
-rw-r--r-- | lib/Target/AMDGPU/SIInstrInfo.cpp                         |   18
-rw-r--r-- | lib/Target/AMDGPU/SIInstructions.td                       |    1
-rw-r--r-- | lib/Target/AMDGPU/SIWholeQuadMode.cpp                     |    7
-rw-r--r-- | lib/Transforms/InstCombine/InstCombineCompares.cpp        |    2
-rw-r--r-- | lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp |   14
-rw-r--r-- | lib/Transforms/Utils/SimplifyCFG.cpp                      |   10
-rw-r--r-- | test/CodeGen/AMDGPU/mubuf-shader-vgpr.ll                  |   49
-rw-r--r-- | test/CodeGen/AMDGPU/wqm.ll                                |   49
-rw-r--r-- | test/CodeGen/X86/mul-i1024.ll                             | 5938
-rw-r--r-- | test/CodeGen/X86/mul-i256.ll                              |    9
-rw-r--r-- | test/CodeGen/X86/mul-i512.ll                              | 1238
-rw-r--r-- | test/LTO/X86/type-mapping-bug.ll                          |    2
-rw-r--r-- | test/Transforms/InstCombine/indexed-gep-compares.ll       |   20
-rw-r--r-- | test/Transforms/InstCombine/unpack-fca.ll                 |   17
-rw-r--r-- | test/Transforms/SimplifyCFG/PR29163.ll                    |   31
17 files changed, 7403 insertions(+), 37 deletions(-)
diff --git a/include/llvm/Support/Threading.h b/include/llvm/Support/Threading.h
index 09b96dfb4c1c..fe407b725314 100644
--- a/include/llvm/Support/Threading.h
+++ b/include/llvm/Support/Threading.h
@@ -20,11 +20,11 @@
 #include <ciso646> // So we can check the C++ standard lib macros.
 #include <functional>
 
-// We use std::call_once on all Unix platforms except for NetBSD with
-// libstdc++. That platform has a bug they are working to fix, and they'll
-// remove the NetBSD checks once fixed.
-#if defined(LLVM_ON_UNIX) && \
-    !(defined(__NetBSD__) && !defined(_LIBCPP_VERSION)) && !defined(__ppc__)
+// std::call_once from libc++ is used on all Unix platforms. Other
+// implementations like libstdc++ are known to have problems on NetBSD,
+// OpenBSD and PowerPC.
+#if defined(LLVM_ON_UNIX) && (defined(_LIBCPP_VERSION) || \
+    !(defined(__NetBSD__) || defined(__OpenBSD__) || defined(__ppc__)))
 #define LLVM_THREADING_USE_STD_CALL_ONCE 1
 #else
 #define LLVM_THREADING_USE_STD_CALL_ONCE 0
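For context (this aside is editorial, not part of the patch): the macro above only selects the implementation strategy for LLVM's one-time-initialization helper. When LLVM_THREADING_USE_STD_CALL_ONCE is 1, initialization maps onto the standard facility sketched below; when it is 0, the header provides a fallback that does not rely on std::call_once. A minimal, self-contained sketch of the std::call_once pattern (the names here are illustrative, not LLVM's):

```cpp
#include <cstdio>
#include <mutex>
#include <thread>

static std::once_flag InitFlag;

static void initialize() { std::puts("initialized exactly once"); }

void ensureInitialized() {
  // Safe to call concurrently from any number of threads; `initialize`
  // runs exactly once, and every caller observes its effects afterwards.
  std::call_once(InitFlag, initialize);
}

int main() {
  std::thread t1(ensureInitialized), t2(ensureInitialized);
  t1.join();
  t2.join();
}
```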
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 3ab9459c8af7..9a18943291c8 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2185,24 +2185,29 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
   // options. This is a trivially-generalized version of the code from
   // Hacker's Delight (itself derived from Knuth's Algorithm M from section
   // 4.3.1).
-  SDValue Mask =
-    DAG.getConstant(APInt::getLowBitsSet(NVT.getSizeInBits(),
-                                         NVT.getSizeInBits() >> 1), dl, NVT);
+  unsigned Bits = NVT.getSizeInBits();
+  unsigned HalfBits = Bits >> 1;
+  SDValue Mask = DAG.getConstant(APInt::getLowBitsSet(Bits, HalfBits), dl,
+                                 NVT);
   SDValue LLL = DAG.getNode(ISD::AND, dl, NVT, LL, Mask);
   SDValue RLL = DAG.getNode(ISD::AND, dl, NVT, RL, Mask);
 
   SDValue T = DAG.getNode(ISD::MUL, dl, NVT, LLL, RLL);
   SDValue TL = DAG.getNode(ISD::AND, dl, NVT, T, Mask);
 
-  SDValue Shift =
-    DAG.getConstant(NVT.getSizeInBits() >> 1, dl,
-                    TLI.getShiftAmountTy(NVT, DAG.getDataLayout()));
+  EVT ShiftAmtTy = TLI.getShiftAmountTy(NVT, DAG.getDataLayout());
+  if (APInt::getMaxValue(ShiftAmtTy.getSizeInBits()).ult(HalfBits)) {
+    // The type from TLI is too small to fit the shift amount we want.
+    // Override it with i32. The shift will have to be legalized.
+    ShiftAmtTy = MVT::i32;
+  }
+  SDValue Shift = DAG.getConstant(HalfBits, dl, ShiftAmtTy);
   SDValue TH = DAG.getNode(ISD::SRL, dl, NVT, T, Shift);
   SDValue LLH = DAG.getNode(ISD::SRL, dl, NVT, LL, Shift);
   SDValue RLH = DAG.getNode(ISD::SRL, dl, NVT, RL, Shift);
 
   SDValue U = DAG.getNode(ISD::ADD, dl, NVT,
-                          DAG.getNode(ISD::MUL, dl, NVT, LLH, RLL), TL);
+                          DAG.getNode(ISD::MUL, dl, NVT, LLH, RLL), TH);
   SDValue UL = DAG.getNode(ISD::AND, dl, NVT, U, Mask);
   SDValue UH = DAG.getNode(ISD::SRL, dl, NVT, U, Shift);
 
@@ -2211,14 +2216,14 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
   SDValue VH = DAG.getNode(ISD::SRL, dl, NVT, V, Shift);
 
   SDValue W = DAG.getNode(ISD::ADD, dl, NVT,
-                          DAG.getNode(ISD::MUL, dl, NVT, LL, RL),
+                          DAG.getNode(ISD::MUL, dl, NVT, LLH, RLH),
                           DAG.getNode(ISD::ADD, dl, NVT, UH, VH));
-  Lo = DAG.getNode(ISD::ADD, dl, NVT, TH,
+  Lo = DAG.getNode(ISD::ADD, dl, NVT, TL,
                    DAG.getNode(ISD::SHL, dl, NVT, V, Shift));
 
   Hi = DAG.getNode(ISD::ADD, dl, NVT, W,
                    DAG.getNode(ISD::ADD, dl, NVT,
-                               DAG.getNode(ISD::MUL, dl, NVT, RH, LL),
+                               DAG.getNode(ISD::MUL, dl, NVT, RH, LL),
                                DAG.getNode(ISD::MUL, dl, NVT, RL, LH)));
   return;
 }
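To make the operand fix above easier to check, here is a standalone worked instance of the same Knuth Algorithm M decomposition (an editorial illustration, not LLVM code): a 32x32->64-bit multiply built from 32-bit operations on 16-bit halves, using the patch's variable names and mirroring the corrected placement of TH in U and of the high halves in W.

```cpp
#include <cassert>
#include <cstdint>

void mul32x32_64(uint32_t LL, uint32_t RL, uint32_t &Lo, uint32_t &Hi) {
  const uint32_t Mask = 0xFFFF;          // HalfBits = 16
  uint32_t LLL = LL & Mask, LLH = LL >> 16;
  uint32_t RLL = RL & Mask, RLH = RL >> 16;

  uint32_t T = LLL * RLL;
  uint32_t TL = T & Mask, TH = T >> 16;
  uint32_t U = LLH * RLL + TH;           // the fix: add TH here, not TL
  uint32_t UL = U & Mask, UH = U >> 16;
  uint32_t V = LLL * RLH + UL;
  uint32_t VH = V >> 16;
  uint32_t W = LLH * RLH + (UH + VH);    // the fix: high halves, not LL * RL

  Lo = TL + (V << 16);                   // the fix: TL here (TH went into U)
  Hi = W;
  // In the patch, Hi additionally receives the outer cross terms
  // RH*LL + RL*LH, because LL/RL there are themselves the low halves of a
  // wider multiply being expanded.
}

int main() {
  uint32_t Lo, Hi;
  mul32x32_64(0xDEADBEEFu, 0xCAFEBABEu, Lo, Hi);
  assert((((uint64_t)Hi << 32) | Lo) == 0xDEADBEEFull * 0xCAFEBABEu);
}
```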
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index 5cc6a4e0e83e..919081902a9c 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2203,7 +2203,8 @@ void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
 }
 
 void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
-  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+  MachineFunction &MF = *MI.getParent()->getParent();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
 
   // Legalize VOP2
   if (isVOP2(MI) || isVOPC(MI)) {
@@ -2321,8 +2322,14 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
     return;
   }
 
-  // Legalize MIMG
-  if (isMIMG(MI)) {
+  // Legalize MIMG and MUBUF/MTBUF for shaders.
+  //
+  // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
+  // scratch memory access. In both cases, the legalization never involves
+  // conversion to the addr64 form.
+  if (isMIMG(MI) ||
+      (AMDGPU::isShader(MF.getFunction()->getCallingConv()) &&
+       (isMUBUF(MI) || isMTBUF(MI)))) {
     MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
     if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
       unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
@@ -2337,9 +2344,10 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
     return;
   }
 
-  // Legalize MUBUF* instructions
+  // Legalize MUBUF* instructions by converting to addr64 form.
   // FIXME: If we start using the non-addr64 instructions for compute, we
-  // may need to legalize them here.
+  // may need to legalize them as above. This especially applies to the
+  // buffer_load_format_* variants and variants with idxen (or bothen).
   int SRsrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             AMDGPU::OpName::srsrc);
   if (SRsrcIdx != -1) {
diff --git a/lib/Target/AMDGPU/SIInstructions.td b/lib/Target/AMDGPU/SIInstructions.td
index 18b7d5d62efe..dde5f2fc6b40 100644
--- a/lib/Target/AMDGPU/SIInstructions.td
+++ b/lib/Target/AMDGPU/SIInstructions.td
@@ -2029,6 +2029,7 @@ def SI_RETURN : PseudoInstSI <
   let hasSideEffects = 1;
   let SALU = 1;
   let hasNoSchedulingInfo = 1;
+  let DisableWQM = 1;
 }
 
 let Uses = [EXEC], Defs = [EXEC, VCC, M0],
diff --git a/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/lib/Target/AMDGPU/SIWholeQuadMode.cpp
index b200c153df0b..1534d5825696 100644
--- a/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -219,13 +219,6 @@ char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
       markInstruction(MI, Flags, Worklist);
       GlobalFlags |= Flags;
     }
-
-    if (WQMOutputs && MBB.succ_empty()) {
-      // This is a prolog shader. Make sure we go back to exact mode at the end.
-      Blocks[&MBB].OutNeeds = StateExact;
-      Worklist.push_back(&MBB);
-      GlobalFlags |= StateExact;
-    }
   }
 
   return GlobalFlags;
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index bfd73f4bbac5..961497fe3c2d 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -634,7 +634,7 @@ static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
     }
 
     if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
-        !isa<GEPOperator>(V) && !isa<PHINode>(V))
+        !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
       // We've found some value that we can't explore which is different from
       // the base. Therefore we can't do this transformation.
       return false;
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index d312983ed51b..d88456ee4adc 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -579,6 +579,13 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
           UndefValue::get(T), NewLoad, 0, Name));
     }
 
+    // Bail out if the array is too large. Ideally we would like to optimize
+    // arrays of arbitrary size but this has a terrible impact on compile time.
+    // The threshold here is chosen arbitrarily, maybe needs a little bit of
+    // tuning.
+    if (NumElements > 1024)
+      return nullptr;
+
     const DataLayout &DL = IC.getDataLayout();
     auto EltSize = DL.getTypeAllocSize(ET);
     auto Align = LI.getAlignment();
@@ -1081,6 +1088,13 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
       return true;
     }
 
+    // Bail out if the array is too large. Ideally we would like to optimize
+    // arrays of arbitrary size but this has a terrible impact on compile time.
+    // The threshold here is chosen arbitrarily, maybe needs a little bit of
+    // tuning.
+    if (NumElements > 1024)
+      return false;
+
     const DataLayout &DL = IC.getDataLayout();
     auto EltSize = DL.getTypeAllocSize(AT->getElementType());
     auto Align = SI.getAlignment();
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 0504646c304e..c197317ac771 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -2024,14 +2024,20 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetTransformInfo &TTI,
 
   // Move all 'aggressive' instructions, which are defined in the
   // conditional parts of the if's up to the dominating block.
-  if (IfBlock1)
+  if (IfBlock1) {
+    for (auto &I : *IfBlock1)
+      I.dropUnknownNonDebugMetadata();
     DomBlock->getInstList().splice(InsertPt->getIterator(),
                                    IfBlock1->getInstList(), IfBlock1->begin(),
                                    IfBlock1->getTerminator()->getIterator());
-  if (IfBlock2)
+  }
+  if (IfBlock2) {
+    for (auto &I : *IfBlock2)
+      I.dropUnknownNonDebugMetadata();
     DomBlock->getInstList().splice(InsertPt->getIterator(),
                                    IfBlock2->getInstList(), IfBlock2->begin(),
                                    IfBlock2->getTerminator()->getIterator());
+  }
 
   while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
     // Change the PHI node into a select instruction.
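As a source-level illustration of what FoldTwoEntryPHINode does (editorial, not from the patch): it turns an if/else diamond whose only job is to pick a value for a phi into straight-line code ending in a select, speculating both arms into the dominating block, roughly like this:

```cpp
int before(bool c, int a, int b) {
  int r;
  if (c)
    r = a / 4 + 1;    // arm executed only when c is true
  else
    r = b * 3;        // arm executed only when c is false
  return r;           // two-entry phi in the IR
}

int after(bool c, int a, int b) {
  int t1 = a / 4 + 1; // both arms speculated into the dominating block
  int t2 = b * 3;
  return c ? t1 : t2; // the phi becomes a select
}
```

The loops added by the patch matter because an instruction moved this way may carry metadata (such as !nonnull or !range on a load) that was only guaranteed on its original path; once it executes unconditionally those facts can no longer be assumed, so the metadata is stripped with dropUnknownNonDebugMetadata(). The new test/Transforms/SimplifyCFG/PR29163.ll in the diffstat appears to exercise exactly this case.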
diff --git a/test/CodeGen/AMDGPU/mubuf-shader-vgpr.ll b/test/CodeGen/AMDGPU/mubuf-shader-vgpr.ll
new file mode 100644
index 000000000000..b528577a7eaa
--- /dev/null
+++ b/test/CodeGen/AMDGPU/mubuf-shader-vgpr.ll
@@ -0,0 +1,49 @@
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s -check-prefix=CHECK
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s -check-prefix=CHECK
+
+; Test that buffer_load_format with VGPR resource descriptor is properly
+; legalized.
+
+; CHECK-LABEL: {{^}}test_none:
+; CHECK: buffer_load_format_x v0, off, {{s\[[0-9]+:[0-9]+\]}}, 0{{$}}
+define amdgpu_vs float @test_none(<4 x i32> addrspace(2)* inreg %base, i32 %i) {
+main_body:
+  %ptr = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %base, i32 %i
+  %tmp2 = load <4 x i32>, <4 x i32> addrspace(2)* %ptr, align 32
+  %tmp7 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %tmp2, i32 0, i32 0, i1 0, i1 0)
+  ret float %tmp7
+}
+
+; CHECK-LABEL: {{^}}test_idxen:
+; CHECK: buffer_load_format_x v0, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 idxen{{$}}
+define amdgpu_vs float @test_idxen(<4 x i32> addrspace(2)* inreg %base, i32 %i) {
+main_body:
+  %ptr = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %base, i32 %i
+  %tmp2 = load <4 x i32>, <4 x i32> addrspace(2)* %ptr, align 32
+  %tmp7 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %tmp2, i32 undef, i32 0, i1 0, i1 0)
+  ret float %tmp7
+}
+
+; CHECK-LABEL: {{^}}test_offen:
+; CHECK: buffer_load_format_x v0, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}
+define amdgpu_vs float @test_offen(<4 x i32> addrspace(2)* inreg %base, i32 %i) {
+main_body:
+  %ptr = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %base, i32 %i
+  %tmp2 = load <4 x i32>, <4 x i32> addrspace(2)* %ptr, align 32
+  %tmp7 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %tmp2, i32 0, i32 undef, i1 0, i1 0)
+  ret float %tmp7
+}
+
+; CHECK-LABEL: {{^}}test_both:
+; CHECK: buffer_load_format_x v0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 idxen offen{{$}}
+define amdgpu_vs float @test_both(<4 x i32> addrspace(2)* inreg %base, i32 %i) {
+main_body:
+  %ptr = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %base, i32 %i
+  %tmp2 = load <4 x i32>, <4 x i32> addrspace(2)* %ptr, align 32
+  %tmp7 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %tmp2, i32 undef, i32 undef, i1 0, i1 0)
+  ret float %tmp7
+}
+
+declare float @llvm.amdgcn.buffer.load.format.f32(<4 x i32>, i32, i32, i1, i1) nounwind readonly
+
+attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/wqm.ll b/test/CodeGen/AMDGPU/wqm.ll
index 809a7ba9b826..41e426457889 100644
--- a/test/CodeGen/AMDGPU/wqm.ll
+++ b/test/CodeGen/AMDGPU/wqm.ll
@@ -17,17 +17,18 @@ main_body:
 ;CHECK-LABEL: {{^}}test2:
 ;CHECK-NEXT: ; %main_body
 ;CHECK-NEXT: s_wqm_b64 exec, exec
-;CHECK: image_sample
 ;CHECK-NOT: exec
-;CHECK: _load_dword v0,
-define amdgpu_ps float @test2(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, float addrspace(1)* inreg %ptr, <4 x i32> %c) {
+define amdgpu_ps void @test2(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, float addrspace(1)* inreg %ptr, <4 x i32> %c) {
 main_body:
   %c.1 = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %c, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
   %c.2 = bitcast <4 x float> %c.1 to <4 x i32>
   %c.3 = extractelement <4 x i32> %c.2, i32 0
   %gep = getelementptr float, float addrspace(1)* %ptr, i32 %c.3
   %data = load float, float addrspace(1)* %gep
-  ret float %data
+
+  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %data, float undef, float undef, float undef)
+
+  ret void
 }
 
 ; ... but disabled for stores (and, in this simple case, not re-enabled).
@@ -414,6 +415,46 @@ entry:
   ret void
 }
 
+; Must return to exact at the end of a non-void returning shader,
+; otherwise the EXEC mask exported by the epilog will be wrong. This is true
+; even if the shader has no kills, because a kill could have happened in a
+; previous shader fragment.
+;
+; CHECK-LABEL: {{^}}test_nonvoid_return:
+; CHECK: s_mov_b64 [[LIVE:s\[[0-9]+:[0-9]+\]]], exec
+; CHECK: s_wqm_b64 exec, exec
+;
+; CHECK: s_and_b64 exec, exec, [[LIVE]]
+; CHECK-NOT: exec
+define amdgpu_ps <4 x float> @test_nonvoid_return() nounwind {
+  %tex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+  %tex.i = bitcast <4 x float> %tex to <4 x i32>
+  %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.i, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+  ret <4 x float> %dtex
+}
+
+; CHECK-LABEL: {{^}}test_nonvoid_return_unreachable:
+; CHECK: s_mov_b64 [[LIVE:s\[[0-9]+:[0-9]+\]]], exec
+; CHECK: s_wqm_b64 exec, exec
+;
+; CHECK: s_and_b64 exec, exec, [[LIVE]]
+; CHECK-NOT: exec
+define amdgpu_ps <4 x float> @test_nonvoid_return_unreachable(i32 inreg %c) nounwind {
+entry:
+  %tex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+  %tex.i = bitcast <4 x float> %tex to <4 x i32>
+  %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.i, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+
+  %cc = icmp sgt i32 %c, 0
+  br i1 %cc, label %if, label %else
+
+if:
+  store volatile <4 x float> %dtex, <4 x float>* undef
+  unreachable
+
+else:
+  ret <4 x float> %dtex
+}
+
 declare void @llvm.amdgcn.image.store.v4i32(<4 x float>, <4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
 declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) #1
diff --git a/test/CodeGen/X86/mul-i1024.ll b/test/CodeGen/X86/mul-i1024.ll
new file mode 100644
index 000000000000..60933b1e1fa1
--- /dev/null
+++ b/test/CodeGen/X86/mul-i1024.ll
@@ -0,0 +1,5938 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
+
+define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
+; X32-LABEL: test_1024:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:    movl %esp, %ebp
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    andl $-8, %esp
+; X32-NEXT:    subl $2640, %esp # imm = 0xA50
[The listing is truncated at this point: the remaining ~5,900 lines of autogenerated X32/X64 FileCheck assertions for @test_1024 — essentially a long sequence of register spills, reloads, and calls to __multi3 — and the diffs for the remaining test files in the diffstat above are cut off in this view.]
X32-NEXT: pushl %esi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %esi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; 
X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %esi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: 
pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax 
+; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal 
{{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edi, %ecx +; X32-NEXT: movl %edx, %eax +; X32-NEXT: adcl %esi, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, (%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edx, %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edi, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl $0, %edi +; X32-NEXT: addl %ebx, %eax +; X32-NEXT: adcl %esi, %edi +; X32-NEXT: movl $0, %ebx +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: 
pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %ecx, %edx +; X32-NEXT: addl %eax, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %ecx, %esi +; X32-NEXT: adcl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %eax, %ecx +; X32-NEXT: movl %ecx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ebx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %esi, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, (%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %esi +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edi, %esi +; X32-NEXT: adcl %ebx, %ecx +; X32-NEXT: movl $0, %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edi, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, 
{{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: addl %esi, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %eax, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edi, %eax +; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: movl $0, %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: sbbl %esi, %esi +; X32-NEXT: andl $1, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 
4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: addl %eax, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %ebx +; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %esi, %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edx, %esi +; X32-NEXT: adcl %edi, %ecx +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %edx, %edx +; X32-NEXT: andl $1, %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl %esi, %eax +; X32-NEXT: adcl %ecx, %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, 
{{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %eax +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: adcl $0, %edi +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, (%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, 
%edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edi, %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl (%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill +; 
X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edi, %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %esi +; X32-NEXT: adcl $0, %edx +; X32-NEXT: addl %ebx, %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl $0, %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %ebx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ebx, %edi +; X32-NEXT: movl %edi, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl %eax, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: addl %esi, %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edx, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edi, %edx +; X32-NEXT: adcl (%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; 
X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl $0, %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, (%esp) # 4-byte Spill +; X32-NEXT: movl (%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, (%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: addl %edx, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, (%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %eax +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: adcl 
{{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: addl %edx, %ecx +; X32-NEXT: adcl %esi, %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %eax, %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: adcl $0, %edi +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl %edx, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded 
Reload +; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %ebx, %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), 
%edx # 4-byte Folded Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill +; X32-NEXT: addl %eax, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %eax, %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %eax, %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte 
Reload +; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %ebx, %eax +; X32-NEXT: movl %eax, %edx +; X32-NEXT: adcl %edi, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl (%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %ecx, %ebx +; X32-NEXT: addl %eax, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill +; X32-NEXT: movl %ecx, %edi +; X32-NEXT: adcl %eax, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %eax, %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edx, %eax +; X32-NEXT: adcl 
%esi, %ecx +; X32-NEXT: movl $0, %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: sbbl %esi, %esi +; X32-NEXT: andl $1, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: addl %eax, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %esi, %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %eax +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte 
Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: addl %edx, %ecx +; X32-NEXT: adcl %esi, %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %eax, %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: adcl $0, %edi +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl %edx, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; 
X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edi, %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 
4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl $0, %edx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, %esi +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; 
X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edi, %edx +; X32-NEXT: adcl %eax, %ecx +; X32-NEXT: movl $0, %ebx +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: addl %edx, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; 
X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: sbbl %ebx, %ebx +; X32-NEXT: andl $1, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl (%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, (%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: addl %edx, %ecx +; X32-NEXT: movl (%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %esi, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %edi, %esi +; X32-NEXT: adcl %ebx, %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, (%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; 
X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: addl %edx, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %esi, %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl (%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, (%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl %edx, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl %esi, %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %edi, %edx +; X32-NEXT: adcl %ebx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ebx, %ebx +; X32-NEXT: andl $1, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; 
X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: addl %edx, %eax +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %ebx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %edx, %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %esi, %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, (%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: 
movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edi +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl %eax, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: addl %edi, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl 
{{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edx, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl %esi, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %eax, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ebx, %ebx +; X32-NEXT: andl $1, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl %ecx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill 
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl %edi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: addl %edx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %esi, %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl %ebx, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %edx, %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: movl %esi, %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl %edi, %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edi, %edx +; X32-NEXT: adcl %ebx, %ecx +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ebx, %ebx +; X32-NEXT: andl $1, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl 
{{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: addl %edx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %esi, %eax +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %ebx, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %edi, %edx +; X32-NEXT: adcl %ebx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl %edx, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 
{{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edx, %eax +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl $0, %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: sbbl %esi, %esi +; X32-NEXT: andl $1, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl 
{{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edx, %eax +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl $0, %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: sbbl %esi, %esi +; X32-NEXT: andl $1, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edi, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: addl %eax, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %esi, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ebx, %ebx +; X32-NEXT: andl $1, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl %ecx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; 
X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: addl %edx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %esi, %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %ebx, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ebx, %ebx +; X32-NEXT: andl $1, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: addl %edx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edi, %esi +; X32-NEXT: adcl %ebx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 
{{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edx, %eax +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl $0, %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: sbbl %esi, %esi +; X32-NEXT: andl $1, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: addl %eax, %ebx +; X32-NEXT: adcl %ecx, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %edx, %ecx +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl 
{{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %edi, %edx +; X32-NEXT: adcl %ebx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl %edx, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: addl %ecx, %ebx +; X32-NEXT: adcl %edx, %eax +; X32-NEXT: movl $0, %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: sbbl %edi, %edi +; X32-NEXT: andl $1, %edi +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: 
movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: addl %ebx, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl %eax, %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl %edi, %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; 
X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded 
Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl 16(%ebp), %ebx +; X32-NEXT: movl %ecx, 4(%ebx) +; X32-NEXT: movl 16(%ebp), %ecx +; X32-NEXT: movl %eax, (%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 8(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 12(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 16(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 20(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 24(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 28(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 32(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 36(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 40(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 44(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 48(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 52(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 56(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 60(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 64(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 68(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 72(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 76(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 80(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 84(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 88(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 92(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 96(%ecx) +; X32-NEXT: movl %edx, 100(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 104(%ecx) +; X32-NEXT: movl %esi, 108(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 112(%ecx) +; X32-NEXT: movl %edi, 116(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 120(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %eax, 124(%ecx) +; X32-NEXT: leal -12(%ebp), %esp +; X32-NEXT: popl %esi +; X32-NEXT: popl %edi +; X32-NEXT: popl %ebx +; X32-NEXT: popl %ebp +; X32-NEXT: retl +; +; X64-LABEL: test_1024: +; X64: # BB#0: +; X64-NEXT: pushq %rbp +; X64-NEXT: pushq %r15 +; X64-NEXT: pushq %r14 +; X64-NEXT: pushq %r13 +; X64-NEXT: pushq %r12 +; X64-NEXT: pushq %rbx +; X64-NEXT: subq $360, %rsp # imm = 0x168 +; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq 48(%rdi), %r9 +; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq 40(%rdi), %rcx +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq 32(%rdi), %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, %r10 +; X64-NEXT: xorl 
%r8d, %r8d +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %rdi, %rcx +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: addq %r11, %rcx +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, %rbp +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %rbx, %rbp +; X64-NEXT: sbbq %rbx, %rbx +; X64-NEXT: andl $1, %ebx +; X64-NEXT: addq %rax, %rbp +; X64-NEXT: adcq %rdx, %rbx +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r11, %r13 +; X64-NEXT: addq %rax, %r13 +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r15 +; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rdx, %rax +; X64-NEXT: addq %rbp, %r13 +; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rbx, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rsi, %r8 +; X64-NEXT: movq (%r8), %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: xorl %ebp, %ebp +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq 8(%r8), %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rbp +; X64-NEXT: xorl %r9d, %r9d +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: addq %rcx, %r12 +; X64-NEXT: movq %rdx, %rbp +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %r14, %r12 +; X64-NEXT: movq %rcx, %rbx +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: addq %rbp, %rbx +; X64-NEXT: sbbq %rbp, %rbp +; X64-NEXT: andl $1, %ebp +; X64-NEXT: addq %rax, %rbx +; X64-NEXT: adcq %rdx, %rbp +; X64-NEXT: movq 16(%r8), %rax +; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r14, %rdi +; X64-NEXT: movq %rdi, %r9 +; X64-NEXT: addq %rax, %r9 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: adcq %rdx, %rax +; X64-NEXT: addq %rbx, %r9 +; X64-NEXT: adcq %rbp, %rax +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: movq %r11, %rax +; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: addq %rdi, %rax +; X64-NEXT: adcq %rcx, %r15 +; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq (%r10), %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: xorl %r15d, %r15d +; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: addq %rdi, %rax +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: adcq %rcx, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq 32(%r8), %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r15 +; X64-NEXT: xorl %r8d, %r8d +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r14, %rcx +; X64-NEXT: addq %rax, %rcx +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: adcq %rdx, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: addq %rdi, %r11 +; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, %r11 +; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; 
X64-NEXT: adcq %r12, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r9, %r13 +; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r9, %rdi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: adcq %rbp, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbp, %r9 +; X64-NEXT: movq 8(%r10), %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r8 +; X64-NEXT: xorl %ecx, %ecx +; X64-NEXT: movq %rax, %r15 +; X64-NEXT: addq %rsi, %r15 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: addq %r14, %r15 +; X64-NEXT: movq %rsi, %rbp +; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %rbx, %rbp +; X64-NEXT: sbbq %r8, %r8 +; X64-NEXT: andl $1, %r8d +; X64-NEXT: addq %rax, %rbp +; X64-NEXT: adcq %rdx, %r8 +; X64-NEXT: movq 16(%r10), %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r14, %rbx +; X64-NEXT: addq %rax, %rbx +; X64-NEXT: movq %rsi, %r10 +; X64-NEXT: adcq %rdx, %r10 +; X64-NEXT: addq %rbp, %rbx +; X64-NEXT: adcq %r8, %r10 +; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq %r14, (%rsp) # 8-byte Spill +; X64-NEXT: addq %r11, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r15, %rcx +; X64-NEXT: adcq %rcx, %r12 +; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rbx, %rdi +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbx, %r8 +; X64-NEXT: adcq %r10, %r9 +; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: movq 40(%r13), %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: xorl %edx, %edx +; X64-NEXT: mulq %rdx +; X64-NEXT: xorl %r11d, %r11d +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: addq %r9, %rsi +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: addq %rdi, %rsi +; X64-NEXT: movq %r9, %rbp +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %rbx, %rbp +; X64-NEXT: sbbq %rbx, %rbx +; X64-NEXT: andl $1, %ebx +; X64-NEXT: addq %rax, %rbp +; X64-NEXT: adcq %rdx, %rbx +; X64-NEXT: movq 48(%r13), %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, %r15 +; X64-NEXT: movq %rdi, %r11 +; X64-NEXT: addq %rax, %r15 +; X64-NEXT: movq %r9, %rdi +; X64-NEXT: adcq %rdx, %rdi +; X64-NEXT: addq %rbp, %r15 +; X64-NEXT: adcq %rbx, %rdi +; X64-NEXT: addq %r11, %r14 +; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rsi, %rcx +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r15, %r8 +; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rdi, %r10 +; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: movq %r14, %rax +; X64-NEXT: addq %r11, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: adcq %r9, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq 
%r14, %rax +; X64-NEXT: addq %r11, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload +; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq 56(%rax), %rax +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r10 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rbx, %rbp +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rbp, %r8 +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: addq %rdi, %rbx +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: andl $1, %edi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rcx, %r11 +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: adcq %rdi, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload +; X64-NEXT: addq %rax, %r15 +; X64-NEXT: adcq %rdx, %r12 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %r10, %rsi +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rdx, %r10 +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rdx, %rbp +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r10, %rbx +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r10 +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: addq %rbp, %rdi +; X64-NEXT: sbbq %rbp, %rbp +; X64-NEXT: andl $1, %ebp +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %rsi +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rdx, %r13 +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rdi, %rbx +; X64-NEXT: adcq %rbp, %r13 +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload +; X64-NEXT: addq %r9, %rbx +; X64-NEXT: adcq %r8, %r13 +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: adcq $0, %r12 +; X64-NEXT: movq %r10, %rbp +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %rsi, %r8 +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rdi +; X64-NEXT: addq %rcx, %rdi +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq 24(%rax), %r14 +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rdi, %rax +; 
X64-NEXT: movq %rax, %rdi +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %r14 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: addq %r11, %rbp +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: addq %rax, %rbp +; X64-NEXT: adcq %rdx, %rsi +; X64-NEXT: addq %rbx, %r9 +; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r13, %rdi +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: addq %r15, %rbp +; X64-NEXT: adcq %r12, %rsi +; X64-NEXT: movl $0, %r10d +; X64-NEXT: adcq $0, %r10 +; X64-NEXT: sbbq %r15, %r15 +; X64-NEXT: andl $1, %r15d +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r12 +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, %rdi +; X64-NEXT: addq %r12, %rdi +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: addq %rdi, %r9 +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rbx, %rcx +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: andl $1, %edi +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %r14, %r12 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rdi, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: addq %r11, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: addq %rax, %rcx +; X64-NEXT: adcq %rdx, %rdi +; X64-NEXT: addq %rbp, %r13 +; X64-NEXT: adcq %rsi, %r9 +; X64-NEXT: adcq %r10, %rcx +; X64-NEXT: adcq %r15, %rdi +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload +; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload +; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq 24(%rax), %rax +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r13 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r15 +; X64-NEXT: addq %rbx, %r15 +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rdi, %rcx +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq 
%r8, %rax +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rbp, %r14 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload +; X64-NEXT: addq %rax, %r8 +; X64-NEXT: adcq %rdx, %r10 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %rsi, %r9 +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: addq %rdi, %rsi +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: andl $1, %edi +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rsi, %rbx +; X64-NEXT: adcq %rdi, %rcx +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: addq %r11, %rbx +; X64-NEXT: adcq %r15, %rcx +; X64-NEXT: adcq $0, %r8 +; X64-NEXT: adcq $0, %r10 +; X64-NEXT: movq %r9, %rsi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r9 +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq %rbp, %r14 +; X64-NEXT: movq %r14, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r13 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %r9, %rbp +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: mulq %r12 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movq %rax, %r15 +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: addq %rdi, %rsi +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: andl $1, %edi +; X64-NEXT: movq %r14, %rax +; X64-NEXT: mulq %r12 +; X64-NEXT: addq %rsi, %rax +; X64-NEXT: adcq %rdi, %rdx +; X64-NEXT: movq (%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: addq %r14, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: addq %rax, %rsi +; X64-NEXT: adcq %rdx, %rbp +; X64-NEXT: addq %rbx, %r11 +; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rcx, %r15 +; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %r8, %rsi +; X64-NEXT: adcq %r10, %rbp +; X64-NEXT: movl $0, %r10d +; X64-NEXT: adcq $0, %r10 +; X64-NEXT: sbbq %r15, %r15 +; X64-NEXT: andl $1, %r15d +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %r9 +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r9, %rbx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r12 +; X64-NEXT: movq %rdx, 
%rcx +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: addq %rbx, %r9 +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rdi, %rcx +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: andl $1, %edi +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %r12 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rdi, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: addq %r14, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; X64-NEXT: movq %r12, %r13 +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload +; X64-NEXT: addq %rax, %rcx +; X64-NEXT: adcq %rdx, %r13 +; X64-NEXT: addq %rsi, %r11 +; X64-NEXT: adcq %rbp, %r9 +; X64-NEXT: adcq %r10, %rcx +; X64-NEXT: adcq %r15, %r13 +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload +; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload +; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq $0, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rcx, %r11 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rdi, %rbp +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: addq %rbp, %r10 +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rbx, %rcx +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r14 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload +; X64-NEXT: movq %r12, %r15 +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload +; X64-NEXT: addq %rax, %r9 +; X64-NEXT: adcq %rdx, %r15 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r11, %rdi +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %r8, %rbp +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r8 +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rbx, %rcx +; X64-NEXT: sbbq %rbp, %rbp +; X64-NEXT: andl $1, %ebp +; X64-NEXT: movq 
%rsi, %rax +; X64-NEXT: movq %rsi, %rdi +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: adcq %rbp, %rsi +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq %r10, %rsi +; X64-NEXT: adcq $0, %r9 +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: movq %r8, %rbp +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %r14 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r11 +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %r12 +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %r14, %rcx +; X64-NEXT: adcq $0, %r12 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq 56(%rax), %rdi +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, %rbp +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %r12, %rbp +; X64-NEXT: sbbq %rcx, %rcx +; X64-NEXT: andl $1, %ecx +; X64-NEXT: movq %r11, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: addq %rbp, %rax +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: addq %r11, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; X64-NEXT: adcq %r12, %rdi +; X64-NEXT: addq %rax, %rcx +; X64-NEXT: adcq %rdx, %rdi +; X64-NEXT: addq %rbx, %r8 +; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rsi, %r10 +; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: addq %r9, %rcx +; X64-NEXT: adcq %r15, %rdi +; X64-NEXT: movl $0, %r8d +; X64-NEXT: adcq $0, %r8 +; X64-NEXT: sbbq %r9, %r9 +; X64-NEXT: andl $1, %r9d +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rdx, %r10 +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: movq %r14, %rax +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %r10, %rbp +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: mulq %r10 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %r15 +; X64-NEXT: addq %rbp, %r15 +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: addq %rbx, %rsi +; X64-NEXT: sbbq %rbp, %rbp +; X64-NEXT: andl $1, %ebp +; X64-NEXT: movq %r14, %rax +; X64-NEXT: mulq %r10 +; X64-NEXT: addq %rsi, %rax +; X64-NEXT: adcq %rbp, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: addq %r11, %r10 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: adcq %r12, %r14 +; X64-NEXT: addq %rax, %r10 +; X64-NEXT: adcq %rdx, %r14 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rdi, %r15 +; X64-NEXT: adcq %r8, %r10 +; X64-NEXT: adcq %r9, %r14 +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte 
Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: addq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq %r13, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq $0, %rax +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: adcq $0, %r10 +; X64-NEXT: adcq $0, %r14 +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: movl $0, %eax +; X64-NEXT: adcq $0, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movl $0, %eax +; X64-NEXT: adcq $0, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movl $0, %eax +; X64-NEXT: adcq $0, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: sbbq %rax, %rax +; X64-NEXT: andl $1, %eax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rcx, %r9 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: addq %rcx, %r13 +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: addq %rbx, %rsi +; X64-NEXT: sbbq %rcx, %rcx +; X64-NEXT: andl $1, %ecx +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r11 +; X64-NEXT: addq %rsi, %rax +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: addq %rax, %r12 +; X64-NEXT: adcq %rdx, %r8 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r9 +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: andl $1, %edi +; X64-NEXT: movq %rbp, %rbx +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rdx, %r11 +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: addq %rcx, %rsi +; X64-NEXT: adcq %rdi, %r11 +; X64-NEXT: addq 
{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: adcq %r13, %r11 +; X64-NEXT: adcq $0, %r12 +; X64-NEXT: adcq $0, %r8 +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r9, %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq %rbx, %r9 +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %rbp +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %r8, %rcx +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: addq %rbp, %rbx +; X64-NEXT: sbbq %rcx, %rcx +; X64-NEXT: andl $1, %ecx +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: addq %rax, %rdi +; X64-NEXT: adcq %rdx, %rcx +; X64-NEXT: addq %rsi, %r13 +; X64-NEXT: adcq %r11, %r8 +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %r12, %rdi +; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movl $0, %r12d +; X64-NEXT: adcq $0, %r12 +; X64-NEXT: sbbq %r9, %r9 +; X64-NEXT: andl $1, %r9d +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rbp, %rbx +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: addq %rax, %rsi +; X64-NEXT: adcq %rdx, %rcx +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq %r12, %rsi +; X64-NEXT: adcq %r9, %rcx +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax 
# 8-byte Reload +; X64-NEXT: addq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq %r15, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq %r10, %r13 +; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r14, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq 64(%rsi), %r14 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq 72(%rsi), %rcx +; X64-NEXT: movq %rsi, %r13 +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rcx, %rsi +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: addq %rbx, %r10 +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rdi, %rcx +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: andl $1, %edi +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rsi, %r8 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: adcq %rdi, %rsi +; X64-NEXT: movq %r14, %rax +; X64-NEXT: xorl %ecx, %ecx +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: movq %rdx, %r11 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload +; X64-NEXT: addq %rbx, %r15 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; X64-NEXT: adcq %r11, %r12 +; X64-NEXT: addq %rbp, %r15 +; X64-NEXT: adcq %rsi, %r12 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rdx, %r9 +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %r9, %rbp +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r14 +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r9 +; X64-NEXT: mulq %r8 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: addq %rax, %rbx +; X64-NEXT: adcq %rdx, %r11 +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq %r10, %r11 +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: adcq $0, %r12 +; X64-NEXT: movq 80(%r13), %rbp +; X64-NEXT: movq %r14, 
%rsi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %r10 +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %r8, %rcx +; X64-NEXT: adcq $0, %r10 +; X64-NEXT: movq 88(%r13), %r13 +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rcx, %r8 +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: addq %r10, %rdi +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %r10 +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %rdi, %rcx +; X64-NEXT: adcq %rsi, %r10 +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: xorl %edx, %edx +; X64-NEXT: mulq %rdx +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %rdi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: addq %rdi, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: adcq %rdx, %rax +; X64-NEXT: addq %rcx, %rsi +; X64-NEXT: adcq %r10, %rax +; X64-NEXT: addq %rbx, %r14 +; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r11, %r8 +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: adcq $0, %rax +; X64-NEXT: addq %r15, %rsi +; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r12, %rax +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: movl $0, %r15d +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: sbbq %r12, %r12 +; X64-NEXT: andl $1, %r12d +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: movq %r11, %rax +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r8, %rbx +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq %r11, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: addq %rdi, %rsi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: addq %rax, %rsi +; X64-NEXT: adcq %rdx, %rcx +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r10, %rbx +; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r15, %rsi +; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r12, %rcx +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: imulq %rax, %r13 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: addq %r13, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: imulq %rdi, %rbp +; X64-NEXT: addq %rdx, %rbp +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: imulq %rbx, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; 
X64-NEXT: mulq %rcx +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: addq %rsi, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: imulq %rcx, %rax +; X64-NEXT: addq %rdx, %rax +; X64-NEXT: addq %r9, %r10 +; X64-NEXT: adcq %rbp, %rax +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %rbp +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq %rbx, %r12 +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rbp +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: addq %rbx, %r14 +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %rsi, %rbp +; X64-NEXT: sbbq %rcx, %rcx +; X64-NEXT: andl $1, %ecx +; X64-NEXT: movq %r12, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r12 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rbp, %r8 +; X64-NEXT: adcq %rcx, %r12 +; X64-NEXT: addq %r10, %r8 +; X64-NEXT: adcq %r9, %r12 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; X64-NEXT: movq 120(%rdx), %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: imulq %r9, %rcx +; X64-NEXT: movq 112(%rdx), %rsi +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rax, %r15 +; X64-NEXT: addq %rcx, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: imulq %r10, %rsi +; X64-NEXT: addq %rdx, %rsi +; X64-NEXT: movq 96(%rdi), %rbp +; X64-NEXT: movq 104(%rdi), %rbx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: imulq %rbx, %rcx +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: addq %rcx, %rdx +; X64-NEXT: imulq %rbp, %r11 +; X64-NEXT: addq %rdx, %r11 +; X64-NEXT: addq %r15, %r13 +; X64-NEXT: adcq %rsi, %r11 +; X64-NEXT: movq %r11, %r15 +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r10 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: addq %rdi, %rsi +; X64-NEXT: sbbq %rcx, %rcx +; X64-NEXT: andl $1, %ecx +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: mulq %r10 +; X64-NEXT: addq %rsi, %rax +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: addq %r13, %rax +; X64-NEXT: adcq %r15, %rdx +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: adcq %r14, %rbp +; X64-NEXT: adcq %r8, %rax +; X64-NEXT: adcq %r12, %rdx +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq 80(%rsi), %rax +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rax, 
{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq 88(%rsi), %rax +; X64-NEXT: movq %rsi, %r11 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %rbx +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rdi +; X64-NEXT: addq %rcx, %rdi +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbp, %r9 +; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload +; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rdx, %rbp +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: addq %rdi, %r14 +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %rsi, %rbp +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: andl $1, %edi +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %rbp, %rcx +; X64-NEXT: adcq %rdi, %rsi +; X64-NEXT: movq %r9, %rax +; X64-NEXT: xorl %r13d, %r13d +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: movq (%rsp), %r10 # 8-byte Reload +; X64-NEXT: addq %r10, %r12 +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: adcq %r9, %r8 +; X64-NEXT: addq %rcx, %r12 +; X64-NEXT: adcq %rsi, %r8 +; X64-NEXT: movq %r11, %rsi +; X64-NEXT: movq 64(%rsi), %rax +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq %rbx, %rdi +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq 72(%rsi), %rbx +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %r11, %rax +; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: andl $1, %edi +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: adcq %rdi, %rsi +; X64-NEXT: movq %r11, %rdi +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %r11 +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: addq %r13, %r10 +; X64-NEXT: adcq %r11, %r9 +; X64-NEXT: addq %rbp, %r10 +; X64-NEXT: adcq %rsi, %r9 +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload +; X64-NEXT: movq %r10, (%rsp) # 8-byte Spill +; X64-NEXT: adcq %r14, %r9 +; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %r12 +; X64-NEXT: adcq $0, %r8 +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %r9 +; X64-NEXT: movq %rax, %r15 +; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %rbp +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: addq %r9, %rsi +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: mulq %r10 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %rdi +; X64-NEXT: addq %rsi, %rdi +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rbp, %rcx +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq %rbx, %rax 
+; X64-NEXT: mulq %r10 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: adcq %r14, %r11 +; X64-NEXT: addq %rax, %r13 +; X64-NEXT: adcq %rdx, %r11 +; X64-NEXT: addq (%rsp), %r15 # 8-byte Folded Reload +; X64-NEXT: movq %r15, (%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %r13 +; X64-NEXT: adcq $0, %r11 +; X64-NEXT: addq %r12, %r13 +; X64-NEXT: adcq %r8, %r11 +; X64-NEXT: movl $0, %r8d +; X64-NEXT: adcq $0, %r8 +; X64-NEXT: sbbq %r9, %r9 +; X64-NEXT: andl $1, %r9d +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload +; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rdx, %r12 +; X64-NEXT: movq %rax, %rdi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %r12, %rbp +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r10 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: mulq %r10 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: adcq %r14, %rcx +; X64-NEXT: addq %rax, %rsi +; X64-NEXT: adcq %rdx, %rcx +; X64-NEXT: addq %r13, %rdi +; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r11, %rbp +; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r8, %rsi +; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r9, %rcx +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq 96(%rbp), %rcx +; X64-NEXT: imulq %rcx, %r10 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: addq %r10, %rdx +; X64-NEXT: movq 104(%rbp), %r8 +; X64-NEXT: imulq %r8, %r15 +; X64-NEXT: addq %rdx, %r15 +; X64-NEXT: movq 112(%rbp), %rax +; X64-NEXT: movq %rbp, %rdi +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: imulq %rbx, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: addq %rsi, %rdx +; X64-NEXT: movq 120(%rdi), %rdi +; X64-NEXT: imulq %rbp, %rdi +; X64-NEXT: addq %rdx, %rdi +; X64-NEXT: addq %r9, %r13 +; X64-NEXT: adcq %r15, %rdi +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbp, %r9 +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %rbp +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: addq %rbp, %rsi +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rbp +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: addq %rsi, %r12 +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: sbbq %rcx, %rcx +; X64-NEXT: andl $1, %ecx +; X64-NEXT: movq %rbx, %rax +; 
X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: addq %rbp, %r9 +; X64-NEXT: adcq %rcx, %r8 +; X64-NEXT: addq %r13, %r9 +; X64-NEXT: adcq %rdi, %r8 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: imulq %rbx, %rsi +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: addq %rsi, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: imulq %r11, %rcx +; X64-NEXT: addq %rdx, %rcx +; X64-NEXT: movq %rcx, %rsi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: imulq %r14, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: addq %rcx, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: imulq %rbp, %rax +; X64-NEXT: addq %rdx, %rax +; X64-NEXT: addq %r10, %r13 +; X64-NEXT: adcq %rsi, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbp, %r10 +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: movq %r14, %rax +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %r10, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rdx, %r15 +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: addq %rcx, %r10 +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: addq %rdi, %r15 +; X64-NEXT: sbbq %rcx, %rcx +; X64-NEXT: andl $1, %ecx +; X64-NEXT: movq %r14, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: addq %r15, %rax +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: addq %r13, %rax +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: adcq %r12, %r10 +; X64-NEXT: adcq %r9, %rax +; X64-NEXT: adcq %r8, %rdx +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: movq (%rsp), %rbx # 8-byte Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, %r8 +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: movq %rdi, %r9 +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload +; X64-NEXT: adcq 
{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, (%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 8(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 16(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 24(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 32(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 40(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 48(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 56(%rcx) +; X64-NEXT: movq %r8, 64(%rcx) +; X64-NEXT: movq %r9, 72(%rcx) +; X64-NEXT: movq %rbx, 80(%rcx) +; X64-NEXT: movq %rsi, 88(%rcx) +; X64-NEXT: movq %rbp, 96(%rcx) +; X64-NEXT: movq %r10, 104(%rcx) +; X64-NEXT: movq %rax, 112(%rcx) +; X64-NEXT: movq %rdx, 120(%rcx) +; X64-NEXT: addq $360, %rsp # imm = 0x168 +; X64-NEXT: popq %rbx +; X64-NEXT: popq %r12 +; X64-NEXT: popq %r13 +; X64-NEXT: popq %r14 +; X64-NEXT: popq %r15 +; X64-NEXT: popq %rbp +; X64-NEXT: retq + %av = load i1024, i1024* %a + %bv = load i1024, i1024* %b + %r = mul i1024 %av, %bv + store i1024 %r, i1024* %out + ret void +} diff --git a/test/CodeGen/X86/mul-i256.ll b/test/CodeGen/X86/mul-i256.ll index 8b8b10aa1790..8f207b8dd086 100644 --- a/test/CodeGen/X86/mul-i256.ll +++ b/test/CodeGen/X86/mul-i256.ll @@ -15,12 +15,17 @@ entry: ; There is a lot of inter-register motion, and so matching the instruction ; sequence will be fragile. There should be 6 underlying multiplications. 
; CHECK: imulq +; CHECK: mulq ; CHECK: imulq ; CHECK: imulq +; CHECK: mulq ; CHECK: imulq -; CHECK: imulq -; CHECK: imulq +; CHECK: mulq +; CHECK: mulq +; CHECK: mulq +; CHECK: mulq ; CHECK-NOT: imulq +; CHECK-NOT: mulq ; CHECK: retq attributes #0 = { norecurse nounwind uwtable "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" } diff --git a/test/CodeGen/X86/mul-i512.ll b/test/CodeGen/X86/mul-i512.ll new file mode 100644 index 000000000000..f9cea6d2ba32 --- /dev/null +++ b/test/CodeGen/X86/mul-i512.ll @@ -0,0 +1,1238 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X32 +; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64 + +define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { +; X32-LABEL: test_512: +; X32: # BB#0: +; X32-NEXT: pushl %ebp +; X32-NEXT: movl %esp, %ebp +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi +; X32-NEXT: andl $-8, %esp +; X32-NEXT: subl $656, %esp # imm = 0x290 +; X32-NEXT: movl 8(%ebp), %eax +; X32-NEXT: movl 48(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 52(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 56(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 60(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 40(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 44(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 32(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 36(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl (%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 4(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 16(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 20(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 8(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 12(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 24(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 28(%eax), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 12(%ebp), %eax +; X32-NEXT: movl 48(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 52(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 56(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 60(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 32(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 36(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 40(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 44(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl (%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 4(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 8(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 
12(%eax), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 16(%eax), %esi +; X32-NEXT: movl 20(%eax), %edi +; X32-NEXT: movl 24(%eax), %ebx +; X32-NEXT: movl 28(%eax), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %eax +; X32-NEXT: pushl %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: pushl %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl 
{{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl 
$0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; 
X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl %edi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl $0 +; X32-NEXT: pushl $0 +; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %esi +; X32-NEXT: adcl $0, %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill 
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %esi, %eax +; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: movl $0, %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: sbbl %edx, %edx +; X32-NEXT: andl $1, %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edx, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %edx +; X32-NEXT: addl %eax, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edx, %eax +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl $0, %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: sbbl %esi, %esi +; X32-NEXT: andl $1, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl %edi, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; 
X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: addl %eax, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl %ecx, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %edx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %edi, %edx +; X32-NEXT: adcl %ebx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ebx, %ebx +; X32-NEXT: andl $1, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl %ecx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl %edi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: addl %edx, %eax +; X32-NEXT: movl %eax, %edx +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl %ebx, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: 
adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl %edx, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: movl %esi, %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl %edi, %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: sbbl %eax, %eax +; X32-NEXT: andl $1, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edx, %eax +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl $0, %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: sbbl %esi, %esi +; X32-NEXT: andl $1, %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: addl %eax, %edi +; X32-NEXT: adcl %ecx, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; 
X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %edi, %edx +; X32-NEXT: adcl %ebx, %esi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl %edx, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: addl %eax, %esi +; X32-NEXT: adcl %ecx, %edi +; X32-NEXT: movl $0, %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: sbbl %ecx, %ecx +; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: leal {{[0-9]+}}(%esp), %eax +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; 
X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %eax +; X32-NEXT: calll __multi3 +; X32-NEXT: addl $32, %esp +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl %esi, %ebx +; X32-NEXT: adcl %edi, %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl 16(%ebp), %edi +; X32-NEXT: movl %esi, 4(%edi) +; X32-NEXT: movl 16(%ebp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl %edi, (%esi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl %edi, 8(%esi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl %edi, 12(%esi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl %edi, 16(%esi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl %edi, 20(%esi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl %edi, 24(%esi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl %edi, 28(%esi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; 
X32-NEXT: movl %edi, 32(%esi) +; X32-NEXT: movl %ebx, 36(%esi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl %edi, 40(%esi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl %edi, 44(%esi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl %edi, 48(%esi) +; X32-NEXT: movl %ecx, 52(%esi) +; X32-NEXT: movl %edx, 56(%esi) +; X32-NEXT: movl %eax, 60(%esi) +; X32-NEXT: leal -12(%ebp), %esp +; X32-NEXT: popl %esi +; X32-NEXT: popl %edi +; X32-NEXT: popl %ebx +; X32-NEXT: popl %ebp +; X32-NEXT: retl +; +; X64-LABEL: test_512: +; X64: # BB#0: +; X64-NEXT: pushq %rbp +; X64-NEXT: pushq %r15 +; X64-NEXT: pushq %r14 +; X64-NEXT: pushq %r13 +; X64-NEXT: pushq %r12 +; X64-NEXT: pushq %rbx +; X64-NEXT: pushq %rax +; X64-NEXT: movq %rdx, (%rsp) # 8-byte Spill +; X64-NEXT: movq 24(%rdi), %rbp +; X64-NEXT: movq 16(%rdi), %rcx +; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq (%rsi), %rdx +; X64-NEXT: movq 8(%rsi), %r8 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rdx, %r10 +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rsi, %r9 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: addq %r10, %rsi +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r11 +; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: addq %rsi, %r12 +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rbx, %rcx +; X64-NEXT: sbbq %rbx, %rbx +; X64-NEXT: andl $1, %ebx +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: adcq %rbx, %rsi +; X64-NEXT: xorl %ecx, %ecx +; X64-NEXT: movq %r9, %rbx +; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %r13 +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: movq %r11, %rax +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %r9 +; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %r15 +; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: addq %r10, %r15 +; X64-NEXT: adcq %r13, %r9 +; X64-NEXT: addq %rbp, %r15 +; X64-NEXT: adcq %rsi, %r9 +; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, %r11 +; X64-NEXT: movq 8(%rdi), %rax +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rdx, %rbp +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: addq %r11, %rsi +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: addq %rsi, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: addq %rbp, %rbx +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: andl $1, %edi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rbx, %rbp +; X64-NEXT: adcq %rdi, %rsi +; X64-NEXT: movq %r14, %rcx +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: 
xorl %edx, %edx +; X64-NEXT: mulq %rdx +; X64-NEXT: movq %rdx, %r14 +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: addq %r11, %r10 +; X64-NEXT: adcq %r14, %r13 +; X64-NEXT: addq %rbp, %r10 +; X64-NEXT: adcq %rsi, %r13 +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload +; X64-NEXT: adcq %r12, %r13 +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: adcq $0, %r9 +; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq 16(%rsi), %r8 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r9 +; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rdi, %rbp +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq 24(%rsi), %rdi +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: addq %rbx, %rsi +; X64-NEXT: sbbq %rbp, %rbp +; X64-NEXT: andl $1, %ebp +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: addq %rsi, %r9 +; X64-NEXT: adcq %rbp, %rbx +; X64-NEXT: movq %r8, %rax +; X64-NEXT: xorl %ecx, %ecx +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rbp, %r11 +; X64-NEXT: adcq %rdx, %r14 +; X64-NEXT: addq %r9, %r11 +; X64-NEXT: adcq %rbx, %r14 +; X64-NEXT: addq %r10, %r12 +; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r13, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq $0, %r11 +; X64-NEXT: adcq $0, %r14 +; X64-NEXT: addq %r15, %r11 +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: movq %rcx, %r13 +; X64-NEXT: sbbq %r9, %r9 +; X64-NEXT: andl $1, %r9d +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %r15 +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: movq %r10, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r15, %rbx +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rsi, %rcx +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq %r10, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: addq %rbp, %rsi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: addq %rax, %rsi +; X64-NEXT: adcq %rdx, %rcx +; X64-NEXT: addq %r11, %r12 +; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r14, %rbx +; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r13, %rsi +; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r9, %rcx +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq 32(%rcx), %rsi +; X64-NEXT: 
imulq %rsi, %rdi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: addq %rdi, %rdx +; X64-NEXT: movq 40(%rcx), %r9 +; X64-NEXT: imulq %r9, %r8 +; X64-NEXT: addq %rdx, %r8 +; X64-NEXT: movq 48(%rcx), %rax +; X64-NEXT: movq %rcx, %rbx +; X64-NEXT: movq %rax, %rdi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: imulq %rcx, %rdi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: addq %rdi, %rdx +; X64-NEXT: movq 56(%rbx), %rbx +; X64-NEXT: imulq %rbp, %rbx +; X64-NEXT: addq %rdx, %rbx +; X64-NEXT: addq %r11, %r12 +; X64-NEXT: adcq %r8, %rbx +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbp, %r8 +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rdi, %rbp +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: addq %rbp, %r11 +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: addq %rsi, %rdi +; X64-NEXT: sbbq %rsi, %rsi +; X64-NEXT: andl $1, %esi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %r14 +; X64-NEXT: movq %rax, %r15 +; X64-NEXT: addq %rdi, %r15 +; X64-NEXT: adcq %rsi, %r14 +; X64-NEXT: addq %r12, %r15 +; X64-NEXT: adcq %rbx, %r14 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; X64-NEXT: movq 56(%rdx), %rcx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: imulq %r8, %rcx +; X64-NEXT: movq 48(%rdx), %rbp +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: addq %rcx, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: imulq %r10, %rbp +; X64-NEXT: addq %rdx, %rbp +; X64-NEXT: movq 32(%rbx), %rdi +; X64-NEXT: movq 40(%rbx), %r12 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: imulq %r12, %rcx +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: addq %rcx, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: imulq %rdi, %r13 +; X64-NEXT: addq %rdx, %r13 +; X64-NEXT: addq %rsi, %r9 +; X64-NEXT: adcq %rbp, %r13 +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq %r12, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r10 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %rdi +; X64-NEXT: addq %rbp, %rdi +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq %rbx, %rcx +; X64-NEXT: sbbq %rbp, %rbp +; X64-NEXT: andl $1, %ebp +; X64-NEXT: movq %r12, %rax +; X64-NEXT: mulq %r10 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: adcq %rbp, %rdx +; X64-NEXT: addq %r9, %rax +; X64-NEXT: adcq %r13, %rdx +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: adcq %r11, %rdi +; X64-NEXT: adcq %r15, %rax +; X64-NEXT: adcq %r14, %rdx +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload +; X64-NEXT: 
movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, (%rcx) +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, 8(%rcx) +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, 16(%rcx) +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, 24(%rcx) +; X64-NEXT: movq %rsi, 32(%rcx) +; X64-NEXT: movq %rdi, 40(%rcx) +; X64-NEXT: movq %rax, 48(%rcx) +; X64-NEXT: movq %rdx, 56(%rcx) +; X64-NEXT: addq $8, %rsp +; X64-NEXT: popq %rbx +; X64-NEXT: popq %r12 +; X64-NEXT: popq %r13 +; X64-NEXT: popq %r14 +; X64-NEXT: popq %r15 +; X64-NEXT: popq %rbp +; X64-NEXT: retq + %av = load i512, i512* %a + %bv = load i512, i512* %b + %r = mul i512 %av, %bv + store i512 %r, i512* %out + ret void +} diff --git a/test/LTO/X86/type-mapping-bug.ll b/test/LTO/X86/type-mapping-bug.ll index 3a1891234c86..c2aeb8817ec5 100644 --- a/test/LTO/X86/type-mapping-bug.ll +++ b/test/LTO/X86/type-mapping-bug.ll @@ -1,6 +1,6 @@ ; RUN: llvm-as -o %t.dst.bc %s ; RUN: llvm-as -o %t.src.bc %S/Inputs/type-mapping-src.ll -; RUN: llvm-lto %t.dst.bc %t.src.bc -o=/dev/null +; RUN: llvm-lto %t.dst.bc %t.src.bc -o=%t.lto.bc target triple = "x86_64-pc-windows-msvc18.0.0" diff --git a/test/Transforms/InstCombine/indexed-gep-compares.ll b/test/Transforms/InstCombine/indexed-gep-compares.ll index 495881549e25..64dff2712976 100644 --- a/test/Transforms/InstCombine/indexed-gep-compares.ll +++ b/test/Transforms/InstCombine/indexed-gep-compares.ll @@ -167,4 +167,24 @@ lpad: ; CHECK: ret i32* %[[PTR]] } + +@pr30402 = constant i64 3 +define i1 @test7() { +entry: + br label %bb7 + +bb7: ; preds = %bb10, %entry-block + %phi = phi i64* [ @pr30402, %entry ], [ getelementptr inbounds (i64, i64* @pr30402, i32 1), %bb7 ] + %cmp = icmp eq i64* %phi, getelementptr inbounds (i64, i64* @pr30402, i32 1) + br i1 %cmp, label %bb10, label %bb7 + +bb10: + ret i1 %cmp +} +; CHECK-LABEL: @test7( +; CHECK: %[[phi:.*]] = phi i64* [ @pr30402, %entry ], [ getelementptr inbounds (i64, i64* @pr30402, i32 1), %bb7 ] +; CHECK: %[[cmp:.*]] = icmp eq i64* %[[phi]], getelementptr inbounds (i64, i64* @pr30402, i32 1) +; CHECK: ret i1 %[[cmp]] + + declare i32 @__gxx_personality_v0(...) 
diff --git a/test/Transforms/InstCombine/unpack-fca.ll b/test/Transforms/InstCombine/unpack-fca.ll index 47e747ccc468..3c5e4177d69f 100644 --- a/test/Transforms/InstCombine/unpack-fca.ll +++ b/test/Transforms/InstCombine/unpack-fca.ll @@ -49,6 +49,15 @@ define void @storeArrayOfA([1 x %A]* %aa.ptr) { ret void } +define void @storeLargeArrayOfA([2000 x %A]* %aa.ptr) { +; CHECK-LABEL: storeLargeArrayOfA +; CHECK-NEXT: store [2000 x %A] +; CHECK-NEXT: ret void + %i1 = insertvalue [2000 x %A] undef, %A { %A__vtbl* @A__vtblZ }, 1 + store [2000 x %A] %i1, [2000 x %A]* %aa.ptr, align 8 + ret void +} + define void @storeStructOfArrayOfA({ [1 x %A] }* %saa.ptr) { ; CHECK-LABEL: storeStructOfArrayOfA ; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { [1 x %A] }, { [1 x %A] }* %saa.ptr, i64 0, i32 0, i64 0, i32 0 @@ -179,6 +188,14 @@ define [2 x %B] @loadArrayOfB([2 x %B]* %ab.ptr) { ret [2 x %B] %1 } +define [2000 x %B] @loadLargeArrayOfB([2000 x %B]* %ab.ptr) { +; CHECK-LABEL: loadLargeArrayOfB +; CHECK-NEXT: load [2000 x %B], [2000 x %B]* %ab.ptr, align 8 +; CHECK-NEXT: ret [2000 x %B] + %1 = load [2000 x %B], [2000 x %B]* %ab.ptr, align 8 + ret [2000 x %B] %1 +} + %struct.S = type <{ i8, %struct.T }> %struct.T = type { i32, i32 } diff --git a/test/Transforms/SimplifyCFG/PR29163.ll b/test/Transforms/SimplifyCFG/PR29163.ll new file mode 100644 index 000000000000..65f9090dd135 --- /dev/null +++ b/test/Transforms/SimplifyCFG/PR29163.ll @@ -0,0 +1,31 @@ +; RUN: opt -S -simplifycfg < %s | FileCheck %s +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@GV = external constant i64* + +define i64* @test1(i1 %cond, i8* %P) { +entry: + br i1 %cond, label %if, label %then + +then: + %bc = bitcast i8* %P to i64* + br label %join + +if: + %load = load i64*, i64** @GV, align 8, !dereferenceable !0 + br label %join + +join: + %phi = phi i64* [ %bc, %then ], [ %load, %if ] + ret i64* %phi +} + +; CHECK-LABEL: define i64* @test1( +; CHECK: %[[bc:.*]] = bitcast i8* %P to i64* +; CHECK: %[[load:.*]] = load i64*, i64** @GV, align 8{{$}} +; CHECK: %[[phi:.*]] = select i1 %cond, i64* %[[load]], i64* %[[bc]] +; CHECK: ret i64* %[[phi]] + + +!0 = !{i64 8} |