diff options
author | Duncan Wilkie <antigravityd@gmail.com> | 2023-11-18 06:11:09 -0600 |
---|---|---|
committer | Duncan Wilkie <antigravityd@gmail.com> | 2023-11-18 06:11:09 -0600 |
commit | 11da511c784eca003deb90c23570f0873954e0de (patch) | |
tree | e14fdd3d5d6345956d67e79ae771d0633d28362b /gmp-6.3.0/mpn/x86_64/coreisbr/aorsmul_1.asm |
Initial commit.
Diffstat (limited to 'gmp-6.3.0/mpn/x86_64/coreisbr/aorsmul_1.asm')
-rw-r--r-- | gmp-6.3.0/mpn/x86_64/coreisbr/aorsmul_1.asm | 212 |
1 files changed, 212 insertions, 0 deletions
diff --git a/gmp-6.3.0/mpn/x86_64/coreisbr/aorsmul_1.asm b/gmp-6.3.0/mpn/x86_64/coreisbr/aorsmul_1.asm new file mode 100644 index 0000000..b4c1572 --- /dev/null +++ b/gmp-6.3.0/mpn/x86_64/coreisbr/aorsmul_1.asm @@ -0,0 +1,212 @@ +dnl X86-64 mpn_addmul_1 and mpn_submul_1 optimised for Intel Sandy Bridge. + +dnl Contributed to the GNU project by Torbjörn Granlund. + +dnl Copyright 2003-2005, 2007, 2008, 2011-2013 Free Software Foundation, Inc. + +dnl This file is part of the GNU MP Library. +dnl +dnl The GNU MP Library is free software; you can redistribute it and/or modify +dnl it under the terms of either: +dnl +dnl * the GNU Lesser General Public License as published by the Free +dnl Software Foundation; either version 3 of the License, or (at your +dnl option) any later version. +dnl +dnl or +dnl +dnl * the GNU General Public License as published by the Free Software +dnl Foundation; either version 2 of the License, or (at your option) any +dnl later version. +dnl +dnl or both in parallel, as here. +dnl +dnl The GNU MP Library is distributed in the hope that it will be useful, but +dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +dnl for more details. +dnl +dnl You should have received copies of the GNU General Public License and the +dnl GNU Lesser General Public License along with the GNU MP Library. If not, +dnl see https://www.gnu.org/licenses/. 
include(`../config.m4')

C	     cycles/limb
C AMD K8,K9	 4.27
C AMD K10	 4.27  4.54
C AMD bull	 4.76
C AMD pile	 4.55
C AMD steam
C AMD excavator
C AMD bobcat	 5.30
C AMD jaguar	 5.28
C Intel P4	16.2  17.1
C Intel core2	 5.26
C Intel NHM	 5.09
C Intel SBR	 3.21
C Intel IBR	 2.96
C Intel HWL	 2.81
C Intel BWL	 2.76
C Intel SKL	 2.76
C Intel atom	21.5
C Intel SLM	 9.5
C VIA nano

C The loop of this code is the result of running a code generation and
C optimization tool suite written by David Harvey and Torbjörn Granlund.

C mp_limb_t func(mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
C where func is mpn_addmul_1 (rp[] += up[] * v0) or mpn_submul_1
C (rp[] -= up[] * v0), selected by ADDSUB below; the high limb of the
C product chain (the final carry/borrow) is returned in %rax.
C
C SysV argument registers; the IFDOS defines further down remap them
C for the Win64 (DOS64) ABI.
define(`rp',      `%rdi')   C rcx
define(`up',      `%rsi')   C rdx
define(`n_param', `%rdx')   C r8
define(`v0',      `%rcx')   C r9

C Biased loop counter (callee-saved, so it survives nothing here --
C there are no calls -- but rbx must be preserved for the caller).
define(`n', `%rbx')

C I(a,b) expands to its first argument (used in the wind-down so the
C same template can address either via rp directly or via rp+n*8).
define(`I',`$1')

ifdef(`OPERATION_addmul_1',`
  define(`ADDSUB',	`add')
  define(`func',	`mpn_addmul_1')
')
ifdef(`OPERATION_submul_1',`
  define(`ADDSUB',	`sub')
  define(`func',	`mpn_submul_1')
')

ABI_SUPPORT(DOS64)
ABI_SUPPORT(STD64)

MULFUNC_PROLOGUE(mpn_addmul_1 mpn_submul_1)

C Win64 ABI: args arrive in rcx, rdx, r8, r9; remap the symbolic names.
IFDOS(`	define(`up', ``%rsi'')	') dnl
IFDOS(`	define(`rp', ``%rcx'')	') dnl
IFDOS(`	define(`v0', ``%r9'')	') dnl
IFDOS(`	define(`r9', ``rdi'')	') dnl
IFDOS(`	define(`n_param',``%r8'')	') dnl

ASM_START()
	TEXT
	ALIGN(32)
PROLOGUE(func)

C Win64: rsi/rdi are callee-saved there, and up must be moved out of
C the arg register rdx before mul (which clobbers rdx).
IFDOS(``push	%rsi		'')
IFDOS(``push	%rdi		'')
IFDOS(``mov	%rdx, %rsi	'')

	mov	(up), %rax	C first up limb, multiplier input for mul
	push	%rbx
	lea	(up,n_param,8), up	C point past the arrays; negative
	lea	(rp,n_param,8), rp	C index n then counts up toward 0

C Dispatch on n mod 4 to enter the 4-way unrolled loop in the right
C phase; bit 0 then bit 1 of n_param select among b0/b1/b2/b3.
	test	$1, R8(n_param)
	jnz	L(b13)

L(b02):	xor	R32(%r11), R32(%r11)	C clear carry reg for even entry
	test	$2, R8(n_param)
	jnz	L(b2)

L(b0):	mov	$1, R32(n)	C n ≡ 0 (mod 4): bias index by +1
	sub	n_param, n
	mul	v0
	mov	%rdx, %r9
	mov	-8(rp,n,8), %r8
	jmp	L(e0)

	ALIGN(16)
L(b2):	mov	$-1, n		C n ≡ 2 (mod 4): bias index by -1
	sub	n_param, n
	mul	v0
	mov	8(rp,n,8), %r8
	mov	%rdx, %r9
	jmp	L(e2)

	ALIGN(16)
L(b13):	xor	R32(%r9), R32(%r9)	C clear carry reg for odd entry
	test	$2, R8(n_param)
	jnz	L(b3)

L(b1):	mov	$2, R32(n)	C n ≡ 1 (mod 4): bias index by +2
	sub	n_param, n
	jns	L(1)		C 2-n_param >= 0 here iff n_param == 1
	mul	v0
	mov	-16(rp,n,8), %r10
	mov	%rdx, %r11
	jmp	L(e1)

	ALIGN(16)
L(b3):	xor	R32(n), R32(n)	C n ≡ 3 (mod 4): no bias
	sub	n_param, n
	mul	v0
	mov	(rp,n,8), %r10
	jmp	L(e3)

	ALIGN(32)
C Main loop: 4 limbs per iteration.  Two carry registers (r9/r11)
C alternate roles; each ADDSUB/adc pair must stay adjacent, as the adc
C consumes the flags the preceding ADDSUB produced.
L(top):	mul	v0
	mov	-16(rp,n,8), %r10
	ADDSUB	%r11, %r8
	mov	%rdx, %r11
	adc	$0, %r9
	mov	%r8, -24(rp,n,8)
L(e1):	ADDSUB	%rax, %r10
	mov	-8(up,n,8), %rax
	adc	$0, %r11
	mul	v0
	ADDSUB	%r9, %r10
	mov	%rdx, %r9
	mov	-8(rp,n,8), %r8
	adc	$0, %r11
	mov	%r10, -16(rp,n,8)
L(e0):	ADDSUB	%rax, %r8
	adc	$0, %r9
	mov	(up,n,8), %rax
	mul	v0
	mov	(rp,n,8), %r10
	ADDSUB	%r11, %r8
	mov	%r8, -8(rp,n,8)
	adc	$0, %r9
L(e3):	mov	%rdx, %r11
	ADDSUB	%rax, %r10
	mov	8(up,n,8), %rax
	adc	$0, %r11
	mul	v0
	mov	8(rp,n,8), %r8
	ADDSUB	%r9, %r10
	mov	%rdx, %r9
	mov	%r10, (rp,n,8)
	adc	$0, %r11
L(e2):	ADDSUB	%rax, %r8
	adc	$0, %r9
	mov	16(up,n,8), %rax
	add	$4, n		C biased n counts up; carry out of the
	jnc	L(top)		C add signals we crossed zero -> done

C Wind-down: finish the last product and fold both carry registers
C into the final limb; return the outgoing carry/borrow in rax.
L(end):	mul	v0
	mov	I(-8(rp),-16(rp,n,8)), %r10
	ADDSUB	%r11, %r8
	mov	%rdx, %r11
	adc	$0, %r9
	mov	%r8, I(-16(rp),-24(rp,n,8))
	ADDSUB	%rax, %r10
	adc	$0, %r11
	ADDSUB	%r9, %r10
	adc	$0, %r11
	mov	%r10, I(-8(rp),-16(rp,n,8))
	mov	%r11, %rax

	pop	%rbx
IFDOS(``pop	%rdi		'')
IFDOS(``pop	%rsi		'')
	ret

	ALIGN(16)
C Single-limb case (n == 1, reached from L(b1)): one mul, apply it to
C rp[-1] directly, return high limb plus carry.
L(1):	mul	v0
	ADDSUB	%rax, -8(rp)
	mov	%rdx, %rax
	adc	$0, %rax
	pop	%rbx
IFDOS(``pop	%rdi		'')
IFDOS(``pop	%rsi		'')
	ret
EPILOGUE()
ASM_END()