author     Duncan Wilkie <antigravityd@gmail.com>  2023-11-18 06:11:09 -0600
committer  Duncan Wilkie <antigravityd@gmail.com>  2023-11-18 06:11:09 -0600
commit     11da511c784eca003deb90c23570f0873954e0de (patch)
tree       e14fdd3d5d6345956d67e79ae771d0633d28362b /gmp-6.3.0/mpn/arm64/applem1
Initial commit.
Diffstat (limited to 'gmp-6.3.0/mpn/arm64/applem1')
-rw-r--r--  gmp-6.3.0/mpn/arm64/applem1/addaddmul_1msb0.asm   92
-rw-r--r--  gmp-6.3.0/mpn/arm64/applem1/aorsmul_1.asm        161
-rw-r--r--  gmp-6.3.0/mpn/arm64/applem1/gmp-mparam.h         187
-rw-r--r--  gmp-6.3.0/mpn/arm64/applem1/sqr_basecase.asm     318
4 files changed, 758 insertions, 0 deletions
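
For orientation before the diff itself: the two multiply-accumulate .asm files added below implement standard mpn building blocks, and their stated per-limb semantics can be summarized in portable C. The sketch below is illustrative only (it is not GMP code; the mp_limb_t typedef and the unsigned __int128 arithmetic are assumptions made for the example), but it is what the hand-scheduled loops must compute. mpn_addmul_1c is mpn_addmul_1 with an explicit incoming carry (the cin argument in the assembly).

#include <stdint.h>

typedef uint64_t mp_limb_t;          /* one 64-bit limb */
typedef unsigned __int128 limb2_t;   /* assumes a compiler providing __int128 */

/* Reference for mpn_addmul_1: {rp,n} += {up,n} * v0, returns the carry limb.
   mpn_submul_1 is the same loop with the addition into rp[i] replaced by a
   subtraction, returning the borrow. */
static mp_limb_t
ref_addmul_1 (mp_limb_t *rp, const mp_limb_t *up, long n, mp_limb_t v0)
{
  mp_limb_t cy = 0;
  for (long i = 0; i < n; i++)
    {
      limb2_t t = (limb2_t) up[i] * v0 + rp[i] + cy;
      rp[i] = (mp_limb_t) t;
      cy = (mp_limb_t) (t >> 64);
    }
  return cy;
}

/* Reference for mpn_addaddmul_1msb0: {rp,n} = {ap,n}*u0 + {bp,n}*v0 with
   u0,v0 < 2^63, returns the carry limb.  The msb0 restriction keeps the two
   products plus the running carry within 128 bits. */
static mp_limb_t
ref_addaddmul_1msb0 (mp_limb_t *rp, const mp_limb_t *ap, const mp_limb_t *bp,
                     long n, mp_limb_t u0, mp_limb_t v0)
{
  mp_limb_t cy = 0;
  for (long i = 0; i < n; i++)
    {
      limb2_t t = (limb2_t) ap[i] * u0 + (limb2_t) bp[i] * v0 + cy;
      rp[i] = (mp_limb_t) t;
      cy = (mp_limb_t) (t >> 64);
    }
  return cy;
}

The cycles/limb figures quoted in the file headers (2.0 for addaddmul_1msb0, 1.25 for addmul_1 on Apple M1) describe the throughput of the unrolled assembly loops, not of a C loop like the one above.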
diff --git a/gmp-6.3.0/mpn/arm64/applem1/addaddmul_1msb0.asm b/gmp-6.3.0/mpn/arm64/applem1/addaddmul_1msb0.asm
new file mode 100644
index 0000000..03cbf97
--- /dev/null
+++ b/gmp-6.3.0/mpn/arm64/applem1/addaddmul_1msb0.asm
@@ -0,0 +1,92 @@
+dnl ARM64 mpn_addaddmul_1msb0, R = Au + Bv, u,v < 2^63.
+
+dnl Copyright 2021 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl   * the GNU Lesser General Public License as published by the Free
+dnl     Software Foundation; either version 3 of the License, or (at your
+dnl     option) any later version.
+dnl
+dnl or
+dnl
+dnl   * the GNU General Public License as published by the Free Software
+dnl     Foundation; either version 2 of the License, or (at your option) any
+dnl     later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C		cycles/limb
+C Cortex-A53
+C Cortex-A55
+C Cortex-A57
+C Cortex-A72
+C Cortex-A73
+C X-Gene
+C Apple M1	2.0
+
+changecom(blah)
+
+define(`rp', x0)
+define(`ap', x1)
+define(`bp', x2)
+define(`n', x3)
+define(`u0', x4)
+define(`v0', x5)
+
+C TODO
+C  * Use fewer distinct registers, should be trivial.
+
+PROLOGUE(mpn_addaddmul_1msb0)
+	lsr	x7, n, #1
+	adds	x6, xzr, xzr
+	tbz	n, #0, L(top)
+
+	ldr	x11, [ap], #8		C 0
+	ldr	x15, [bp], #8		C 0
+	mul	x10, x11, u0		C 0
+	umulh	x11, x11, u0		C 1
+	mul	x14, x15, v0		C 0
+	umulh	x15, x15, v0		C 1
+	adds	x10, x10, x14		C 0
+	adcs	x6, x11, x15		C 1
+	str	x10, [rp], #8		C 0
+	cbz	x7, L(end)
+
+L(top):	ldp	x11, x13, [ap], #16	C 0 1
+	ldp	x15, x17, [bp], #16	C 0 1
+	mul	x10, x11, u0		C 0
+	umulh	x11, x11, u0		C 1
+	mul	x14, x15, v0		C 0
+	umulh	x15, x15, v0		C 1
+	adcs	x10, x10, x14		C 0
+	adc	x11, x11, x15		C 1
+	adds	x10, x10, x6		C 0
+	mul	x12, x13, u0		C 1
+	umulh	x13, x13, u0		C 2
+	mul	x14, x17, v0		C 1
+	umulh	x17, x17, v0		C 2
+	adcs	x12, x12, x14		C 1
+	adc	x6, x13, x17		C 2
+	adds	x11, x12, x11		C 1
+	stp	x10, x11, [rp], #16	C 0 1
+	sub	x7, x7, #1
+	cbnz	x7, L(top)
+
+L(end):	adc	x0, x6, xzr
+	ret
+EPILOGUE()
diff --git a/gmp-6.3.0/mpn/arm64/applem1/aorsmul_1.asm b/gmp-6.3.0/mpn/arm64/applem1/aorsmul_1.asm
new file mode 100644
index 0000000..aa87c2a
--- /dev/null
+++ b/gmp-6.3.0/mpn/arm64/applem1/aorsmul_1.asm
@@ -0,0 +1,161 @@
+dnl ARM64 mpn_addmul_1 and mpn_submul_1.
+
+dnl Contributed to the GNU project by Torbjörn Granlund.
+
+dnl Copyright 2020 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl   * the GNU Lesser General Public License as published by the Free
+dnl     Software Foundation; either version 3 of the License, or (at your
+dnl     option) any later version.
+dnl
+dnl or
+dnl
+dnl   * the GNU General Public License as published by the Free Software
+dnl     Foundation; either version 2 of the License, or (at your option) any
+dnl     later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C		cycles/limb
+C Cortex-A53
+C Cortex-A55
+C Cortex-A57
+C Cortex-A72
+C Cortex-A73
+C X-Gene
+C Apple M1	1.25
+
+changecom(blah)
+
+define(`rp', x0)
+define(`up', x1)
+define(`n', x2)
+define(`v0', x3)
+define(`cin',x4)
+
+define(`CY',x17)
+
+ifdef(`OPERATION_addmul_1', `
+  define(`ADDSUB', adds)
+  define(`ADDSUBC', adcs)
+  define(`COND', `cc')
+  define(`func', mpn_addmul_1)')
+ifdef(`OPERATION_submul_1', `
+  define(`ADDSUB', subs)
+  define(`ADDSUBC', sbcs)
+  define(`COND', `cs')
+  define(`func', mpn_submul_1)')
+
+MULFUNC_PROLOGUE(mpn_addmul_1 mpn_submul_1 mpn_addmul_1c)
+
+ifdef(`OPERATION_addmul_1', `
+PROLOGUE(mpn_addmul_1c)
+	mov	CY, cin
+	b	L(ent)
+EPILOGUE()
+')
+
+PROLOGUE(func)
+	mov	CY, #0			C W0
+L(ent):	lsr	x16, n, #2
+	tbz	n, #0, L(bx0)
+
+L(bx1):	ldr	x4, [up], #8
+	mul	x8, x4, v0
+	umulh	x4, x4, v0
+	tbz	n, #1, L(b01)
+
+L(b11):	ldp	x5,x6, [up], #16
+	ldp	x12,x13, [rp]
+	ldr	x14, [rp,#16]
+	mul	x9, x5, v0
+	umulh	x5, x5, v0
+	mul	x10, x6, v0
+	umulh	x6, x6, v0
+	ADDSUB	x8, x12, x8
+	ADDSUBC	x4, x13, x4
+	ADDSUBC	x5, x14, x5
+	csinc	x6, x6, x6, COND
+	ADDSUB	x8, x8, CY
+	ADDSUBC	x4, x4, x9
+	ADDSUBC	x5, x5, x10
+	csinc	CY, x6, x6, COND
+	stp	x8, x4, [rp], #16
+	str	x5, [rp], #8
+	cbnz	x16, L(top)
+	mov	x0, CY
+	ret
+
+L(b01):	ldr	x12, [rp]
+	ADDSUB	x8, x12, x8
+	csinc	x4, x4, x4, COND
+	ADDSUB	x8, x8, CY
+	csinc	CY, x4, x4, COND
+	str	x8, [rp], #8
+	cbnz	x16, L(top)
+	mov	x0, CY
+	ret
+
+L(bx0):	ldp	x4,x5, [up], #16
+	tbz	n, #1, L(top)+4
+
+L(b10):	ldp	x12,x13, [rp]
+	mul	x8, x4, v0
+	umulh	x4, x4, v0
+	mul	x9, x5, v0
+	umulh	x5, x5, v0
+	ADDSUB	x8, x12, x8
+	ADDSUBC	x4, x13, x4
+	csinc	x5, x5, x5, COND
+	ADDSUB	x8, x8, CY
+	ADDSUBC	x4, x4, x9
+	csinc	CY, x5, x5, COND
+	stp	x8, x4, [rp], #16
+	cbz	x16, L(done)
+
+L(top):	ldp	x4,x5, [up], #16	C W0 W1
+	ldp	x6,x7, [up], #16	C W2 W3
+	ldp	x12,x13, [rp]		C W0 W1
+	ldp	x14,x15, [rp,#16]	C W2 W3
+	mul	x8, x4, v0		C W0
+	umulh	x4, x4, v0		C W1
+	mul	x9, x5, v0		C W1
+	umulh	x5, x5, v0		C W2
+	mul	x10, x6, v0		C W2
+	umulh	x6, x6, v0		C W3
+	mul	x11, x7, v0		C W3
+	umulh	x7, x7, v0		C W4
+	ADDSUB	x8, x12, x8		C W0
+	ADDSUBC	x4, x13, x4		C W1
+	ADDSUBC	x5, x14, x5		C W2
+	ADDSUBC	x6, x15, x6		C W3
+	csinc	x7, x7, x7, COND	C W4
+	ADDSUB	x8, x8, CY		C W0 carry-in
+	ADDSUBC	x4, x4, x9		C W1
+	ADDSUBC	x5, x5, x10		C W2
+	ADDSUBC	x6, x6, x11		C W2
+	csinc	CY, x7, x7, COND	C W3 carry-out
+	stp	x8, x4, [rp], #16
+	stp	x5, x6, [rp], #16
+	sub	x16, x16, #1
+	cbnz	x16, L(top)
+
+L(done):mov	x0, CY
+	ret
+EPILOGUE()
diff --git a/gmp-6.3.0/mpn/arm64/applem1/gmp-mparam.h b/gmp-6.3.0/mpn/arm64/applem1/gmp-mparam.h
new file mode 100644
index 0000000..d08262f
--- /dev/null
+++ b/gmp-6.3.0/mpn/arm64/applem1/gmp-mparam.h
@@ -0,0 +1,187 @@
+/* gmp-mparam.h -- Compiler/machine parameter header file.
+
+Copyright 2020 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+  * the GNU Lesser General Public License as published by the Free
+    Software Foundation; either version 3 of the License, or (at your
+    option) any later version.
+
+or
+
+  * the GNU General Public License as published by the Free Software
+    Foundation; either version 2 of the License, or (at your option) any
+    later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#define GMP_LIMB_BITS 64
+#define GMP_LIMB_BYTES 8
+
+/* 3200 MHz Apple M1 */
+/* FFT tuning limit = 1 M */
+/* Generated by tuneup.c, 2020-12-25, gcc 4.2 */
+
+#define MOD_1_1P_METHOD 2  /* 42.96% faster than 1 */
+#define MOD_1_NORM_THRESHOLD 0  /* always */
+#define MOD_1_UNNORM_THRESHOLD 0  /* always */
+#define MOD_1N_TO_MOD_1_1_THRESHOLD 5
+#define MOD_1U_TO_MOD_1_1_THRESHOLD 3
+#define MOD_1_1_TO_MOD_1_2_THRESHOLD 11
+#define MOD_1_2_TO_MOD_1_4_THRESHOLD 18
+#define PREINV_MOD_1_TO_MOD_1_THRESHOLD 11
+#define USE_PREINV_DIVREM_1 1  /* native */
+/* From m1.gmplib.org, 2023-07-21 */
+#define DIV_QR_1N_PI1_METHOD 3  /* 13.35% faster than 1 */
+#define DIV_QR_1_NORM_THRESHOLD 2
+#define DIV_QR_1_UNNORM_THRESHOLD 1
+#define DIV_QR_2_PI2_THRESHOLD 9
+#define DIVEXACT_1_THRESHOLD 0  /* always */
+#define BMOD_1_TO_MOD_1_THRESHOLD 28
+
+#define DIV_1_VS_MUL_1_PERCENT 659
+
+#define MUL_TOOM22_THRESHOLD 26
+#define MUL_TOOM33_THRESHOLD 77
+#define MUL_TOOM44_THRESHOLD 153
+#define MUL_TOOM6H_THRESHOLD 446
+#define MUL_TOOM8H_THRESHOLD 626
+
+#define MUL_TOOM32_TO_TOOM43_THRESHOLD 94
+#define MUL_TOOM32_TO_TOOM53_THRESHOLD 81
+#define MUL_TOOM42_TO_TOOM53_THRESHOLD 41
+#define MUL_TOOM42_TO_TOOM63_THRESHOLD 99
+#define MUL_TOOM43_TO_TOOM54_THRESHOLD 133
+
+#define SQR_BASECASE_THRESHOLD 0  /* always (native) */
+#define SQR_TOOM2_THRESHOLD 47
+#define SQR_TOOM3_THRESHOLD 74
+#define SQR_TOOM4_THRESHOLD 372
+#define SQR_TOOM6_THRESHOLD 462
+#define SQR_TOOM8_THRESHOLD 592
+
+#define MULMID_TOOM42_THRESHOLD 44
+
+#define MULMOD_BNM1_THRESHOLD 9
+#define SQRMOD_BNM1_THRESHOLD 11
+
+#define MUL_FFT_MODF_THRESHOLD 216  /* k = 5 */
+#define MUL_FFT_TABLE3 \
+  { {    216, 5}, {      7, 4}, {     19, 5}, {     19, 6}, \
+    {     10, 5}, {     21, 6}, {     21, 7}, {     11, 6}, \
+    {     23, 7}, {     21, 8}, {     11, 7}, {     24, 8}, \
+    {     21, 9}, {     11, 8}, {     27, 9}, {     15, 8}, \
+    {     33, 9}, {     19, 8}, {     39, 9}, {     23, 8}, \
+    {     47, 9}, {     27,10}, {     15, 9}, {     39,10}, \
+    {     23, 9}, {     47,11}, {     15,10}, {     31, 9}, \
+    {     63,10}, {     39, 9}, {     79,10}, {     55,11}, \
+    {     31,10}, {     79,11}, {     47,12}, {     31,11}, \
+    {     63,10}, {    127, 9}, {    255, 8}, {    511,11}, \
+    {     79,10}, {    159, 9}, {    319, 8}, {    639,11}, \
+    {     95,10}, {    191, 9}, {    383,12}, {     63,11}, \
+    {    127,10}, {    255, 9}, {    511, 8}, {   1023,10}, \
+    {    271, 9}, {    543, 8}, {   1087,11}, {    143,10}, \
+    {    287, 9}, {    575, 8}, {   1151,11}, {    159,10}, \
+    {    319, 9}, {    639,12}, {     95,11}, {    191,10}, \
+    {    383,13}, {     63,12}, {    127,11}, {    255,10}, \
+    {    511, 9}, {   1023,11}, {    271,10}, {    543, 9}, \
+    {   1087, 8}, {   2175,11}, {    287,10}, {    575, 9}, \
+    {   1151,12}, {    159,11}, {    319,10}, {    639, 9}, \
+    {   1279,11}, {    351,10}, {    703, 9}, {   1407,12}, \
+    {    191,11}, {    383,10}, {    767,11}, {    415,12}, \
+    {    223,11}, {    447,10}, {    895,11}, {    479,10}, \
+    {    959,13}, {   8192,14}, {  16384,15}, {  32768,16}, \
+    {  65536,17}, { 131072,18}, { 262144,19}, { 524288,20}, \
+    {1048576,21}, {2097152,22}, {4194304,23}, {8388608,24} }
+#define MUL_FFT_TABLE3_SIZE 104
+#define MUL_FFT_THRESHOLD 2368
+
+#define SQR_FFT_MODF_THRESHOLD 304  /* k = 5 */
+#define SQR_FFT_TABLE3 \
+  { {    304, 5}, {     10, 4}, {     21, 5}, {     11, 4}, \
+    {     23, 5}, {     19, 6}, {     10, 5}, {     21, 6}, \
+    {     21, 7}, {     11, 6}, {     23, 7}, {     21, 8}, \
+    {     11, 7}, {     24, 8}, {     15, 7}, {     31, 8}, \
+    {     21, 9}, {     11, 8}, {     27, 9}, {     15, 8}, \
+    {     33, 9}, {     19, 8}, {     39, 9}, {     23, 8}, \
+    {     47, 9}, {     27,10}, {     15, 9}, {     39,10}, \
+    {     23, 9}, {     47,11}, {     15,10}, {     31, 9}, \
+    {     63,10}, {     39, 9}, {     79,10}, {     47,11}, \
+    {     31,10}, {     79,11}, {     47,12}, {     31,11}, \
+    {     63,10}, {    127, 9}, {    255, 8}, {    511,11}, \
+    {     79,10}, {    159, 9}, {    319, 8}, {    639,11}, \
+    {     95,10}, {    191, 9}, {    383,12}, {     63,10}, \
+    {    255, 9}, {    511, 8}, {   1023,10}, {    271, 9}, \
+    {    543, 8}, {   1087,10}, {    287, 9}, {    575, 8}, \
+    {   1151,11}, {    159,10}, {    319, 9}, {    639,11}, \
+    {    175,12}, {     95,11}, {    191,10}, {    383, 9}, \
+    {    767,13}, {     63,12}, {    127,11}, {    255,10}, \
+    {    511, 9}, {   1023,11}, {    271,10}, {    543, 9}, \
+    {   1087, 8}, {   2175,10}, {    575, 9}, {   1151,11}, \
+    {    303,12}, {    159,11}, {    319,10}, {    639, 9}, \
+    {   1279,11}, {    351,10}, {    703, 9}, {   1407,12}, \
+    {    191,11}, {    383,10}, {    767,11}, {    415,10}, \
+    {    831, 9}, {   1663,12}, {    223,11}, {    447,10}, \
+    {    895,11}, {    479,10}, {    959, 9}, {   1919,13}, \
+    {   8192,14}, {  16384,15}, {  32768,16}, {  65536,17}, \
+    { 131072,18}, { 262144,19}, { 524288,20}, {1048576,21}, \
+    {2097152,22}, {4194304,23}, {8388608,24} }
+#define SQR_FFT_TABLE3_SIZE 111
+#define SQR_FFT_THRESHOLD 1856
+
+#define MULLO_BASECASE_THRESHOLD 0  /* always */
+#define MULLO_DC_THRESHOLD 76
+#define MULLO_MUL_N_THRESHOLD 4292
+#define SQRLO_BASECASE_THRESHOLD 6
+#define SQRLO_DC_THRESHOLD 186
+#define SQRLO_SQR_THRESHOLD 3688
+
+#define DC_DIV_QR_THRESHOLD 67
+#define DC_DIVAPPR_Q_THRESHOLD 242
+#define DC_BDIV_QR_THRESHOLD 68
+#define DC_BDIV_Q_THRESHOLD 129
+
+#define INV_MULMOD_BNM1_THRESHOLD 82
+#define INV_NEWTON_THRESHOLD 157
+#define INV_APPR_THRESHOLD 157
+
+#define BINV_NEWTON_THRESHOLD 99
+#define REDC_1_TO_REDC_N_THRESHOLD 68
+
+#define MU_DIV_QR_THRESHOLD 979
+#define MU_DIVAPPR_Q_THRESHOLD 1210
+#define MUPI_DIV_QR_THRESHOLD 76
+#define MU_BDIV_QR_THRESHOLD 942
+#define MU_BDIV_Q_THRESHOLD 1341
+
+#define POWM_SEC_TABLE 11,75,137,712,2177
+
+#define GET_STR_DC_THRESHOLD 12
+#define GET_STR_PRECOMPUTE_THRESHOLD 18
+#define SET_STR_DC_THRESHOLD 632
+#define SET_STR_PRECOMPUTE_THRESHOLD 1215
+
+#define FAC_DSC_THRESHOLD 252
+#define FAC_ODD_THRESHOLD 0  /* always */
+
+#define MATRIX22_STRASSEN_THRESHOLD 9
+#define HGCD2_DIV1_METHOD 1  /* 8.52% faster than 3 */
+#define HGCD_THRESHOLD 131
+#define HGCD_APPR_THRESHOLD 144
+#define HGCD_REDUCE_THRESHOLD 1962
+#define GCD_DC_THRESHOLD 435
+#define GCDEXT_DC_THRESHOLD 199
+#define JACOBI_BASE_METHOD 4  /* 0.80% faster than 1 */
diff --git a/gmp-6.3.0/mpn/arm64/applem1/sqr_basecase.asm b/gmp-6.3.0/mpn/arm64/applem1/sqr_basecase.asm
new file mode 100644
index 0000000..22246cf
--- /dev/null
+++ b/gmp-6.3.0/mpn/arm64/applem1/sqr_basecase.asm
@@ -0,0 +1,318 @@
+dnl ARM64 mpn_sqr_basecase
+
+dnl Contributed to the GNU project by Torbjörn Granlund.
+
+dnl Copyright 2020 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl   * the GNU Lesser General Public License as published by the Free
+dnl     Software Foundation; either version 3 of the License, or (at your
+dnl     option) any later version.
+dnl
+dnl or
+dnl
+dnl   * the GNU General Public License as published by the Free Software
+dnl     Foundation; either version 2 of the License, or (at your option) any
+dnl     later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+dnl TODO
+dnl  * Replace the mul_1 code with less scheduled and thus simpler code. If
+dnl    we base it on the addmul_1 loop, the corner code could benefit from
+dnl    similar incoming register state, which could eliminate some loads.
+dnl  * Handle n = 4 early.
+dnl  * Duplicate addmul loop into 4 loops which fall into each other. Perhaps
+dnl    stick to one mul_1 loop, but do the (mod 4) stuff at its end instead of
+dnl    its beginning.
+
+define(`rp', `x0')
+define(`up', `x1')
+define(`n', `x2')
+
+define(`v0', `x3')
+define(`CY', `x17')
+
+PROLOGUE(mpn_sqr_basecase)
+	cmp	n, #3
+	b.ls	L(le3)
+
+	ldr	v0, [up],#8
+	sub	n, n, #1
+	mul	x6, v0, v0
+	umulh	x4, v0, v0
+	str	x6, [rp],#8
+	lsl	v0, v0, 1
+	lsl	n, n, #3
+	lsr	x16, n, #5
+	tbnz	n, #3, L(mbx1)
+
+L(mbx0):adds	x11, x4, xzr		C move and clear cy
+	tbz	n, #4, L(mb00)
+
+L(mb10):ldp	x4, x5, [up],#16
+	mul	x8, x4, v0
+	umulh	x10, x4, v0
+	cbz	x16, L(m2e)
+	ldp	x6, x7, [up],#16
+	mul	x9, x5, v0
+	b	L(mmid)-8
+
+L(mbx1):ldr	x7, [up],#8
+	mul	x9, x7, v0
+	umulh	x11, x7, v0
+	adds	x9, x9, x4
+	str	x9, [rp],#8
+	tbnz	n, #4, L(mb10)
+L(mb00):ldp	x6, x7, [up],#16
+	mul	x8, x6, v0
+	umulh	x10, x6, v0
+	ldp	x4, x5, [up],#16
+	mul	x9, x7, v0
+	adcs	x12, x8, x11
+	umulh	x11, x7, v0
+	sub	x16, x16, #1
+	cbz	x16, L(mend)
+
+	ALIGN(16)
+L(mtop):mul	x8, x4, v0
+	ldp	x6, x7, [up],#16
+	adcs	x13, x9, x10
+	umulh	x10, x4, v0
+	mul	x9, x5, v0
+	stp	x12, x13, [rp],#16
+	adcs	x12, x8, x11
+	umulh	x11, x5, v0
+L(mmid):mul	x8, x6, v0
+	ldp	x4, x5, [up],#16
+	adcs	x13, x9, x10
+	umulh	x10, x6, v0
+	mul	x9, x7, v0
+	stp	x12, x13, [rp],#16
+	adcs	x12, x8, x11
+	umulh	x11, x7, v0
+	sub	x16, x16, #1
+	cbnz	x16, L(mtop)
+
+L(mend):mul	x8, x4, v0
+	adcs	x13, x9, x10
+	umulh	x10, x4, v0
+	stp	x12, x13, [rp],#16
+L(m2e):	mul	x9, x5, v0
+	adcs	x12, x8, x11
+	umulh	x11, x5, v0
+	adcs	x13, x9, x10
+	stp	x12, x13, [rp],#16
+	adc	x11, x11, xzr
+	str	x11, [rp],#8
+
+L(outer):
+	sub	n, n, #8
+	sub	rp, rp, n
+	sub	up, up, n
+	ldp	x6, x7, [up,#-16]
+	ldr	v0, [rp,#-8]
+	and	x8, x7, x6, asr 63
+	mul	x9, x7, x7
+	adds	v0, v0, x8
+	umulh	x4, x7, x7
+	adc	x4, x4, xzr
+	adds	v0, v0, x9
+	str	v0, [rp,#-8]
+	adc	CY, x4, xzr
+	adds	xzr, x6, x6
+	adc	v0, x7, x7
+	cmp	n, #16
+	beq	L(cor2)
+
+	lsr	x16, n, #5
+	tbz	n, #3, L(bx0)
+
+L(bx1):	ldr	x4, [up],#8
+	mul	x8, x4, v0
+	umulh	x4, x4, v0
+	tbz	n, #4, L(b01)
+
+L(b11):	ldp	x5, x6, [up],#16
+	ldp	x12, x13, [rp]
+	ldr	x14, [rp,#16]
+	mul	x9, x5, v0
+	umulh	x5, x5, v0
+	mul	x10, x6, v0
+	umulh	x6, x6, v0
+	adds	x8, x12, x8
+	adcs	x4, x13, x4
+	adcs	x5, x14, x5
+	adc	x6, x6, xzr
+	adds	x8, x8, CY
+	adcs	x4, x4, x9
+	adcs	x5, x5, x10
+	adc	CY, x6, xzr
+	stp	x8, x4, [rp],#16
+	str	x5, [rp],#8
+	cbnz	x16, L(top)
+	b	L(end)
+
+L(b01):	ldr	x12, [rp]
+	adds	x8, x12, x8
+	adc	x4, x4, xzr
+	adds	x8, x8, CY
+	adc	CY, x4, xzr
+	str	x8, [rp],#8
+	b	L(top)
+
+L(bx0):	ldp	x4, x5, [up],#16
+	tbz	n, #4, L(top)+4
+
+L(b10):	ldp	x12, x13, [rp]
+	mul	x8, x4, v0
+	umulh	x4, x4, v0
+	mul	x9, x5, v0
+	umulh	x5, x5, v0
+	adds	x8, x12, x8
+	adcs	x4, x13, x4
+	adc	x5, x5, xzr
+	adds	x8, x8, CY
+	adcs	x4, x4, x9
+	adc	CY, x5, xzr
+	stp	x8, x4, [rp],#16
+
+	ALIGN(16)
+L(top):	ldp	x4, x5, [up],#16
+	ldp	x6, x7, [up],#16
+	ldp	x12, x13, [rp]
+	ldp	x14, x15, [rp,#16]
+	mul	x8, x4, v0
+	umulh	x4, x4, v0
+	mul	x9, x5, v0
+	umulh	x5, x5, v0
+	mul	x10, x6, v0
+	umulh	x6, x6, v0
+	mul	x11, x7, v0
+	umulh	x7, x7, v0
+	adds	x8, x12, x8
+	adcs	x4, x13, x4
+	adcs	x5, x14, x5
+	adcs	x6, x15, x6
+	adc	x7, x7, xzr
+	adds	x8, x8, CY
+	adcs	x4, x4, x9
+	adcs	x5, x5, x10
+	adcs	x6, x6, x11
+	adc	CY, x7, xzr
+	stp	x8, x4, [rp],#16
+	stp	x5, x6, [rp],#16
+	sub	x16, x16, #1
+	cbnz	x16, L(top)
+
+L(end):	str	CY, [rp],#8
+	b	L(outer)
+
+L(cor2):ldp	x10, x11, [up]
+	ldp	x12, x13, [rp]
+	mul	x8, x10, v0
+	umulh	x4, x10, v0
+	mul	x9, x11, v0
+	umulh	x5, x11, v0
+	adds	x8, x12, x8
+	adcs	x4, x13, x4
+	adc	x5, x5, xzr
+	adds	x8, x8, CY
+	adcs	x13, x4, x9
+	adc	x12, x5, xzr
+	str	x8, [rp]
+	and	x8, x10, x7, asr 63
+	mul	x9, x10, x10
+	adds	x13, x13, x8
+	umulh	x4, x10, x10
+	adc	x4, x4, xzr
+	adds	x13, x13, x9
+	adc	CY, x4, xzr
+	adds	xzr, x7, x7
+	adc	v0, x10, x10
+	mul	x8, x11, v0
+	umulh	x4, x11, v0
+	adds	x8, x12, x8
+	adc	x4, x4, xzr
+	adds	x8, x8, CY
+	adc	v0, x4, xzr
+	stp	x13, x8, [rp,#8]
+	and	x2, x11, x10, asr 63
+	mul	x5, x11, x11
+	adds	v0, v0, x2
+	umulh	x4, x11, x11
+	adc	x4, x4, xzr
+	adds	v0, v0, x5
+	adc	x4, x4, xzr
+	stp	v0, x4, [rp,#24]
+	ret
+
+L(le3):	ldr	v0, [up]
+	mul	x4, v0, v0		C W0
+	umulh	x5, v0, v0		C W1
+	cmp	n, #2
+	b.hs	L(2o3)
+	stp	x4, x5, [rp]
+	ret
+
+L(2o3):	ldr	x6, [up,#8]
+	mul	x7, x6, x6		C W2
+	umulh	x8, x6, x6		C W3
+	mul	x9, v0, x6		C W1+1/64
+	umulh	x10, v0, x6		C W2+1/64
+	b.hi	L(3)
+	adds	x5, x5, x9		C W1
+	adcs	x7, x7, x10		C W2
+	adc	x8, x8, xzr		C W3
+	adds	x5, x5, x9		C W1
+	adcs	x7, x7, x10		C W2
+	adc	x8, x8, xzr		C W3
+	stp	x4, x5, [rp]
+	stp	x7, x8, [rp,#16]
+	ret
+
+L(3):	ldr	x11, [up,#16]
+	mul	x12, x11, x11		C W4
+	umulh	x13, x11, x11		C W5
+	mul	x14, v0, x11		C W2+1/64
+	umulh	x15, v0, x11		C W3+1/64
+	mul	x16, x6, x11		C W3+1/64
+	umulh	x17, x6, x11		C W4+1/64
+	adds	x5, x5, x9
+	adcs	x7, x7, x10
+	adcs	x8, x8, x15
+	adcs	x12, x12, x17
+	adc	x13, x13, xzr
+	adds	x5, x5, x9
+	adcs	x7, x7, x10
+	adcs	x8, x8, x15
+	adcs	x12, x12, x17
+	adc	x13, x13, xzr
+	adds	x7, x7, x14
+	adcs	x8, x8, x16
+	adcs	x12, x12, xzr
+	adc	x13, x13, xzr
+	adds	x7, x7, x14
+	adcs	x8, x8, x16
+	adcs	x12, x12, xzr
+	adc	x13, x13, xzr
+	stp	x4, x5, [rp]
+	stp	x7, x8, [rp,#16]
+	stp	x12, x13, [rp,#32]
+	ret
EPILOGUE()
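
The sqr_basecase.asm routine added above computes the full 2n-limb square of an n-limb operand; it appears to save work over a generic multiply by forming each off-diagonal product only once against a doubled multiplier (the lsl v0, v0, 1 and the adds/adc doubling in the outer loop) and adding the diagonal squares separately. A minimal reference for the result it must produce, in the same illustrative C as the sketch before the diff (again an assumption-laden sketch, not the GMP implementation):

/* Reference for mpn_sqr_basecase: {rp,2n} = {up,n}^2, plain schoolbook. */
static void
ref_sqr_basecase (mp_limb_t *rp, const mp_limb_t *up, long n)
{
  for (long i = 0; i < 2 * n; i++)
    rp[i] = 0;
  for (long i = 0; i < n; i++)
    {
      mp_limb_t cy = 0;
      for (long j = 0; j < n; j++)
        {
          limb2_t t = (limb2_t) up[i] * up[j] + rp[i + j] + cy;
          rp[i + j] = (mp_limb_t) t;
          cy = (mp_limb_t) (t >> 64);
        }
      rp[i + n] = cy;   /* carry out of row i lands one limb higher */
    }
}

The gmp-mparam.h values, by contrast, are tuning data rather than code: they are read at build time to pick algorithm crossover points for this CPU. For example, MUL_TOOM22_THRESHOLD 26 records that multiplication leaves the basecase routine for 2-way Toom (Karatsuba) at 26 limbs, and SQR_BASECASE_THRESHOLD 0 with its "always (native)" comment records that this assembly sqr_basecase is used for all sizes below the Toom-2 squaring threshold.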