diff options
| author | Sunil K Pandey <skpgkp2@gmail.com> | 2022-03-07 10:47:12 -0800 |
|---|---|---|
| committer | Sunil K Pandey <skpgkp2@gmail.com> | 2022-03-07 21:14:10 -0800 |
| commit | 9f38eff64028f236826c269958382dd7b786254f (patch) | |
| tree | 1f7c6bb211af9eb951bbfbaced5156883dd6b3b2 | |
| parent | daae8562387b20f6057fea1e484206416e9f8dd5 (diff) | |
| download | glibc-9f38eff64028f236826c269958382dd7b786254f.tar.xz glibc-9f38eff64028f236826c269958382dd7b786254f.zip | |
x86_64: Fix svml_s_erfcf8_core_avx2.S code formatting
This commit contains the following formatting changes:
1. Instructions preceded by a tab.
2. Instructions less than 8 characters in length have a tab
between them and the first operand.
3. Instructions greater than 7 characters in length have a
space between them and the first operand.
4. Tabs after `#define`d names and their value.
5. 8 spaces at the beginning of a line replaced by a tab.
6. Indent comments with code.
7. Remove redundant .text section.
8. 1 space between line content and line comment.
9. Space after all commas.
Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>
| -rw-r--r-- | sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S | 1794 |
1 files changed, 896 insertions, 898 deletions
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S index ec459644f0..4cafc1bcd5 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S @@ -29,929 +29,927 @@ /* Offsets for data table __svml_serfc_data_internal */ -#define _erfc_tbl 0 -#define _AbsMask 5184 -#define _MaxThreshold 5216 -#define _SgnMask 5248 -#define _One 5280 -#define _SRound 5312 -#define _TwoM48 5344 -#define _poly1_0 5376 -#define _poly1_1 5408 -#define _poly3_0 5440 -#define _poly3_1 5472 -#define _poly1_2 5504 -#define _poly1_3 5536 -#define _UF_Threshold 5568 +#define _erfc_tbl 0 +#define _AbsMask 5184 +#define _MaxThreshold 5216 +#define _SgnMask 5248 +#define _One 5280 +#define _SRound 5312 +#define _TwoM48 5344 +#define _poly1_0 5376 +#define _poly1_1 5408 +#define _poly3_0 5440 +#define _poly3_1 5472 +#define _poly1_2 5504 +#define _poly1_3 5536 +#define _UF_Threshold 5568 /* Lookup bias for data table __svml_serfc_data_internal. 
*/ -#define Table_Lookup_Bias -0x40000000 +#define Table_Lookup_Bias -0x40000000 #include <sysdep.h> - .text - .section .text.avx2,"ax",@progbits + .section .text.avx2, "ax", @progbits ENTRY(_ZGVdN8v_erfcf_avx2) - pushq %rbp - cfi_def_cfa_offset(16) - movq %rsp, %rbp - cfi_def_cfa(6, 16) - cfi_offset(6, -16) - andq $-32, %rsp - subq $96, %rsp - -/* vector gather: erfc_h(x0), (erfc_l(x0), 2/sqrt(pi)*exp(-x0^2)) */ - lea Table_Lookup_Bias+__svml_serfc_data_internal(%rip), %rax - vmovaps %ymm0, %ymm6 - vmovups _SRound+__svml_serfc_data_internal(%rip), %ymm1 - vmovups _TwoM48+__svml_serfc_data_internal(%rip), %ymm10 - vmovups _One+__svml_serfc_data_internal(%rip), %ymm0 - vandps _AbsMask+__svml_serfc_data_internal(%rip), %ymm6, %ymm5 - -/* - * erfc(10.125) underflows to 0 - * can compute all results in the main path - */ - vminps _MaxThreshold+__svml_serfc_data_internal(%rip), %ymm5, %ymm4 - vaddps %ymm1, %ymm4, %ymm8 - vmaxps %ymm10, %ymm4, %ymm2 - vsubps %ymm1, %ymm8, %ymm12 - vpslld $3, %ymm8, %ymm7 - vandps _SgnMask+__svml_serfc_data_internal(%rip), %ymm6, %ymm13 - vorps %ymm13, %ymm0, %ymm3 - -/* 2.0 if x<0, 0.0 otherwise */ - vsubps %ymm3, %ymm0, %ymm5 - vsubps %ymm12, %ymm2, %ymm0 - -/* Start polynomial evaluation */ - vmovups _poly1_0+__svml_serfc_data_internal(%rip), %ymm3 - vmovups _poly3_0+__svml_serfc_data_internal(%rip), %ymm2 - vmulps %ymm0, %ymm12, %ymm1 - vfmadd213ps _poly1_1+__svml_serfc_data_internal(%rip), %ymm1, %ymm3 - vfmadd213ps _poly3_1+__svml_serfc_data_internal(%rip), %ymm1, %ymm2 - vfmadd213ps _poly1_2+__svml_serfc_data_internal(%rip), %ymm1, %ymm3 - -/* 2^(-64) with sign of input */ - vorps %ymm13, %ymm10, %ymm4 - vfmadd213ps _poly1_3+__svml_serfc_data_internal(%rip), %ymm1, %ymm3 - vextractf128 $1, %ymm7, %xmm12 - vmovd %xmm7, %edx - vmovd %xmm12, %r8d - vpextrd $1, %xmm7, %ecx - vpextrd $2, %xmm7, %esi - vpextrd $3, %xmm7, %edi - vpextrd $1, %xmm12, %r9d - vpextrd $2, %xmm12, %r10d - vpextrd $3, %xmm12, %r11d - movslq %edx, %rdx - movslq 
%ecx, %rcx - movslq %esi, %rsi - movslq %edi, %rdi - movslq %r8d, %r8 - movslq %r9d, %r9 - movslq %r10d, %r10 - movslq %r11d, %r11 - vmovq (%rax,%rdx), %xmm13 - vmovq (%rax,%rcx), %xmm11 - vmovq (%rax,%rsi), %xmm14 - vmovq (%rax,%rdi), %xmm15 - vmovq (%rax,%r8), %xmm7 - vmovq (%rax,%r9), %xmm8 - vmovq (%rax,%r10), %xmm9 - vmovq (%rax,%r11), %xmm10 - vunpcklps %xmm14, %xmm13, %xmm12 - vunpcklps %xmm15, %xmm11, %xmm11 - vunpcklps %xmm9, %xmm7, %xmm9 - vunpcklps %xmm10, %xmm8, %xmm14 - vinsertf128 $1, %xmm9, %ymm12, %ymm15 - vinsertf128 $1, %xmm14, %ymm11, %ymm7 - vunpcklps %ymm7, %ymm15, %ymm9 - vunpckhps %ymm7, %ymm15, %ymm8 - -/* Diff^2 */ - vmulps %ymm0, %ymm0, %ymm15 - -/* P3*D2 */ - vmulps %ymm15, %ymm2, %ymm2 - -/* P1 = P1*T + P3*D2 */ - vfmadd213ps %ymm2, %ymm1, %ymm3 - -/* Special arguments (for flags only) */ - vcmplt_oqps _UF_Threshold+__svml_serfc_data_internal(%rip), %ymm6, %ymm1 - -/* EXP_X0H * (1+P1) */ - vfmadd213ps %ymm8, %ymm8, %ymm3 - -/* erfc_high(x0) - Diff * (2/sqrt(pi)*exp(-x0^2))*(1+P1) */ - vfnmadd213ps %ymm9, %ymm3, %ymm0 - -/* combine and get argument value range mask */ - vmovmskps %ymm1, %edx - notl %edx - movzbl %dl, %edx - vfmadd213ps %ymm5, %ymm4, %ymm0 - testl %edx, %edx - -/* Go to special inputs processing branch */ - jne L(SPECIAL_VALUES_BRANCH) - # LOE rbx r12 r13 r14 r15 edx ymm0 ymm6 - -/* Restore registers - * and exit the function - */ + pushq %rbp + cfi_def_cfa_offset(16) + movq %rsp, %rbp + cfi_def_cfa(6, 16) + cfi_offset(6, -16) + andq $-32, %rsp + subq $96, %rsp + + /* vector gather: erfc_h(x0), (erfc_l(x0), 2/sqrt(pi)*exp(-x0^2)) */ + lea Table_Lookup_Bias+__svml_serfc_data_internal(%rip), %rax + vmovaps %ymm0, %ymm6 + vmovups _SRound+__svml_serfc_data_internal(%rip), %ymm1 + vmovups _TwoM48+__svml_serfc_data_internal(%rip), %ymm10 + vmovups _One+__svml_serfc_data_internal(%rip), %ymm0 + vandps _AbsMask+__svml_serfc_data_internal(%rip), %ymm6, %ymm5 + + /* + * erfc(10.125) underflows to 0 + * can compute all results in the 
main path + */ + vminps _MaxThreshold+__svml_serfc_data_internal(%rip), %ymm5, %ymm4 + vaddps %ymm1, %ymm4, %ymm8 + vmaxps %ymm10, %ymm4, %ymm2 + vsubps %ymm1, %ymm8, %ymm12 + vpslld $3, %ymm8, %ymm7 + vandps _SgnMask+__svml_serfc_data_internal(%rip), %ymm6, %ymm13 + vorps %ymm13, %ymm0, %ymm3 + + /* 2.0 if x<0, 0.0 otherwise */ + vsubps %ymm3, %ymm0, %ymm5 + vsubps %ymm12, %ymm2, %ymm0 + + /* Start polynomial evaluation */ + vmovups _poly1_0+__svml_serfc_data_internal(%rip), %ymm3 + vmovups _poly3_0+__svml_serfc_data_internal(%rip), %ymm2 + vmulps %ymm0, %ymm12, %ymm1 + vfmadd213ps _poly1_1+__svml_serfc_data_internal(%rip), %ymm1, %ymm3 + vfmadd213ps _poly3_1+__svml_serfc_data_internal(%rip), %ymm1, %ymm2 + vfmadd213ps _poly1_2+__svml_serfc_data_internal(%rip), %ymm1, %ymm3 + + /* 2^(-64) with sign of input */ + vorps %ymm13, %ymm10, %ymm4 + vfmadd213ps _poly1_3+__svml_serfc_data_internal(%rip), %ymm1, %ymm3 + vextractf128 $1, %ymm7, %xmm12 + vmovd %xmm7, %edx + vmovd %xmm12, %r8d + vpextrd $1, %xmm7, %ecx + vpextrd $2, %xmm7, %esi + vpextrd $3, %xmm7, %edi + vpextrd $1, %xmm12, %r9d + vpextrd $2, %xmm12, %r10d + vpextrd $3, %xmm12, %r11d + movslq %edx, %rdx + movslq %ecx, %rcx + movslq %esi, %rsi + movslq %edi, %rdi + movslq %r8d, %r8 + movslq %r9d, %r9 + movslq %r10d, %r10 + movslq %r11d, %r11 + vmovq (%rax, %rdx), %xmm13 + vmovq (%rax, %rcx), %xmm11 + vmovq (%rax, %rsi), %xmm14 + vmovq (%rax, %rdi), %xmm15 + vmovq (%rax, %r8), %xmm7 + vmovq (%rax, %r9), %xmm8 + vmovq (%rax, %r10), %xmm9 + vmovq (%rax, %r11), %xmm10 + vunpcklps %xmm14, %xmm13, %xmm12 + vunpcklps %xmm15, %xmm11, %xmm11 + vunpcklps %xmm9, %xmm7, %xmm9 + vunpcklps %xmm10, %xmm8, %xmm14 + vinsertf128 $1, %xmm9, %ymm12, %ymm15 + vinsertf128 $1, %xmm14, %ymm11, %ymm7 + vunpcklps %ymm7, %ymm15, %ymm9 + vunpckhps %ymm7, %ymm15, %ymm8 + + /* Diff^2 */ + vmulps %ymm0, %ymm0, %ymm15 + + /* P3*D2 */ + vmulps %ymm15, %ymm2, %ymm2 + + /* P1 = P1*T + P3*D2 */ + vfmadd213ps %ymm2, %ymm1, %ymm3 + + /* Special 
arguments (for flags only) */ + vcmplt_oqps _UF_Threshold+__svml_serfc_data_internal(%rip), %ymm6, %ymm1 + + /* EXP_X0H * (1+P1) */ + vfmadd213ps %ymm8, %ymm8, %ymm3 + + /* erfc_high(x0) - Diff * (2/sqrt(pi)*exp(-x0^2))*(1+P1) */ + vfnmadd213ps %ymm9, %ymm3, %ymm0 + + /* combine and get argument value range mask */ + vmovmskps %ymm1, %edx + notl %edx + movzbl %dl, %edx + vfmadd213ps %ymm5, %ymm4, %ymm0 + testl %edx, %edx + + /* Go to special inputs processing branch */ + jne L(SPECIAL_VALUES_BRANCH) + # LOE rbx r12 r13 r14 r15 edx ymm0 ymm6 + + /* Restore registers + * and exit the function + */ L(EXIT): - movq %rbp, %rsp - popq %rbp - cfi_def_cfa(7, 8) - cfi_restore(6) - ret - cfi_def_cfa(6, 16) - cfi_offset(6, -16) - -/* Branch to process - * special inputs - */ + movq %rbp, %rsp + popq %rbp + cfi_def_cfa(7, 8) + cfi_restore(6) + ret + cfi_def_cfa(6, 16) + cfi_offset(6, -16) + + /* Branch to process + * special inputs + */ L(SPECIAL_VALUES_BRANCH): - vmovups %ymm6, 32(%rsp) - vmovups %ymm0, 64(%rsp) - # LOE rbx r12 r13 r14 r15 edx ymm0 - - xorl %eax, %eax - # LOE rbx r12 r13 r14 r15 eax edx - - vzeroupper - movq %r12, 16(%rsp) - /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */ - .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22 - movl %eax, %r12d - movq %r13, 8(%rsp) - /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */ - .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22 - movl %edx, %r13d - movq %r14, (%rsp) - /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */ - .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 - # LOE rbx r15 r12d r13d - -/* Range mask - * 
bits check - */ + vmovups %ymm6, 32(%rsp) + vmovups %ymm0, 64(%rsp) + # LOE rbx r12 r13 r14 r15 edx ymm0 + + xorl %eax, %eax + # LOE rbx r12 r13 r14 r15 eax edx + + vzeroupper + movq %r12, 16(%rsp) + /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */ + .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22 + movl %eax, %r12d + movq %r13, 8(%rsp) + /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */ + .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22 + movl %edx, %r13d + movq %r14, (%rsp) + /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */ + .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 + # LOE rbx r15 r12d r13d + + /* Range mask + * bits check + */ L(RANGEMASK_CHECK): - btl %r12d, %r13d + btl %r12d, %r13d -/* Call scalar math function */ - jc L(SCALAR_MATH_CALL) - # LOE rbx r15 r12d r13d + /* Call scalar math function */ + jc L(SCALAR_MATH_CALL) + # LOE rbx r15 r12d r13d -/* Special inputs - * processing loop - */ + /* Special inputs + * processing loop + */ L(SPECIAL_VALUES_LOOP): - incl %r12d - cmpl $8, %r12d - -/* Check bits in range mask */ - jl L(RANGEMASK_CHECK) - # LOE rbx r15 r12d r13d - - movq 16(%rsp), %r12 - cfi_restore(12) - movq 8(%rsp), %r13 - cfi_restore(13) - movq (%rsp), %r14 - cfi_restore(14) - vmovups 64(%rsp), %ymm0 - -/* Go to exit */ - jmp L(EXIT) - /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */ - .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22 - /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; 
DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */ - .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22 - /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */ - .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 - # LOE rbx r12 r13 r14 r15 ymm0 - -/* Scalar math fucntion call - * to process special input - */ + incl %r12d + cmpl $8, %r12d + + /* Check bits in range mask */ + jl L(RANGEMASK_CHECK) + # LOE rbx r15 r12d r13d + + movq 16(%rsp), %r12 + cfi_restore(12) + movq 8(%rsp), %r13 + cfi_restore(13) + movq (%rsp), %r14 + cfi_restore(14) + vmovups 64(%rsp), %ymm0 + + /* Go to exit */ + jmp L(EXIT) + /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */ + .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22 + /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */ + .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22 + /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */ + .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 + # LOE rbx r12 r13 r14 r15 ymm0 + + /* Scalar math fucntion call + * to process special input + */ L(SCALAR_MATH_CALL): - movl %r12d, %r14d - movss 32(%rsp,%r14,4), %xmm0 - call erfcf@PLT - # LOE rbx r14 r15 r12d r13d xmm0 + movl %r12d, %r14d + movss 32(%rsp, %r14, 4), %xmm0 + call erfcf@PLT + # LOE rbx r14 r15 r12d r13d xmm0 - movss %xmm0, 64(%rsp,%r14,4) + movss %xmm0, 64(%rsp, %r14, 4) -/* Process special inputs in loop */ - jmp L(SPECIAL_VALUES_LOOP) 
- # LOE rbx r15 r12d r13d + /* Process special inputs in loop */ + jmp L(SPECIAL_VALUES_LOOP) + # LOE rbx r15 r12d r13d END(_ZGVdN8v_erfcf_avx2) - .section .rodata, "a" - .align 32 + .section .rodata, "a" + .align 32 #ifdef __svml_serfc_data_internal_typedef typedef unsigned int VUINT32; -typedef struct -{ - __declspec(align(32)) VUINT32 _erfc_tbl[645*2][1]; - __declspec(align(32)) VUINT32 _AbsMask[8][1]; - __declspec(align(32)) VUINT32 _MaxThreshold[8][1]; - __declspec(align(32)) VUINT32 _SgnMask[8][1]; - __declspec(align(32)) VUINT32 _One[8][1]; - __declspec(align(32)) VUINT32 _SRound[8][1]; - __declspec(align(32)) VUINT32 _TwoM48[8][1]; - __declspec(align(32)) VUINT32 _poly1_0[8][1]; - __declspec(align(32)) VUINT32 _poly1_1[8][1]; - __declspec(align(32)) VUINT32 _poly3_0[8][1]; - __declspec(align(32)) VUINT32 _poly3_1[8][1]; - __declspec(align(32)) VUINT32 _poly1_2[8][1]; - __declspec(align(32)) VUINT32 _poly1_3[8][1]; - __declspec(align(32)) VUINT32 _UF_Threshold[8][1]; +typedef struct { + __declspec(align(32)) VUINT32 _erfc_tbl[645*2][1]; + __declspec(align(32)) VUINT32 _AbsMask[8][1]; + __declspec(align(32)) VUINT32 _MaxThreshold[8][1]; + __declspec(align(32)) VUINT32 _SgnMask[8][1]; + __declspec(align(32)) VUINT32 _One[8][1]; + __declspec(align(32)) VUINT32 _SRound[8][1]; + __declspec(align(32)) VUINT32 _TwoM48[8][1]; + __declspec(align(32)) VUINT32 _poly1_0[8][1]; + __declspec(align(32)) VUINT32 _poly1_1[8][1]; + __declspec(align(32)) VUINT32 _poly3_0[8][1]; + __declspec(align(32)) VUINT32 _poly3_1[8][1]; + __declspec(align(32)) VUINT32 _poly1_2[8][1]; + __declspec(align(32)) VUINT32 _poly1_3[8][1]; + __declspec(align(32)) VUINT32 _UF_Threshold[8][1]; } __svml_serfc_data_internal; #endif __svml_serfc_data_internal: - /*== _erfc_tbl ==*/ - .long 0x57800000, 0x57906ebb - .long 0x577b7ca2, 0x579065b4 - .long 0x5776f9d5, 0x57904aa3 - .long 0x57727828, 0x57901d93 - .long 0x576df82b, 0x578fde94 - .long 0x57697a6e, 0x578f8dbd - .long 0x5764ff7f, 0x578f2b2e - .long 
0x576087ea, 0x578eb70a - .long 0x575c143d, 0x578e317d - .long 0x5757a500, 0x578d9ab9 - .long 0x57533abf, 0x578cf2f5 - .long 0x574ed5fe, 0x578c3a6f - .long 0x574a7744, 0x578b716c - .long 0x57461f12, 0x578a9834 - .long 0x5741cdeb, 0x5789af18 - .long 0x573d844a, 0x5788b66c - .long 0x573942ac, 0x5787ae8b - .long 0x57350989, 0x578697d3 - .long 0x5730d956, 0x578572a8 - .long 0x572cb284, 0x57843f72 - .long 0x57289583, 0x5782fe9f - .long 0x572482bd, 0x5781b0a0 - .long 0x57207a9b, 0x578055e8 - .long 0x571c7d80, 0x577ddddf - .long 0x57188bcb, 0x577af867 - .long 0x5714a5da, 0x5777fc62 - .long 0x5710cc05, 0x5774ead4 - .long 0x570cfe9f, 0x5771c4c4 - .long 0x57093df9, 0x576e8b3e - .long 0x57058a5e, 0x576b3f51 - .long 0x5701e415, 0x5767e20f - .long 0x56fc96c6, 0x5764748e - .long 0x56f5810a, 0x5760f7e5 - .long 0x56ee876d, 0x575d6d2d - .long 0x56e7aa5a, 0x5759d57e - .long 0x56e0ea35, 0x575631f4 - .long 0x56da4757, 0x575283a7 - .long 0x56d3c214, 0x574ecbb1 - .long 0x56cd5ab3, 0x574b0b28 - .long 0x56c71175, 0x57474323 - .long 0x56c0e692, 0x574374b5 - .long 0x56bada38, 0x573fa0ee - .long 0x56b4ec8f, 0x573bc8dc - .long 0x56af1db3, 0x5737ed89 - .long 0x56a96dbc, 0x57340ff9 - .long 0x56a3dcb7, 0x5730312e - .long 0x569e6aaa, 0x572c5223 - .long 0x56991793, 0x572873cf - .long 0x5693e369, 0x57249721 - .long 0x568ece1a, 0x5720bd06 - .long 0x5689d78f, 0x571ce661 - .long 0x5684ffa8, 0x5719140f - .long 0x56804640, 0x571546e7 - .long 0x56775654, 0x57117fb9 - .long 0x566e5c65, 0x570dbf4c - .long 0x56659e43, 0x570a0662 - .long 0x565d1b6d, 0x570655b1 - .long 0x5654d35d, 0x5702adeb - .long 0x564cc57d, 0x56fe1f73 - .long 0x5644f12f, 0x56f6f777 - .long 0x563d55cc, 0x56efe513 - .long 0x5635f2a1, 0x56e8e968 - .long 0x562ec6f6, 0x56e20584 - .long 0x5627d207, 0x56db3a64 - .long 0x5621130b, 0x56d488f8 - .long 0x561a8931, 0x56cdf21c - .long 0x561433a0, 0x56c7769b - .long 0x560e117c, 0x56c11733 - .long 0x560821e1, 0x56bad48d - .long 0x560263e5, 0x56b4af46 - .long 0x55f9ad39, 0x56aea7ea - .long 0x55eef22b, 
0x56a8bef3 - .long 0x55e494b6, 0x56a2f4ce - .long 0x55da92eb, 0x569d49d9 - .long 0x55d0ead3, 0x5697be62 - .long 0x55c79a75, 0x569252aa - .long 0x55be9fd3, 0x568d06e3 - .long 0x55b5f8ee, 0x5687db31 - .long 0x55ada3c2, 0x5682cfad - .long 0x55a59e4c, 0x567bc8c2 - .long 0x559de68a, 0x56723298 - .long 0x55967a77, 0x5668dcc1 - .long 0x558f5812, 0x565fc70e - .long 0x55887d5c, 0x5656f136 - .long 0x5581e856, 0x564e5adf - .long 0x55772e0c, 0x56460399 - .long 0x556b0eeb, 0x563deae4 - .long 0x555f6f64, 0x5636102b - .long 0x55544b9e, 0x562e72cb - .long 0x55499fc8, 0x5627120f - .long 0x553f681d, 0x561fed36 - .long 0x5535a0e6, 0x5619036e - .long 0x552c4679, 0x561253dc - .long 0x55235539, 0x560bdd96 - .long 0x551ac999, 0x56059fa9 - .long 0x5512a01c, 0x55ff3230 - .long 0x550ad554, 0x55f391b9 - .long 0x550365e5, 0x55e85bd0 - .long 0x54f89d02, 0x55dd8e4c - .long 0x54eb17df, 0x55d326f3 - .long 0x54de360f, 0x55c92385 - .long 0x54d1f166, 0x55bf81b6 - .long 0x54c643dc, 0x55b63f32 - .long 0x54bb2790, 0x55ad59a1 - .long 0x54b096c5, 0x55a4cea4 - .long 0x54a68be5, 0x559c9bd9 - .long 0x549d0180, 0x5594bedd - .long 0x5493f24c, 0x558d354b - .long 0x548b5926, 0x5585fcbf - .long 0x54833111, 0x557e25af - .long 0x5476ea69, 0x5570ea68 - .long 0x546841c1, 0x556442f0 - .long 0x545a5f10, 0x55582a98 - .long 0x544d398b, 0x554c9cbd - .long 0x5440c8ae, 0x554194c7 - .long 0x54350440, 0x55370e2c - .long 0x5429e44f, 0x552d0474 - .long 0x541f612f, 0x55237336 - .long 0x5415737d, 0x551a561b - .long 0x540c1417, 0x5511a8e1 - .long 0x54033c22, 0x5509675a - .long 0x53f5ca07, 0x55018d6b - .long 0x53e610c3, 0x54f42e22 - .long 0x53d74046, 0x54e600c0 - .long 0x53c94cd8, 0x54d88b05 - .long 0x53bc2b3a, 0x54cbc574 - .long 0x53afd0a5, 0x54bfa8c4 - .long 0x53a432c3, 0x54b42ddb - .long 0x539947af, 0x54a94dcf - .long 0x538f05f3, 0x549f01ec - .long 0x5385647e, 0x549543ae - .long 0x5378b557, 0x548c0cc2 - .long 0x5367c06a, 0x5483570a - .long 0x5357da71, 0x54763931 - .long 0x5348f45f, 0x5466af65 - .long 0x533affda, 0x5458059c - 
.long 0x532def39, 0x544a3127 - .long 0x5321b57a, 0x543d27b5 - .long 0x5316463d, 0x5430df57 - .long 0x530b95bd, 0x54254e7b - .long 0x530198cc, 0x541a6bee - .long 0x52f08999, 0x54102ed6 - .long 0x52df1f58, 0x54068eb5 - .long 0x52cedfb9, 0x53fb06c5 - .long 0x52bfb8a0, 0x53ea0a1d - .long 0x52b198e5, 0x53da1876 - .long 0x52a4704e, 0x53cb237a - .long 0x52982f7c, 0x53bd1d6f - .long 0x528cc7eb, 0x53aff93b - .long 0x52822be3, 0x53a3aa56 - .long 0x52709cde, 0x539824ce - .long 0x525e46a9, 0x538d5d3c - .long 0x524d3e18, 0x538348c6 - .long 0x523d6d6f, 0x5373ba24 - .long 0x522ec035, 0x53622096 - .long 0x52212321, 0x5351b22a - .long 0x52148413, 0x53425d18 - .long 0x5208d1fc, 0x53341080 - .long 0x51fbf9ac, 0x5326bc5e - .long 0x51e7eb29, 0x531a5183 - .long 0x51d55c2d, 0x530ec18c - .long 0x51c43238, 0x5303feda - .long 0x51b45472, 0x52f3f919 - .long 0x51a5ab93, 0x52e15ce8 - .long 0x519821ce, 0x52d0121b - .long 0x518ba2bc, 0x52c002f8 - .long 0x51801b49, 0x52b11afe - .long 0x516af33c, 0x52a346d7 - .long 0x51575a21, 0x5296744c - .long 0x51454c24, 0x528a9237 - .long 0x5134ac3b, 0x527f20e7 - .long 0x51255f51, 0x526abfa9 - .long 0x51174c27, 0x5257e42f - .long 0x510a5b3b, 0x524673af - .long 0x50fced50, 0x52365507 - .long 0x50e7141d, 0x522770a1 - .long 0x50d304fc, 0x5219b066 - .long 0x50c09cb5, 0x520cffa3 - .long 0x50afba92, 0x52014af8 - .long 0x50a04037, 0x51ed0088 - .long 0x50921177, 0x51d91d2d - .long 0x50851430, 0x51c6cc35 - .long 0x50726058, 0x51b5f011 - .long 0x505c9dfa, 0x51a66d2a - .long 0x5048b7be, 0x519829c8 - .long 0x50368738, 0x518b0df2 - .long 0x5025e8e0, 0x517e06ab - .long 0x5016bbdf, 0x5167ea53 - .long 0x5008e1df, 0x5153a034 - |
