Dataset schema:
  path           string  (14 to 112 chars)
  content        string  (0 to 6.32M chars)
  size           int64   (0 to 6.32M)
  max_lines      int64   (1 to 100k)
  repo_name      string  (2 classes)
  autogenerated  bool    (1 class)
cosmopolitan/libc/nt/gdi32/GdiFullscreenControl.S (93 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    gdi32,__imp_GdiFullscreenControl,GdiFullscreenControl,1536

cosmopolitan/libc/nt/gdi32/GetETM.S (65 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    gdi32,__imp_GetETM,GetETM,1636

cosmopolitan/libc/nt/url/InetIsOffline.S (76 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    url,__imp_InetIsOffline,InetIsOffline,106

cosmopolitan/libc/nt/url/URLAssociationDialogW.S (92 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    url,__imp_URLAssociationDialogW,URLAssociationDialogW,118

cosmopolitan/libc/nt/url/FileProtocolHandler.S (88 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    url,__imp_FileProtocolHandler,FileProtocolHandler,104

cosmopolitan/libc/nt/url/TelnetProtocolHandler.S (92 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    url,__imp_TelnetProtocolHandler,TelnetProtocolHandler,113

cosmopolitan/libc/nt/url/OpenURL.S (64 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    url,__imp_OpenURL,OpenURL,111

cosmopolitan/libc/nt/url/AutodialHookCallback.S (90 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    url,__imp_AutodialHookCallback,AutodialHookCallback,103

cosmopolitan/libc/nt/url/MIMEAssociationDialogW.S (94 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    url,__imp_MIMEAssociationDialogW,MIMEAssociationDialogW,108

cosmopolitan/libc/nt/url/MailToProtocolHandler.S (92 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    url,__imp_MailToProtocolHandler,MailToProtocolHandler,109

cosmopolitan/libc/nt/url/AddMIMEFileTypesPS.S (86 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    url,__imp_AddMIMEFileTypesPS,AddMIMEFileTypesPS,102

cosmopolitan/libc/nt/url/TranslateURLW.S (76 bytes, 3 lines, jart/cosmopolitan, autogenerated: false)
#include "libc/nt/codegen.h"
.imp    url,__imp_TranslateURLW,TranslateURLW,116
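The twelve stubs above share one pattern: the .imp macro from libc/nt/codegen.h emits an import-table thunk for a single WIN32 DLL export, taking the DLL name, the import pointer symbol, the function name, and an ordinal hint. A minimal sketch of how such a stub is consumed from C follows; the prototype is an assumption based on the documented InetIsOffline API, not something the dump itself declares:

    /* Hypothetical C-side declaration matching the InetIsOffline.S stub.
       Calls resolve through __imp_InetIsOffline, which the .imp macro
       wired to url.dll's export table. */
    extern int InetIsOffline(unsigned dwFlags);

    int main(void) {
      return InetIsOffline(0) ? 1 : 0;  /* nonzero if system is offline */
    }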
cosmopolitan/libc/nexgen32e/program_invocation_name2.c (1,947 bytes, 26 lines, jart/cosmopolitan, autogenerated: false)
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2023 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for        │
│ any purpose with or without fee is hereby granted, provided that the        │
│ above copyright notice and this permission notice appear in all copies.     │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL               │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED               │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE            │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL        │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR       │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER              │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR            │
│ PERFORMANCE OF THIS SOFTWARE.                                               │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/runtime/runtime.h"

#ifndef __x86_64__
char *program_invocation_name;
#endif /* __x86_64__ */
cosmopolitan/libc/nexgen32e/tinydivsi.greg.S (2,440 bytes, 42 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2020 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"

//      Support code for fast integer division by Si units.
//
//      Division by magnums is described in Hacker's Delight and is
//      usually generated automatically by compilers, but sadly not
//      when we optimize for size and idiv goes at least 10x slower
//      so we do this which saves space while avoiding build tuning
//
//      @param  rdi is number to divide
//      @param  cl is magnum #1
//      @param  rdx is magnum #2
//      @return quotient
tinydivsi:
        .leafprologue
        mov     %rdi,%rax
        imul    %rdx
        mov     %rdx,%rax
        sar     %cl,%rax
        sar     $63,%rdi
        sub     %rdi,%rax
        .leafepilogue
        .endfn  tinydivsi,globl
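For readers unfamiliar with the trick, here is a hedged C sketch of the multiply-and-shift division that tinydivsi performs. The function name is hypothetical; the magic constant and shift shown are the standard signed pair for dividing by 1000 (per Hacker's Delight, ch. 10), used here only as an illustration:

    #include <stdint.h>

    /* Signed n/1000 without idiv, mirroring tinydivsi's sequence:
       rdx holds magnum #2 (the multiplier), cl holds magnum #1 (the shift). */
    static int64_t div1000(int64_t n) {
      int64_t hi = (int64_t)(((__int128)n * 0x20C49BA5E353F7CF) >> 64);
      return (hi >> 7) - (n >> 63);  /* sar %cl,%rax; sar $63,%rdi; sub */
    }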
cosmopolitan/libc/nexgen32e/vendor.internal.h (787 bytes, 18 lines, jart/cosmopolitan, autogenerated: false)
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_VENDOR_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_VENDOR_H_
#include "libc/nexgen32e/kcpuids.h"
#if !(__ASSEMBLER__ + __LINKER__ + 0)

#define IsAuthenticAMD()                                        \
  (kCpuids[KCPUIDS_0H][KCPUIDS_EBX] == 0x68747541 /* Auth */ && \
   kCpuids[KCPUIDS_0H][KCPUIDS_EDX] == 0x69746e65 /* enti */ && \
   kCpuids[KCPUIDS_0H][KCPUIDS_ECX] == 0x444d4163 /* cAMD */)

#define IsGenuineIntel()                                        \
  (kCpuids[KCPUIDS_0H][KCPUIDS_EBX] == 0x756e6547 /* Genu */ && \
   kCpuids[KCPUIDS_0H][KCPUIDS_EDX] == 0x49656e69 /* ineI */ && \
   kCpuids[KCPUIDS_0H][KCPUIDS_ECX] == 0x6c65746e /* ntel */)

#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_VENDOR_H_ */
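The magic dwords above are just the vendor strings "AuthenticAMD" and "GenuineIntel" read as three little-endian 32-bit loads, in the EBX, EDX, ECX order that CPUID leaf 0 uses. A quick self-contained check of that claim (assumes a little-endian host, which x86 is):

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      unsigned ebx, edx, ecx;               /* CPUID leaf 0 returns the   */
      memcpy(&ebx, "GenuineIntel" + 0, 4);  /* vendor string in this      */
      memcpy(&edx, "GenuineIntel" + 4, 4);  /* register order             */
      memcpy(&ecx, "GenuineIntel" + 8, 4);
      printf("%#x %#x %#x\n", ebx, edx, ecx);
      /* prints 0x756e6547 0x49656e69 0x6c65746e */
    }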
cosmopolitan/libc/nexgen32e/environ2.c (1,931 bytes, 25 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2023 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/runtime/runtime.h"

#ifndef __x86_64__
char **environ;
#endif /* __x86_64__ */
cosmopolitan/libc/nexgen32e/rdtscp.h (3,196 bytes, 66 lines, jart/cosmopolitan, autogenerated: false)
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_RDTSCP_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_RDTSCP_H_
#include "libc/dce.h"
#include "libc/intrin/asmflag.h"
#include "libc/nexgen32e/x86feature.h"
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

#define TSC_AUX_CORE(MSR) ((MSR)&0xfff)
#define TSC_AUX_NODE(MSR) (((MSR) >> 12) & 0xfff)

/**
 * Reads CPU timestamp counter and IA32_TSC_AUX.
 *
 * This macro inhibits compiler magic.
 * This macro does not inhibit CPU magic.
 *
 * @see X86_HAVE(RDTSCP)
 */
#define rdtscp(OPT_OUT_IA32_TSC_AUX)               \
  ({                                               \
    uint32_t Ecx, *EcxOut;                         \
    uint64_t Rax, Rcx, Rdx;                        \
    asm volatile("rdtscp"                          \
                 : "=a"(Rax), "=c"(Ecx), "=d"(Rdx) \
                 : /* no inputs */                 \
                 : "memory");                      \
    EcxOut = (OPT_OUT_IA32_TSC_AUX);               \
    if (EcxOut) *EcxOut = Ecx;                     \
    Rdx << 32 | Rax;                               \
  })

/**
 * Reads timestamp counter auxiliary model specific register value.
 */
#define rdpid()                                                            \
  ({                                                                       \
    bool Ok;                                                               \
    long Msr;                                                              \
    Ok = false;                                                            \
    if (X86_HAVE(RDPID)) {                                                 \
      asm volatile("rdpid\t%0" : "=r"(Msr) : /* no inputs */ : "memory");  \
      Ok = true;                                                           \
    } else if (IsLinux()) {                                                \
      asm volatile(ZFLAG_ASM("lsl\t%2,%1")                                 \
                   : ZFLAG_CONSTRAINT(Ok), "=r"(Msr)                       \
                   : "r"(0x7b)                                             \
                   : "memory");                                            \
    }                                                                      \
    if (!Ok && X86_HAVE(RDTSCP)) {                                         \
      asm volatile("rdtscp"                                                \
                   : "=c"(Msr)                                             \
                   : /* no inputs */                                       \
                   : "eax", "edx", "memory");                              \
      Ok = true;                                                           \
    }                                                                      \
    if (!Ok) {                                                             \
      Msr = -1;                                                            \
    }                                                                      \
    Msr;                                                                   \
  })

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_RDTSCP_H_ */
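A usage sketch of the rdtscp() macro and the TSC_AUX field accessors above; the function name is hypothetical, and the code assumes an x86-64 CPU where X86_HAVE(RDTSCP) holds:

    #include "libc/nexgen32e/rdtscp.h"

    void WhereAmI(void) {
      uint32_t aux;
      uint64_t ts = rdtscp(&aux);    /* timestamp plus IA32_TSC_AUX      */
      int core = TSC_AUX_CORE(aux);  /* low 12 bits: core number         */
      int node = TSC_AUX_NODE(aux);  /* next 12 bits: NUMA node          */
      (void)ts; (void)core; (void)node;
    }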
cosmopolitan/libc/nexgen32e/x87conf.inc (2,132 bytes, 34 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2020 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */

//      Reconfigures transcendental math coprocessor.
//
//      @param  \conf can be absent to restore default
//      @clob   x87 status and control words only
//      @see    Intel Manual V.1 §8.1.5
//      @mode   long,legacy
.macro  x87conf conf=$0x33f
        push    %ax
        pushw   \conf
        fclex
        fldcw   (%rsp)
        pop     %ax
        pop     %ax
.endm
cosmopolitan/libc/nexgen32e/auxv2.c (1,920 bytes, 24 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2023 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/runtime/runtime.h"

#ifdef __aarch64__
unsigned long *__auxv;
#endif
cosmopolitan/libc/nexgen32e/rdtsc.h (2,213 bytes, 68 lines, jart/cosmopolitan, autogenerated: false)
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_RDTSC_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_RDTSC_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

/**
 * Reads CPU timestamp counter.
 *
 * This macro inhibits compiler magic.
 * This macro does not inhibit CPU magic.
 *
 * @see X86_HAVE(INVTSC)
 */
#define rdtsc() __RDTSC("rdtsc")

/**
 * Reads CPU timestamp counter w/ full serialization.
 *
 * This macro inhibits CPU magic.
 * This macro inhibits compiler magic.
 *
 * The clock isn't read until:
 *
 * 1. previous instructions finish executing; and
 * 2. previous loads are globally visible; and
 * 3. previous stores are globally visible.
 *
 * Later instructions won't dispatch until RDTSC completes.
 *
 * @see X86_HAVE(INVTSC)
 */
#define mfence_lfence_rdtsc_lfence() \
  __RDTSC("mfence\n\tlfence\n\trdtsc\n\tlfence")

#ifdef __x86__
#define __RDTSC(ASM)                                                       \
  ({                                                                       \
    uint64_t Rax, Rdx;                                                     \
    asm volatile(ASM : "=a"(Rax), "=d"(Rdx) : /* no inputs */ : "memory"); \
    Rdx << 32 | Rax;                                                       \
  })
#elif defined(__aarch64__)
#define __RDTSC(ASM)                                \
  ({                                                \
    uint64_t _Ts;                                   \
    asm volatile("mrs\t%0,cntvct_el0" : "=r"(_Ts)); \
    _Ts * 48; /* the fudge factor */                \
  })
#elif defined(__powerpc64__)
#define __RDTSC(ASM)                             \
  ({                                             \
    uint64_t _Ts;                                \
    asm volatile("mfspr\t%0,268" : "=r"(_Ts));   \
    _Ts;                                         \
  })
#elif defined(__riscv)
#define __RDTSC(ASM)                             \
  ({                                             \
    uint64_t _Ts;                                \
    asm volatile("rdcycle\t%0" : "=r"(_Ts));     \
    _Ts;                                         \
  })
#endif

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_RDTSC_H_ */
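The serialized variant above is the one to reach for when timing a code region, since plain rdtsc can be reordered around the work being measured. A minimal sketch; CyclesSpent and Work are hypothetical names:

    #include "libc/nexgen32e/rdtsc.h"

    uint64_t CyclesSpent(void (*Work)(void)) {
      uint64_t a = mfence_lfence_rdtsc_lfence();  /* serialized start read */
      Work();
      uint64_t b = mfence_lfence_rdtsc_lfence();  /* serialized stop read  */
      return b - a;
    }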
cosmopolitan/libc/nexgen32e/kreversebits.S (5,197 bytes, 89 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2020 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"

        .rodata
kReverseBits:
        .byte   0b00000000,0b10000000,0b01000000,0b11000000
        .byte   0b00100000,0b10100000,0b01100000,0b11100000
        .byte   0b00010000,0b10010000,0b01010000,0b11010000
        .byte   0b00110000,0b10110000,0b01110000,0b11110000
        .byte   0b00001000,0b10001000,0b01001000,0b11001000
        .byte   0b00101000,0b10101000,0b01101000,0b11101000
        .byte   0b00011000,0b10011000,0b01011000,0b11011000
        .byte   0b00111000,0b10111000,0b01111000,0b11111000
        .byte   0b00000100,0b10000100,0b01000100,0b11000100
        .byte   0b00100100,0b10100100,0b01100100,0b11100100
        .byte   0b00010100,0b10010100,0b01010100,0b11010100
        .byte   0b00110100,0b10110100,0b01110100,0b11110100
        .byte   0b00001100,0b10001100,0b01001100,0b11001100
        .byte   0b00101100,0b10101100,0b01101100,0b11101100
        .byte   0b00011100,0b10011100,0b01011100,0b11011100
        .byte   0b00111100,0b10111100,0b01111100,0b11111100
        .byte   0b00000010,0b10000010,0b01000010,0b11000010
        .byte   0b00100010,0b10100010,0b01100010,0b11100010
        .byte   0b00010010,0b10010010,0b01010010,0b11010010
        .byte   0b00110010,0b10110010,0b01110010,0b11110010
        .byte   0b00001010,0b10001010,0b01001010,0b11001010
        .byte   0b00101010,0b10101010,0b01101010,0b11101010
        .byte   0b00011010,0b10011010,0b01011010,0b11011010
        .byte   0b00111010,0b10111010,0b01111010,0b11111010
        .byte   0b00000110,0b10000110,0b01000110,0b11000110
        .byte   0b00100110,0b10100110,0b01100110,0b11100110
        .byte   0b00010110,0b10010110,0b01010110,0b11010110
        .byte   0b00110110,0b10110110,0b01110110,0b11110110
        .byte   0b00001110,0b10001110,0b01001110,0b11001110
        .byte   0b00101110,0b10101110,0b01101110,0b11101110
        .byte   0b00011110,0b10011110,0b01011110,0b11011110
        .byte   0b00111110,0b10111110,0b01111110,0b11111110
        .byte   0b00000001,0b10000001,0b01000001,0b11000001
        .byte   0b00100001,0b10100001,0b01100001,0b11100001
        .byte   0b00010001,0b10010001,0b01010001,0b11010001
        .byte   0b00110001,0b10110001,0b01110001,0b11110001
        .byte   0b00001001,0b10001001,0b01001001,0b11001001
        .byte   0b00101001,0b10101001,0b01101001,0b11101001
        .byte   0b00011001,0b10011001,0b01011001,0b11011001
        .byte   0b00111001,0b10111001,0b01111001,0b11111001
        .byte   0b00000101,0b10000101,0b01000101,0b11000101
        .byte   0b00100101,0b10100101,0b01100101,0b11100101
        .byte   0b00010101,0b10010101,0b01010101,0b11010101
        .byte   0b00110101,0b10110101,0b01110101,0b11110101
        .byte   0b00001101,0b10001101,0b01001101,0b11001101
        .byte   0b00101101,0b10101101,0b01101101,0b11101101
        .byte   0b00011101,0b10011101,0b01011101,0b11011101
        .byte   0b00111101,0b10111101,0b01111101,0b11111101
        .byte   0b00000011,0b10000011,0b01000011,0b11000011
        .byte   0b00100011,0b10100011,0b01100011,0b11100011
        .byte   0b00010011,0b10010011,0b01010011,0b11010011
        .byte   0b00110011,0b10110011,0b01110011,0b11110011
        .byte   0b00001011,0b10001011,0b01001011,0b11001011
        .byte   0b00101011,0b10101011,0b01101011,0b11101011
        .byte   0b00011011,0b10011011,0b01011011,0b11011011
        .byte   0b00111011,0b10111011,0b01111011,0b11111011
        .byte   0b00000111,0b10000111,0b01000111,0b11000111
        .byte   0b00100111,0b10100111,0b01100111,0b11100111
        .byte   0b00010111,0b10010111,0b01010111,0b11010111
        .byte   0b00110111,0b10110111,0b01110111,0b11110111
        .byte   0b00001111,0b10001111,0b01001111,0b11001111
        .byte   0b00101111,0b10101111,0b01101111,0b11101111
        .byte   0b00011111,0b10011111,0b01011111,0b11011111
        .byte   0b00111111,0b10111111,0b01111111,0b11111111
        .endobj kReverseBits,globl
        .previous
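kReverseBits[k] is k with its eight bits mirrored, so wider reversals compose from byte lookups. A sketch of the usual way such a table is consumed (the function name is hypothetical):

    #include <stdint.h>

    extern const unsigned char kReverseBits[256];

    /* Reverse the bits of a 32-bit word: reverse each byte via the
       table, then swap the byte order. */
    uint32_t ReverseBits32(uint32_t x) {
      return (uint32_t)kReverseBits[x & 255] << 24 |
             (uint32_t)kReverseBits[(x >> 8) & 255] << 16 |
             (uint32_t)kReverseBits[(x >> 16) & 255] << 8 |
             kReverseBits[x >> 24];
    }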
cosmopolitan/libc/nexgen32e/gclongjmp.S (3,163 bytes, 69 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2020 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"

//      Jumps up stack to previous setjmp() invocation.
//
//      This is the same as longjmp() but also unwinds the stack to free
//      memory, etc. that was registered using gc() or defer(). If GC
//      isn't linked, this behaves the same as longjmp().
//
//      @param  rdi points to the jmp_buf which must be the same stack
//      @param  esi is returned by setjmp() invocation (coerced nonzero)
//      @assume system five nexgen32e abi conformant
//      @see    examples/ctrlc.c
//      @threadsafe
//      @noreturn
_gclongjmp:
#ifdef __x86_64__
        push    %rbp
        mov     %rsp,%rbp
        .profilable
        mov     %fs:0,%r12              # __get_tls()
        mov     0x18(%r12),%r12         # Tls::garbages
        test    %r12,%r12
        jz      0f
        movl    (%r12),%r13d            # garbages.i
        test    %r13d,%r13d
        jnz     .L.unwind.destructors
0:      jmp     longjmp
.L.unwind.destructors:
        push    %rdi
        push    %rsi
        mov     8(%r12),%r14            # garbages.p
        mov     (%rdi),%r15             # jmp_buf[0] is new %rsp
        shl     $5,%r13                 # log2(sizeof(struct Garbage))
1:      sub     $32,%r13                # 𝑖--
        js      2f
        cmp     (%r14,%r13),%r15        # new %rsp > garbages.p[𝑖].frame
        jbe     2f
        mov     16(%r14,%r13),%rdi      # garbages.p[𝑖].arg
        callq   *8(%r14,%r13)           # garbages.p[𝑖].fn
        decl    (%r12)                  # garbages.i--
        jmp     1b
2:      pop     %rsi
        pop     %rdi
        jmp     0b
#elif defined(__aarch64__)
        b       longjmp
#else
#error "unsupported architecture"
#endif /* __x86_64__ */
        .endfn  _gclongjmp,globl
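A sketch of the scenario the comment describes, assuming cosmopolitan's gc() helper (which defers a free to frame exit) and a jmp_buf filled by setjmp() higher up the same stack; g_env and Bail are hypothetical names:

    #include <setjmp.h>
    #include <stdlib.h>

    extern jmp_buf g_env;            /* set by setjmp() in a caller frame */
    void _gclongjmp(jmp_buf, int);

    void Bail(void) {
      void *scratch = gc(malloc(4096));  /* registered for deferred free  */
      (void)scratch;
      _gclongjmp(g_env, 1);  /* runs the deferred frees for every frame   */
                             /* being unwound, then longjmps to g_env     */
    }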
cosmopolitan/libc/nexgen32e/nt2sysv.h (480 bytes, 15 lines, jart/cosmopolitan, autogenerated: false)
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_NT2SYSV_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_NT2SYSV_H_
#include "libc/nexgen32e/trampoline.h"
#if !(__ASSEMBLER__ + __LINKER__ + 0)

/**
 * Creates function to thunk FUNCTION from Microsoft x64 to System V ABI.
 *
 * This macro should be used when specifying callbacks in the WIN32 API.
 */
#define NT2SYSV(FUNCTION) TRAMPOLINE(FUNCTION, __nt2sysv)

#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_NT2SYSV_H_ */
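A hedged sketch of the callback situation the macro targets: WIN32 will invoke the pointer with the Microsoft x64 calling convention, while MyWndProc is compiled for System V, so the thunk translates between them. The struct name, field name, and function names below follow the general WIN32 window-class shape and are assumptions, not the exact cosmopolitan API:

    #include <stdint.h>
    #include "libc/nexgen32e/nt2sysv.h"

    int64_t MyWndProc(int64_t hwnd, uint32_t msg,
                      uint64_t wparam, int64_t lparam);

    struct NtWndClass;  /* hypothetical forward declaration */

    void Register(intptr_t *lpfnWndProc) {
      *lpfnWndProc = (intptr_t)NT2SYSV(MyWndProc);  /* MS x64 -> System V */
    }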
cosmopolitan/libc/nexgen32e/ksha256.S (2,740 bytes, 41 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2021 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"

        .rodata
        .balign 64
kSha256:
        .long   0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
        .long   0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
        .long   0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
        .long   0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
        .long   0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
        .long   0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
        .long   0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
        .long   0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
        .long   0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
        .long   0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
        .long   0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
        .long   0xd192e819,0xd6990624,0xf40e3585,0x106aa070
        .long   0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
        .long   0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
        .long   0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
        .long   0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
        .endobj kSha256,globl
cosmopolitan/libc/nexgen32e/ktensindex.S (2,221 bytes, 32 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2022 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"

        .rodata
kTensIndex:
        .byte   0,  0,  0,  1,  1,  1,  2,  2
        .byte   2,  3,  3,  3,  3,  4,  4,  4
        .byte   5,  5,  5,  6,  6,  6,  6,  7
        .byte   7,  7,  8,  8,  8,  9,  9,  9
        .byte   10, 10, 10, 11, 11, 11, 12, 12
        .byte   12, 12, 13, 13, 13, 14, 14, 14
        .byte   15, 15, 15, 15, 16, 16, 16, 17
        .byte   17, 17, 18, 18, 18, 18, 19, 19
        .endfn  kTensIndex,globl
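kTensIndex maps a bit position (the index of the highest set bit) to the power of ten at or below that magnitude. One way tables like this get used is fast decimal digit counting; the sketch below is an illustration, not necessarily how cosmopolitan uses it, and it assumes kTens (declared in nexgen32e.h further down) holds the powers of ten 10^0 through 10^19:

    #include <stdint.h>

    extern const uint64_t kTens[20];
    extern const unsigned char kTensIndex[64];

    unsigned CountDigits(uint64_t x) {
      if (!x) return 1;
      unsigned guess = kTensIndex[63 ^ __builtin_clzll(x)];  /* bsr(x) */
      return guess + (x >= kTens[guess]);  /* bump if past 10^guess    */
    }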
cosmopolitan/libc/nexgen32e/nexgen32e.h (485 bytes, 17 lines, jart/cosmopolitan, autogenerated: false)
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_NEXGEN32E_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_NEXGEN32E_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

extern long kHalfCache3;
extern const uint64_t kTens[20];
extern const uint32_t kSha256[64];
extern const uint64_t kSha512[80];
extern const unsigned char kTensIndex[64];

void CheckStackIsAligned(void);

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_NEXGEN32E_H_ */
cosmopolitan/libc/nexgen32e/tinywcslen.greg.S (2,178 bytes, 36 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2020 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"

//      32-bit strlen that's tiny and near optimal if data's tiny.
//
//      @param  RDI is wchar_t *s
//      @param  EAX is unsigned length
//      @see    libc/nexgen32e/strsak32.S
tinywcslen:
        .leafprologue
        .profilable
        xor     %eax,%eax
1:      cmpl    $0,(%rdi,%rax,4)
        jz      2f
        inc     %eax
        jmp     1b
2:      .leafepilogue
        .endfn  tinywcslen,globl
cosmopolitan/libc/nexgen32e/hascharacter.internal.h (762 bytes, 37 lines, jart/cosmopolitan, autogenerated: false)
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_HASCHARACTER_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_HASCHARACTER_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)

forceinline bool HasCharacter(char c, const char *s) {
  unsigned i;
  for (i = 0; s[i]; ++i) {
    if (s[i] == c) {
      return true;
    }
  }
  return false;
}

forceinline bool HasCharacter16(char16_t c, const char16_t *s) {
  unsigned i;
  for (i = 0; s[i]; ++i) {
    if (s[i] == c) {
      return true;
    }
  }
  return false;
}

forceinline bool HasCharacterWide(wchar_t c, const wchar_t *s) {
  unsigned i;
  for (i = 0; s[i]; ++i) {
    if (s[i] == c) {
      return true;
    }
  }
  return false;
}

#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_HASCHARACTER_H_ */
cosmopolitan/libc/nexgen32e/mul4x4adx.S (10,045 bytes, 221 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2021 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"

//      Computes 512-bit product of 256-bit and 256-bit numbers.
//
//      Instructions:       88
//      Total Cycles:       36
//      Total uOps:        120
//      uOps Per Cycle:   3.33
//      IPC:              2.44
//      Block RThroughput: 20.0
//
//      @param  rdi receives 8 quadword result
//      @param  rsi is left hand side which must have 4 quadwords
//      @param  rdx is right hand side which must have 4 quadwords
//      @note   words are host endian while array is little endian
//      @mayalias
Mul4x4Adx:
        push    %rbp
        mov     %rsp,%rbp
        .profilable
        sub     $56,%rsp
        mov     %r15,-8(%rbp)
        mov     %r14,-16(%rbp)
        mov     %r13,-24(%rbp)
        mov     %r12,-32(%rbp)
        mov     %rbx,-40(%rbp)
        mov     %rdx,%r12
        mov     (%rdx),%rdx
        mov     (%rsi),%rax
        mov     16(%rsi),%r11
        mov     24(%rsi),%r10
        mulx    %rax,%rbx,%rax
        mov     %rbx,-48(%rbp)
        mov     8(%rsi),%rbx
        mulx    %rbx,%rdx,%rcx
        add     %rdx,%rax
        mov     (%r12),%rdx
        mulx    %r11,%rdx,%r9
        adc     %rdx,%rcx
        mov     (%r12),%rdx
        mulx    %r10,%rdx,%r8
        adc     %rdx,%r9
        adc     $0,%r8
        xor     %r13d,%r13d
        mov     (%rsi),%r14
        mov     8(%r12),%rdx
        mulx    %r14,%r14,%r15
        adox    %r14,%rax
        adcx    %r15,%rcx
        mov     %rax,-56(%rbp)
        mulx    %rbx,%r14,%rax
        adox    %r14,%rcx
        adcx    %rax,%r9
        mulx    %r11,%r14,%rax
        adox    %r14,%r9
        adcx    %rax,%r8
        mulx    %r10,%rdx,%rax
        adox    %rdx,%r8
        mov     16(%r12),%rdx
        adcx    %r13,%rax
        adox    %r13,%rax
        mov     (%rsi),%r13
        xor     %r15d,%r15d
        mulx    %r13,%r13,%r14
        adox    %r13,%rcx
        adcx    %r14,%r9
        mulx    %rbx,%r14,%r13
        adox    %r14,%r9
        adcx    %r13,%r8
        mulx    %r11,%r14,%r13
        adox    %r14,%r8
        adcx    %r13,%rax
        mov     (%rsi),%rsi
        mulx    %r10,%rdx,%r13
        adox    %rdx,%rax
        adcx    %r15,%r13
        mov     24(%r12),%rdx
        adox    %r15,%r13
        mulx    %rsi,%r12,%rsi
        xor     %r14d,%r14d
        adox    %r12,%r9
        adcx    %rsi,%r8
        mulx    %rbx,%rsi,%rbx
        adox    %rsi,%r8
        adcx    %rbx,%rax
        mulx    %r11,%r11,%rsi
        mov     -56(%rbp),%rbx
        mov     %rcx,16(%rdi)
        adcx    %rsi,%r13
        mov     -48(%rbp),%rsi
        mov     %rbx,8(%rdi)
        adox    %r11,%rax
        mov     %r9,24(%rdi)
        mov     %r8,32(%rdi)
        mov     %rax,40(%rdi)
        mulx    %r10,%rdx,%r10
        adox    %rdx,%r13
        adcx    %r14,%r10
        mov     %r13,48(%rdi)
        adox    %r14,%r10
        mov     %rsi,(%rdi)
        mov     %r10,56(%rdi)
        mov     -8(%rbp),%r15
        mov     -16(%rbp),%r14
        mov     -24(%rbp),%r13
        mov     -32(%rbp),%r12
        mov     -40(%rbp),%rbx
        leave
        ret
        .endfn  Mul4x4Adx,globl
        .end

// [llvm-mca TIMELINE VIEW elided: a per-instruction dispatch/execute/retire
//  trace of the 88 instructions above; its column alignment was destroyed
//  in extraction and cannot be faithfully reconstructed]
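For testing or porting, a plain-C reference of the same 4x4 schoolbook product is handy; this is a sketch with a hypothetical name, using the same little-endian limb order the @note above describes:

    #include <stdint.h>

    /* Reference (non-ADX) 256x256 -> 512 bit multiply. */
    void Mul4x4Ref(uint64_t o[8], const uint64_t a[4], const uint64_t b[4]) {
      for (int i = 0; i < 8; ++i) o[i] = 0;
      for (int i = 0; i < 4; ++i) {
        unsigned __int128 carry = 0;
        for (int j = 0; j < 4; ++j) {
          unsigned __int128 t =
              (unsigned __int128)a[j] * b[i] + o[i + j] + carry;
          o[i + j] = (uint64_t)t;   /* low 64 bits stay in place   */
          carry = t >> 64;          /* high 64 bits ripple upward  */
        }
        o[i + 4] = (uint64_t)carry;
      }
    }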
cosmopolitan/libc/nexgen32e/argc.S (2,100 bytes, 32 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2020 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"
#include "libc/notice.inc"

        .initbss 300,_init_argc
//      Global variable holding _start(argc) parameter.
__argc: .quad   0
        .endobj __argc,globl
        .previous

        .init.start 300,_init_argc
        mov     %r12,%rax
        stosq
        .init.end 300,_init_argc
cosmopolitan/libc/nexgen32e/stackframe.h (351 bytes, 14 lines, jart/cosmopolitan, autogenerated: false)
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_STACKFRAME_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_STACKFRAME_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

struct StackFrame {
  struct StackFrame *next;
  intptr_t addr;
};

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_STACKFRAME_H_ */
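StackFrame mirrors the System V frame layout: the saved frame pointer followed by the return address. A sketch of walking the chain; ShowBacktrace is a hypothetical name, and the code assumes frame pointers are not omitted by the compiler:

    #include <stdio.h>
    #include "libc/nexgen32e/stackframe.h"

    void ShowBacktrace(void) {
      struct StackFrame *f = __builtin_frame_address(0);
      for (; f; f = f->next) {  /* follow saved %rbp links upward */
        printf("frame %p return address %p\n", (void *)f, (void *)f->addr);
      }
    }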
cosmopolitan/libc/nexgen32e/nexgen32e.mk (4,483 bytes, 100 lines, jart/cosmopolitan, autogenerated: false)
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐
#───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘

PKGS += LIBC_NEXGEN32E

LIBC_NEXGEN32E_ARTIFACTS += LIBC_NEXGEN32E_A
LIBC_NEXGEN32E = $(LIBC_NEXGEN32E_A_DEPS) $(LIBC_NEXGEN32E_A)
LIBC_NEXGEN32E_A = o/$(MODE)/libc/nexgen32e/nexgen32e.a
LIBC_NEXGEN32E_A_FILES := $(wildcard libc/nexgen32e/*)
LIBC_NEXGEN32E_A_HDRS = $(filter %.h,$(LIBC_NEXGEN32E_A_FILES))
LIBC_NEXGEN32E_A_INCS = $(filter %.inc,$(LIBC_NEXGEN32E_A_FILES))
LIBC_NEXGEN32E_A_SRCS_A = $(filter %.s,$(LIBC_NEXGEN32E_A_FILES))
LIBC_NEXGEN32E_A_SRCS_S = $(filter %.S,$(LIBC_NEXGEN32E_A_FILES))
LIBC_NEXGEN32E_A_SRCS_C = $(filter %.c,$(LIBC_NEXGEN32E_A_FILES))

LIBC_NEXGEN32E_A_SRCS = \
        $(LIBC_NEXGEN32E_A_SRCS_A) \
        $(LIBC_NEXGEN32E_A_SRCS_S) \
        $(LIBC_NEXGEN32E_A_SRCS_C)

LIBC_NEXGEN32E_A_OBJS = \
        $(LIBC_NEXGEN32E_A_SRCS_A:%.s=o/$(MODE)/%.o) \
        $(LIBC_NEXGEN32E_A_SRCS_S:%.S=o/$(MODE)/%.o) \
        $(LIBC_NEXGEN32E_A_SRCS_C:%.c=o/$(MODE)/%.o)

LIBC_NEXGEN32E_A_CHECKS = \
        $(LIBC_NEXGEN32E_A).pkg \
        $(LIBC_NEXGEN32E_A_HDRS:%=o/$(MODE)/%.ok)

LIBC_NEXGEN32E_A_DIRECTDEPS = \
        LIBC_STUBS

LIBC_NEXGEN32E_A_DEPS := \
        $(call uniq,$(foreach x,$(LIBC_NEXGEN32E_A_DIRECTDEPS),$($(x))))

$(LIBC_NEXGEN32E_A): \
        libc/nexgen32e/ \
        $(LIBC_NEXGEN32E_A).pkg \
        $(LIBC_NEXGEN32E_A_OBJS)

$(LIBC_NEXGEN32E_A).pkg: \
        $(LIBC_NEXGEN32E_A_OBJS) \
        $(foreach x,$(LIBC_NEXGEN32E_A_DIRECTDEPS),$($(x)_A).pkg)

o/$(MODE)/libc/nexgen32e/argc2.o \
o/$(MODE)/libc/nexgen32e/argv2.o \
o/$(MODE)/libc/nexgen32e/auxv2.o \
o/$(MODE)/libc/nexgen32e/cescapec.o \
o/$(MODE)/libc/nexgen32e/crc32init.o \
o/$(MODE)/libc/nexgen32e/environ2.o \
o/$(MODE)/libc/nexgen32e/envp2.o \
o/$(MODE)/libc/nexgen32e/kbase36.o \
o/$(MODE)/libc/nexgen32e/ktens.o \
o/$(MODE)/libc/nexgen32e/ktolower.o \
o/$(MODE)/libc/nexgen32e/ktoupper.o \
o/$(MODE)/libc/nexgen32e/pid.o \
o/$(MODE)/libc/nexgen32e/program_invocation_name2.o \
o/$(MODE)/libc/nexgen32e/threaded.o: private \
        OVERRIDE_CFLAGS += \
                $(NO_MAGIC)

# these assembly files are safe to build on aarch64
o/$(MODE)/libc/nexgen32e/zip.o: libc/nexgen32e/zip.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/mcount.o: libc/nexgen32e/mcount.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/ksha256.o: libc/nexgen32e/ksha256.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/ksha512.o: libc/nexgen32e/ksha512.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/kcp437.o: libc/nexgen32e/kcp437.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/kreversebits.o: libc/nexgen32e/kreversebits.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/ktensindex.o: libc/nexgen32e/ktensindex.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/longjmp.o: libc/nexgen32e/longjmp.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/setjmp.o: libc/nexgen32e/setjmp.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/missingno.o: libc/nexgen32e/missingno.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/khalfcache3.o: libc/nexgen32e/khalfcache3.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/gclongjmp.o: libc/nexgen32e/gclongjmp.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
o/$(MODE)/libc/nexgen32e/checkstackalign.o: libc/nexgen32e/checkstackalign.S
        @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<

LIBC_NEXGEN32E_LIBS = $(foreach x,$(LIBC_NEXGEN32E_ARTIFACTS),$($(x)))
LIBC_NEXGEN32E_SRCS = $(foreach x,$(LIBC_NEXGEN32E_ARTIFACTS),$($(x)_SRCS))
LIBC_NEXGEN32E_HDRS = $(foreach x,$(LIBC_NEXGEN32E_ARTIFACTS),$($(x)_HDRS))
LIBC_NEXGEN32E_INCS = $(foreach x,$(LIBC_NEXGEN32E_ARTIFACTS),$($(x)_INCS))
LIBC_NEXGEN32E_CHECKS = $(foreach x,$(LIBC_NEXGEN32E_ARTIFACTS),$($(x)_CHECKS))
LIBC_NEXGEN32E_OBJS = $(foreach x,$(LIBC_NEXGEN32E_ARTIFACTS),$($(x)_OBJS))
$(LIBC_NEXGEN32E_OBJS): $(BUILD_FILES) libc/nexgen32e/nexgen32e.mk

.PHONY: o/$(MODE)/libc/nexgen32e
o/$(MODE)/libc/nexgen32e: $(LIBC_NEXGEN32E_CHECKS)
cosmopolitan/libc/nexgen32e/identity.S (2,005 bytes, 28 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2020 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"
#include "libc/notice.inc"

//      The identity() function.
//      @return first argument
identity:
        mov     %rdi,%rax
        ret
        .endfn  identity,globl
cosmopolitan/libc/nexgen32e/cpuid4.internal.h (2,753 bytes, 52 lines, jart/cosmopolitan, autogenerated: false)
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_CPUID4_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_CPUID4_H_
#include "libc/nexgen32e/kcpuids.h"

#define CPUID4_KEY                        (eax & 0xff)
#define CPUID4_CACHE_TYPE                 (((eax & 0x0000001fu) >> 000) + 0)
#define CPUID4_CACHE_LEVEL                (((eax & 0x000000e0u) >> 005) + 0)
#define CPUID4_IS_FULLY_ASSOCIATIVE       (!!(eax & (1u << 9)))
#define CPUID4_IS_SELF_INITIALIZING_LEVEL (!!(eax & (1u << 8)))
#define CPUID4_MAX_THREADS_SHARING_CACHE  (((eax & 0x03ffc000u) >> 016) + 1)
#define CPUID4_MAX_CORES_IN_PHYSICAL_CPU  (((eax & 0xfc000000u) >> 032) + 1)
#define CPUID4_SYSTEM_COHERENCY_LINE_SIZE (((Ebx & 0x00000fffu) >> 000) + 1)
#define CPUID4_PHYSICAL_LINE_PARTITIONS   (((Ebx & 0x003ff000u) >> 014) + 1)
#define CPUID4_WAYS_OF_ASSOCIATIVITY      (((Ebx & 0xffc00000u) >> 026) + 1)
#define CPUID4_NUMBER_OF_SETS             (Ecx + 1u)
#define CPUID4_WBINVD_INVD_BEHAVIOR       (!!(Edx & (1u << 0)))
#define CPUID4_INCLUSIVE_OF_LOWER_LEVELS  (!!(Edx & (1u << 1)))
#define CPUID4_COMPLEX_INDEXING           (!!(Edx & (1u << 2)))

#define CPUID4_CACHE_SIZE_IN_BYTES                                   \
  (CPUID4_WAYS_OF_ASSOCIATIVITY * CPUID4_PHYSICAL_LINE_PARTITIONS *  \
   CPUID4_SYSTEM_COHERENCY_LINE_SIZE * CPUID4_NUMBER_OF_SETS)

#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

#define CPUID4_ITERATE(I, FORM)                           \
  do {                                                    \
    uint32_t eax, Ebx, Ecx, Edx;                          \
    if (KCPUIDS(0H, EAX) >= 4) {                          \
      for (I = 0;; ++I) {                                 \
        asm("push\t%%rbx\n\t"                             \
            "cpuid\n\t"                                   \
            "mov\t%%ebx,%1\n\t"                           \
            "pop\t%%rbx"                                  \
            : "=a"(eax), "=rm"(Ebx), "=c"(Ecx), "=d"(Edx) \
            : "0"(4), "2"(I));                            \
        (void)Ebx;                                        \
        (void)Ecx;                                        \
        (void)Edx;                                        \
        if (CPUID4_CACHE_TYPE) {                          \
          FORM;                                           \
        } else {                                          \
          break;                                          \
        }                                                 \
      }                                                   \
    }                                                     \
  } while (0)

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_CPUID4_H_ */
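CPUID leaf 4 is iterated with an increasing subleaf index until the cache-type field reads zero, which is exactly what CPUID4_ITERATE encapsulates. A usage sketch built only from the macros above (x86-64 only; ShowCaches is a hypothetical name):

    #include <stdio.h>
    #include "libc/nexgen32e/cpuid4.internal.h"

    void ShowCaches(void) {
      int i;
      CPUID4_ITERATE(i, {
        printf("L%u cache: %u bytes\n", CPUID4_CACHE_LEVEL,
               CPUID4_CACHE_SIZE_IN_BYTES);  /* ways * parts * line * sets */
      });
    }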
cosmopolitan/libc/nexgen32e/threaded.c (2,023 bytes, 31 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2022 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/thread/tls.h"

/**
 * Contains TID of main thread or 0 if threading isn't enabled.
 */
int __threaded;

#ifdef __x86_64__
bool __tls_enabled;
#endif

unsigned __tls_index;
cosmopolitan/libc/nexgen32e/sha1ni.S (8,409 bytes, 287 lines, jart/cosmopolitan, autogenerated: false)
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
│vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│                                                                              │
│  Copyright 2015 Intel Corporation                                            │
│                                                                              │
│  Redistribution and use in source and binary forms, with or without         │
│  modification, are permitted provided that the following conditions         │
│  are met:                                                                    │
│                                                                              │
│  * Redistributions of source code must retain the above copyright           │
│    notice, this list of conditions and the following disclaimer.            │
│  * Redistributions in binary form must reproduce the above copyright        │
│    notice, this list of conditions and the following disclaimer in          │
│    the documentation and/or other materials provided with the               │
│    distribution.                                                             │
│  * Neither the name of Intel Corporation nor the names of its               │
│    contributors may be used to endorse or promote products derived          │
│    from this software without specific prior written permission.            │
│                                                                              │
│  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS        │
│  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT          │
│  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR      │
│  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT       │
│  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,      │
│  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT           │
│  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,      │
│  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY      │
│  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT        │
│  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE      │
│  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.       │
│                                                                              │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/macros.internal.h"
        .text
        .balign 32
        .ident  "\n\
Intel SHA-NI (BSD-3 License)\n\
Copyright 2015 Intel Corporation\n\
Sean Gulley <[email protected]>\n\
Tim Chen <[email protected]>\n"
        .include "libc/disclaimer.inc"

#define FRAME_SIZE      32

#define DIGEST_PTR      %rdi
#define DATA_PTR        %rsi
#define NUM_BLKS        %rdx

#define ABCD            %xmm0
#define E0              %xmm1   /* Need two E's b/c they ping pong */
#define E1              %xmm2
#define MSG0            %xmm3
#define MSG1            %xmm4
#define MSG2            %xmm5
#define MSG3            %xmm6
#define SHUF_MASK       %xmm7

//      Performs Intel® SHA-NI™ optimized SHA-1 update.
//
//      The function takes a pointer to the current hash values, a
//      pointer to the input data, and a number of 64 byte blocks to
//      process. Once all blocks have been processed, the digest pointer
//      is updated with the resulting hash value. The function only
//      processes complete blocks, there is no functionality to store
//      partial blocks. All message padding and hash value
//      initialization must be done outside the update function.
//
//      The indented lines in the loop are instructions related to
//      rounds processing. The non-indented lines are instructions
//      related to the message schedule.
//
//      void sha1_transform_ni(uint32_t digest[static 5],
//                             const void *data,
//                             uint32_t numBlocks);
//
//      @param  %rdi points to output digest
//      @param  %rsi points to input data
//      @param  %rdx is number of 64-byte blocks to process
//      @see    X86_HAVE(SHA)
sha1_transform_ni:
        push    %rbp
        mov     %rsp,%rbp
        .profilable
        sub     $FRAME_SIZE,%rsp
        shl     $6,NUM_BLKS             # convert to bytes
        jz      .Ldone_hash
        add     DATA_PTR,NUM_BLKS       # pointer to end of data

//      load initial hash values
        movdqa  UPPER_WORD_MASK(%rip),E1
        pinsrd  $3,1*16(DIGEST_PTR),E0
        movdqu  0*16(DIGEST_PTR),ABCD
        pand    E1,E0
        pshufd  $0x1B,ABCD,ABCD

        movdqa  PSHUFFLE_BYTE_FLIP_MASK(%rip),SHUF_MASK

.Lloop0:
//      Save hash values for addition after rounds
        movdqa  E0,(0*16)(%rsp)
        movdqa  ABCD,(1*16)(%rsp)

//      Rounds 0-3
        movdqu  0*16(DATA_PTR),MSG0
        pshufb  SHUF_MASK,MSG0
        paddd   MSG0,E0
        movdqa  ABCD,E1
        sha1rnds4 $0,E0,ABCD

//      Rounds 4-7
        movdqu  1*16(DATA_PTR),MSG1
        pshufb  SHUF_MASK,MSG1
        sha1nexte MSG1,E1
        movdqa  ABCD,E0
        sha1rnds4 $0,E1,ABCD
        sha1msg1 MSG1,MSG0

//      Rounds 8-11
        movdqu  2*16(DATA_PTR),MSG2
        pshufb  SHUF_MASK,MSG2
        sha1nexte MSG2,E0
        movdqa  ABCD,E1
        sha1rnds4 $0,E0,ABCD
        sha1msg1 MSG2,MSG1
        pxor    MSG2,MSG0

//      Rounds 12-15
        movdqu  3*16(DATA_PTR),MSG3
        pshufb  SHUF_MASK,MSG3
        sha1nexte MSG3,E1
        movdqa  ABCD,E0
        sha1msg2 MSG3,MSG0
        sha1rnds4 $0,E1,ABCD
        sha1msg1 MSG3,MSG2
        pxor    MSG3,MSG1

//      Rounds 16-19
        sha1nexte MSG0,E0
        movdqa  ABCD,E1
        sha1msg2 MSG0,MSG1
        sha1rnds4 $0,E0,ABCD
        sha1msg1 MSG0,MSG3
        pxor    MSG0,MSG2

//      Rounds 20-23
        sha1nexte MSG1,E1
        movdqa  ABCD,E0
        sha1msg2 MSG1,MSG2
        sha1rnds4 $1,E1,ABCD
        sha1msg1 MSG1,MSG0
        pxor    MSG1,MSG3

//      Rounds 24-27
        sha1nexte MSG2,E0
        movdqa  ABCD,E1
        sha1msg2 MSG2,MSG3
        sha1rnds4 $1,E0,ABCD
        sha1msg1 MSG2,MSG1
        pxor    MSG2,MSG0

//      Rounds 28-31
        sha1nexte MSG3,E1
        movdqa  ABCD,E0
        sha1msg2 MSG3,MSG0
        sha1rnds4 $1,E1,ABCD
        sha1msg1 MSG3,MSG2
        pxor    MSG3,MSG1

//      Rounds 32-35
        sha1nexte MSG0,E0
        movdqa  ABCD,E1
        sha1msg2 MSG0,MSG1
        sha1rnds4 $1,E0,ABCD
        sha1msg1 MSG0,MSG3
        pxor    MSG0,MSG2

//      Rounds 36-39
        sha1nexte MSG1,E1
        movdqa  ABCD,E0
        sha1msg2 MSG1,MSG2
        sha1rnds4 $1,E1,ABCD
        sha1msg1 MSG1,MSG0
        pxor    MSG1,MSG3

//      Rounds 40-43
        sha1nexte MSG2,E0
        movdqa  ABCD,E1
        sha1msg2 MSG2,MSG3
        sha1rnds4 $2,E0,ABCD
        sha1msg1 MSG2,MSG1
        pxor    MSG2,MSG0

//      Rounds 44-47
        sha1nexte MSG3,E1
        movdqa  ABCD,E0
        sha1msg2 MSG3,MSG0
        sha1rnds4 $2,E1,ABCD
        sha1msg1 MSG3,MSG2
        pxor    MSG3,MSG1

//      Rounds 48-51
        sha1nexte MSG0,E0
        movdqa  ABCD,E1
        sha1msg2 MSG0,MSG1
        sha1rnds4 $2,E0,ABCD
        sha1msg1 MSG0,MSG3
        pxor    MSG0,MSG2

//      Rounds 52-55
        sha1nexte MSG1,E1
        movdqa  ABCD,E0
        sha1msg2 MSG1,MSG2
        sha1rnds4 $2,E1,ABCD
        sha1msg1 MSG1,MSG0
        pxor    MSG1,MSG3

//      Rounds 56-59
        sha1nexte MSG2,E0
        movdqa  ABCD,E1
        sha1msg2 MSG2,MSG3
        sha1rnds4 $2,E0,ABCD
        sha1msg1 MSG2,MSG1
        pxor    MSG2,MSG0

//      Rounds 60-63
        sha1nexte MSG3,E1
        movdqa  ABCD,E0
        sha1msg2 MSG3,MSG0
        sha1rnds4 $3,E1,ABCD
        sha1msg1 MSG3,MSG2
        pxor    MSG3,MSG1

//      Rounds 64-67
        sha1nexte MSG0,E0
        movdqa  ABCD,E1
        sha1msg2 MSG0,MSG1
        sha1rnds4 $3,E0,ABCD
        sha1msg1 MSG0,MSG3
        pxor    MSG0,MSG2

//      Rounds 68-71
        sha1nexte MSG1,E1
        movdqa  ABCD,E0
        sha1msg2 MSG1,MSG2
        sha1rnds4 $3,E1,ABCD
        pxor    MSG1,MSG3

//      Rounds 72-75
        sha1nexte MSG2,E0
        movdqa  ABCD,E1
        sha1msg2 MSG2,MSG3
        sha1rnds4 $3,E0,ABCD

//      Rounds 76-79
        sha1nexte MSG3,E1
        movdqa  ABCD,E0
        sha1rnds4 $3,E1,ABCD

//      Add current hash values with previously saved
        sha1nexte (0*16)(%rsp),E0
        paddd   (1*16)(%rsp),ABCD

//      Increment data pointer and loop if more to process
        add     $64,DATA_PTR
        cmp     NUM_BLKS,DATA_PTR
        jne     .Lloop0

//      Write hash values back in the correct order
        pshufd  $0x1B,ABCD,ABCD
        movdqu  ABCD,0*16(DIGEST_PTR)
        pextrd  $3,E0,1*16(DIGEST_PTR)

.Ldone_hash:
        leave
        ret
        .endfn  sha1_transform_ni,globl

        .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
        .balign 16
PSHUFFLE_BYTE_FLIP_MASK:
        .octa   0x000102030405060708090a0b0c0d0e0f

        .section .rodata.cst16.UPPER_WORD_MASK, "aM", @progbits, 16
        .balign 16
UPPER_WORD_MASK:
        .octa   0xFFFFFFFF000000000000000000000000
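Because the function only consumes whole pre-padded blocks, the smallest possible demonstration is hashing the empty message, whose padding is a single 0x80 byte followed by zeros (the 64-bit length field is zero). Sha1Empty is a hypothetical name; check X86_HAVE(SHA) before calling in real code:

    #include <stdint.h>

    void sha1_transform_ni(uint32_t digest[5], const void *data, uint32_t n);

    void Sha1Empty(uint32_t digest[5]) {
      static const unsigned char block[64] = {0x80};  /* padding bit only */
      digest[0] = 0x67452301; digest[1] = 0xEFCDAB89; /* SHA-1 IV         */
      digest[2] = 0x98BADCFE; digest[3] = 0x10325476;
      digest[4] = 0xC3D2E1F0;
      sha1_transform_ni(digest, block, 1);  /* digest now = SHA-1("")     */
    }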
cosmopolitan/libc/nexgen32e/macros.h (329 bytes, 12 lines, jart/cosmopolitan, autogenerated: false)
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_MACROS_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_MACROS_H_
#ifdef __ASSEMBLER__
#include "libc/nexgen32e/macros.internal.inc"
#else
/* let's give auto-import tooling a helping hand */
#define pbroadcastb pbroadcastb
#endif /* __ASSEMBLER__ */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_MACROS_H_ */
cosmopolitan/libc/nexgen32e/zip.S (2,594 bytes, 49 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2020 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"
#include "ape/relocations.h"
#include "libc/zip.h"

// ZIP Central Directory.

        .section .zip.3,"",@progbits
        .hidden __zip_start
        .globl  __zip_start
        .type   __zip_start,@object
__zip_start:
        .previous

/* ... decentralized content ... */

        .section .zip.5,"",@progbits
__zip_end:
        .long   kZipCdirHdrMagic        // magic
        .short  0                       // disk
        .short  0                       // starting disk
        .short  v_zip_records           // number of records on disk
        .short  v_zip_records           // records
        .long   v_zip_cdirsize          // size of central directory
        .weak   v_zip_cdoffset
        .long   v_zip_cdoffset          // central directory offset
        .short  v_zip_commentsize       // comment size
        .endobj __zip_end,globl,hidden
        .weak   v_zip_records
        .weak   v_zip_cdirsize
        .weak   v_zip_commentsize
        .previous
cosmopolitan/libc/nexgen32e/checkstackalign.S (2,354 bytes, 49 lines, jart/cosmopolitan, autogenerated: false)
/* [ISC license header, Copyright 2022 Justine Alexandra Roberts Tunney, same text as in program_invocation_name2.c above] */
#include "libc/macros.internal.h"

//      Checks that stack is 16-byte aligned.
//
//      This function crashes if called with a misaligned stack.
CheckStackIsAligned:
#ifdef __x86_64__
        push    %rbp
        mov     %rsp,%rbp
        test    $15,%rsp
        jz      1f
        int3                            // misaligned stack trap
1:      pop     %rbp
        ret
#elif defined(__aarch64__)
        stp     x29,x30,[sp,#-16]!
        mov     x29,sp
        and     x0,x29,#15
        cbz     x0,1f
        brk     #666                    // misaligned stack trap
1:      ldp     x29,x30,[sp],#16
        ret
#else
#error "unsupported architecture"
#endif
        .endfn  CheckStackIsAligned,globl
cosmopolitan/libc/nexgen32e/kompressor.h (1,221 bytes, 25 lines, jart/cosmopolitan, autogenerated: false)
#ifndef COSMOPOLITAN_LIBC_KOMPRESSOR_KOMPRESSOR_H_
#define COSMOPOLITAN_LIBC_KOMPRESSOR_KOMPRESSOR_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_
#if 0
/*───────────────────────────────────────────────────────────────────────────│─╗
│ cosmopolitan § standard library » compression                            ─╬─│┼
╚────────────────────────────────────────────────────────────────────────────│*/
#endif

struct RlDecode {
  uint8_t repititions;
  uint8_t byte;
};

void rldecode(void *dest, const struct RlDecode *) _Hide;
void rldecode2(void *dest, const struct RlDecode *) _Hide;
const uint8_t *lz4check(const void *data) _Hide;
void *lz4cpy(void *dest, const void *blockdata, size_t blocksize) _Hide;
void *lz4decode(void *dest, const void *src) _Hide;

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_KOMPRESSOR_KOMPRESSOR_H_ */
cosmopolitan/libc/nexgen32e/gc.internal.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_GC_INTERNAL_H_ #define COSMOPOLITAN_LIBC_NEXGEN32E_GC_INTERNAL_H_ #include "libc/nexgen32e/stackframe.h" #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ struct Garbage { struct StackFrame *frame; intptr_t fn; intptr_t arg; intptr_t ret; }; struct Garbages { int i, n; struct Garbage *p; }; int64_t __gc(void); COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_LIBC_NEXGEN32E_GC_INTERNAL_H_ */
497
24
jart/cosmopolitan
false
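gc.internal.h above describes the bookkeeping for cosmopolitan's deferred-call mechanism: a growable array of {frame, fn, arg} records. The exact drain policy of __gc() is not shown in this excerpt, so the following is a speculative model only; it assumes entries are pushed in stack order and that unwinding a frame runs every trailing entry recorded against it.

#include <stdint.h>

struct StackFrame;  /* opaque for this sketch */

struct Garbage {
  struct StackFrame *frame;
  intptr_t fn, arg, ret;
};

struct Garbages {
  int i, n;
  struct Garbage *p;
};

/* Speculative model of draining deferred calls when `dying` unwinds. */
static void DrainFrame(struct Garbages *g, struct StackFrame *dying) {
  while (g->i > 0 && g->p[g->i - 1].frame == dying) {
    struct Garbage e = g->p[--g->i];
    ((void (*)(intptr_t))e.fn)(e.arg);
  }
}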
cosmopolitan/libc/nexgen32e/longjmp.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" // Loads previously saved processor state. // // @param rdi points to the jmp_buf // @param esi is returned by setjmp() invocation (coerced nonzero) // @noreturn // @see _gclongjmp() // @see siglongjmp() longjmp: #ifdef __x86_64__ mov %esi,%eax test %eax,%eax jnz 1f inc %eax 1: mov (%rdi),%rsp mov 8(%rdi),%rbx mov 16(%rdi),%rbp mov 24(%rdi),%r12 mov 32(%rdi),%r13 mov 40(%rdi),%r14 mov 48(%rdi),%r15 jmp *56(%rdi) #elif defined(__aarch64__) ldp x19,x20,[x0,#0] ldp x21,x22,[x0,#16] ldp x23,x24,[x0,#32] ldp x25,x26,[x0,#48] ldp x27,x28,[x0,#64] ldp x29,x30,[x0,#80] ldr x2,[x0,#104] mov sp,x2 ldp d8 ,d9,[x0,#112] ldp d10,d11,[x0,#128] ldp d12,d13,[x0,#144] ldp d14,d15,[x0,#160] cmp w1,0 csinc w0,w1,wzr,ne br x30 #endif .endfn longjmp,globl .alias longjmp,_longjmp
2,668
61
jart/cosmopolitan
false
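longjmp.S above implements the ISO C contract, including the detail that a zero value is coerced to 1 (the test/inc pair on the x86 path, csinc on the aarch64 path). A minimal portable usage example:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;

static void Fail(void) {
  longjmp(env, 7);  /* control reappears at setjmp() below */
}

int main(void) {
  int rc = setjmp(env);
  if (!rc) {
    Fail();                         /* never returns */
  }
  printf("resumed with %d\n", rc);  /* prints: resumed with 7 */
  return 0;
}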
cosmopolitan/libc/nexgen32e/auxv.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 sw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" #include "libc/notice.inc" .initbss 300,_init_auxv // Global variable holding _start(auxv) parameter. __auxv: .quad 0 .endobj __auxv,globl .previous .init.start 300,_init_auxv mov %r15,%rax stosq .init.end 300,_init_auxv
2,100
32
jart/cosmopolitan
false
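auxv.S above squirrels away the _start(auxv) pointer. For readers unfamiliar with the shape of that data: it is a sequence of (key, value) machine-word pairs terminated by AT_NULL (key 0). The extern declaration below assumes direct linkage against the __auxv object purely for illustration; real cosmo code would normally go through higher-level accessors.

/* Hypothetical direct use of the saved pointer. */
extern unsigned long *__auxv;

unsigned long MyGetAuxVal(unsigned long key) {
  for (unsigned long *ap = __auxv; ap && ap[0]; ap += 2) {
    if (ap[0] == key) return ap[1];  /* e.g. AT_PAGESZ == 6 on Linux */
  }
  return 0;
}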
cosmopolitan/libc/nexgen32e/mul8x8adx.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2021 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" // Computes 1024-bit product of 512-bit and 512-bit numbers. // // Instructions: 260 // Total Cycles: 98 // Total uOps: 452 // uOps Per Cycle: 4.61 // IPC: 2.65 // Block RThroughput: 75.3 // // @param rdi receives 16 quadword result // @param rsi is left hand side which must have 8 quadwords // @param rdx is right hand side which must have 8 quadwords // @note words are host endian while array is little endian // @mayalias Mul8x8Adx: push %rbp mov %rsp,%rbp .profilable sub $104,%rsp mov %r15,-8(%rbp) mov %r14,-16(%rbp) mov %r13,-24(%rbp) mov %r12,-32(%rbp) mov %rbx,-40(%rbp) mov %rdx,%r12 mov (%rdx),%rdx mulx (%rsi),%rax,%rcx mov %rdi,-48(%rbp) mov %rax,-56(%rbp) mulx 8(%rsi),%rdx,%rax add %rdx,%rcx mov (%r12),%rdx mulx 16(%rsi),%rdx,%rbx adc %rdx,%rax mov (%r12),%rdx mulx 24(%rsi),%rdx,%r11 adc %rdx,%rbx mov (%r12),%rdx mulx 32(%rsi),%rdx,%r10 adc %rdx,%r11 mov (%r12),%rdx mulx 40(%rsi),%rdx,%r9 adc %rdx,%r10 mov (%r12),%rdx mulx 48(%rsi),%rdx,%r8 adc %rdx,%r9 mov (%r12),%rdx mulx 56(%rsi),%rdx,%rdi adc %rdx,%r8 adc $0,%rdi xor %r13d,%r13d mov 8(%r12),%rdx mulx (%rsi),%r15,%r14 adox %r15,%rcx adcx %r14,%rax mov %rcx,-64(%rbp) mulx 8(%rsi),%r14,%rcx adox %r14,%rax adcx %rcx,%rbx mulx 16(%rsi),%r14,%rcx adox %r14,%rbx adcx %rcx,%r11 mulx 24(%rsi),%r14,%rcx adox %r14,%r11 adcx %rcx,%r10 mulx 32(%rsi),%r14,%rcx adox %r14,%r10 adcx %rcx,%r9 mulx 40(%rsi),%r14,%rcx adox %r14,%r9 adcx %rcx,%r8 mulx 48(%rsi),%r14,%rcx adox %r14,%r8 adcx %rcx,%rdi mulx 56(%rsi),%rdx,%rcx adox %rdx,%rdi adcx %r13,%rcx mov 16(%r12),%rdx adox %r13,%rcx mulx (%rsi),%r15,%r14 xor %r13d,%r13d adox %r15,%rax adcx %r14,%rbx mov %rax,-72(%rbp) mulx 8(%rsi),%r14,%rax adox %r14,%rbx adcx %rax,%r11 mulx 16(%rsi),%r14,%rax adox %r14,%r11 adcx %rax,%r10 mulx 24(%rsi),%r14,%rax adox %r14,%r10 adcx %rax,%r9 mulx 32(%rsi),%r14,%rax adox %r14,%r9 adcx %rax,%r8 mulx 40(%rsi),%r14,%rax adox %r14,%r8 adcx %rax,%rdi mulx 48(%rsi),%r14,%rax adox %r14,%rdi adcx %rax,%rcx mulx 56(%rsi),%rdx,%rax adox %rdx,%rcx adcx %r13,%rax adox %r13,%rax xor %r13d,%r13d mov 24(%r12),%rdx mulx (%rsi),%r15,%r14 adox %r15,%rbx adcx %r14,%r11 mov %rbx,-80(%rbp) mov %r11,%r15 mulx 8(%rsi),%r14,%rbx adox %r14,%r15 adcx %rbx,%r10 mulx 16(%rsi),%rbx,%r11 adox %rbx,%r10 adcx %r11,%r9 mulx 24(%rsi),%rbx,%r11 adox %rbx,%r9 adcx %r11,%r8 mulx 32(%rsi),%rbx,%r11 adox %rbx,%r8 adcx %r11,%rdi mulx 40(%rsi),%rbx,%r11 adox %rbx,%rdi adcx %r11,%rcx mulx 48(%rsi),%rbx,%r11 
adox %rbx,%rcx adcx %r11,%rax mulx 56(%rsi),%rdx,%r11 adox %rdx,%rax adcx %r13,%r11 mov 32(%r12),%rdx adox %r13,%r11 xor %ebx,%ebx mulx (%rsi),%r14,%r13 adox %r14,%r15 adcx %r13,%r10 mov %r15,-88(%rbp) mulx 8(%rsi),%r14,%r13 mov %r10,%r15 adcx %r13,%r9 adox %r14,%r15 mulx 16(%rsi),%r13,%r10 adox %r13,%r9 adcx %r10,%r8 mulx 24(%rsi),%r13,%r10 adcx %r10,%rdi adox %r13,%r8 mulx 32(%rsi),%r13,%r10 adox %r13,%rdi adcx %r10,%rcx mulx 40(%rsi),%r13,%r10 adox %r13,%rcx adcx %r10,%rax mulx 48(%rsi),%r13,%r10 adox %r13,%rax adcx %r10,%r11 mulx 56(%rsi),%rdx,%r10 adox %rdx,%r11 adcx %rbx,%r10 mov 40(%r12),%rdx adox %rbx,%r10 mulx (%rsi),%r14,%r13 xor %ebx,%ebx adox %r14,%r15 mov %r15,-96(%rbp) adcx %r13,%r9 mulx 8(%rsi),%r14,%r13 mov %r9,%r15 adox %r14,%r15 adcx %r13,%r8 mulx 16(%rsi),%r13,%r9 adox %r13,%r8 adcx %r9,%rdi mulx 24(%rsi),%r13,%r9 adox %r13,%rdi adcx %r9,%rcx mulx 32(%rsi),%r13,%r9 adox %r13,%rcx adcx %r9,%rax mulx 40(%rsi),%r13,%r9 adox %r13,%rax adcx %r9,%r11 mulx 48(%rsi),%r13,%r9 adox %r13,%r11 adcx %r9,%r10 mulx 56(%rsi),%rdx,%r9 adox %rdx,%r10 adcx %rbx,%r9 adox %rbx,%r9 xor %ebx,%ebx mov 48(%r12),%rdx mulx (%rsi),%r14,%r13 adox %r14,%r15 adcx %r13,%r8 mov %r15,-104(%rbp) mulx 8(%rsi),%r14,%r13 mov %r8,%r15 adcx %r13,%rdi adox %r14,%r15 mulx 16(%rsi),%r13,%r8 adox %r13,%rdi adcx %r8,%rcx mulx 24(%rsi),%r13,%r8 adox %r13,%rcx adcx %r8,%rax mulx 32(%rsi),%r13,%r8 adox %r13,%rax adcx %r8,%r11 mulx 40(%rsi),%r13,%r8 adox %r13,%r11 adcx %r8,%r10 mulx 48(%rsi),%r13,%r8 adox %r13,%r10 adcx %r8,%r9 mulx 56(%rsi),%rdx,%r8 adox %rdx,%r9 mov 56(%r12),%rdx adcx %rbx,%r8 mulx (%rsi),%r13,%r12 adox %rbx,%r8 xor %ebx,%ebx adox %r13,%r15 adcx %r12,%rdi mulx 8(%rsi),%r13,%r12 adox %r13,%rdi adcx %r12,%rcx mulx 16(%rsi),%r13,%r12 adox %r13,%rcx adcx %r12,%rax mulx 24(%rsi),%r13,%r12 adox %r13,%rax adcx %r12,%r11 mulx 32(%rsi),%r13,%r12 adox %r13,%r11 adcx %r12,%r10 mulx 40(%rsi),%r13,%r12 adox %r13,%r10 adcx %r12,%r9 mulx 48(%rsi),%r13,%r12 mulx 56(%rsi),%rsi,%rdx adox %r13,%r9 adcx %r12,%r8 adox %rsi,%r8 adcx %rbx,%rdx mov -64(%rbp),%rsi adox %rbx,%rdx mov -48(%rbp),%rbx mov -56(%rbp),%r14 mov %rsi,8(%rbx) mov -72(%rbp),%rsi mov %r14,(%rbx) mov %rsi,16(%rbx) mov -80(%rbp),%rsi mov %rsi,24(%rbx) mov -88(%rbp),%rsi mov %rsi,32(%rbx) mov -96(%rbp),%rsi mov %rsi,40(%rbx) mov -104(%rbp),%rsi mov %r15,56(%rbx) mov %rsi,48(%rbx) mov %rdi,64(%rbx) mov %rcx,72(%rbx) mov %rax,80(%rbx) mov %r11,88(%rbx) mov %r10,96(%rbx) mov %r9,104(%rbx) mov %r8,112(%rbx) mov %rdx,120(%rbx) mov -8(%rbp),%r15 mov -16(%rbp),%r14 mov -24(%rbp),%r13 mov -32(%rbp),%r12 mov -40(%rbp),%rbx leave ret .endfn Mul8x8Adx,globl .end TIMELINE VIEW 0123456789 0123456789 0123456789 0123456789 Index 0123456789 0123456789 0123456789 0123456789 [0,0] DeER . . . . . . . . . . . . . . . . subq $104, %rsp [0,1] DeER . . . . . . . . . . . . . . . . movq %r15, -8(%rbp) [0,2] D=eER. . . . . . . . . . . . . . . . movq %r14, -16(%rbp) [0,3] D==eER . . . . . . . . . . . . . . . movq %r13, -24(%rbp) [0,4] D===eER . . . . . . . . . . . . . . . movq %r12, -32(%rbp) [0,5] D====eER . . . . . . . . . . . . . . . movq %rbx, -40(%rbp) [0,6] .DeE---R . . . . . . . . . . . . . . . movq %rdx, %r12 [0,7] .DeeeeeER . . . . . . . . . . . . . . . movq (%rdx), %rdx [0,8] .D=====eeeeeeeeeER . . . . . . . . . . . . . mulxq (%rsi), %rax, %rcx [0,9] . D====eE--------R . . . . . . . . . . . . . movq %rdi, -48(%rbp) [0,10] . D=======eE-----R . . . . . . . . . . . . . movq %rax, -56(%rbp) [0,11] . D=====eeeeeeeeeER . . . . . . . . . . . . . mulxq 8(%rsi), %rdx, %rax [0,12] . 
D============eER . . . . . . . . . . . . . addq %rdx, %rcx [0,13] . DeeeeeE--------R . . . . . . . . . . . . . movq (%r12), %rdx [0,14] . D=====eeeeeeeeeER. . . . . . . . . . . . . mulxq 16(%rsi), %rdx, %rbx [0,15] . D============eER. . . . . . . . . . . . . adcq %rdx, %rax [0,16] . DeeeeeE--------R. . . . . . . . . . . . . movq (%r12), %rdx [0,17] . D=====eeeeeeeeeER . . . . . . . . . . . . mulxq 24(%rsi), %rdx, %r11 [0,18] . D============eER . . . . . . . . . . . . adcq %rdx, %rbx [0,19] . DeeeeeE--------R . . . . . . . . . . . . movq (%r12), %rdx [0,20] . D=====eeeeeeeeeER . . . . . . . . . . . . mulxq 32(%rsi), %rdx, %r10 [0,21] . .D============eER . . . . . . . . . . . . adcq %rdx, %r11 [0,22] . .DeeeeeE--------R . . . . . . . . . . . . movq (%r12), %rdx [0,23] . .D=====eeeeeeeeeER . . . . . . . . . . . . mulxq 40(%rsi), %rdx, %r9 [0,24] . . D============eER . . . . . . . . . . . . adcq %rdx, %r10 [0,25] . . DeeeeeE--------R . . . . . . . . . . . . movq (%r12), %rdx [0,26] . . D=====eeeeeeeeeER . . . . . . . . . . . . mulxq 48(%rsi), %rdx, %r8 [0,27] . . D============eER . . . . . . . . . . . . adcq %rdx, %r9 [0,28] . . DeeeeeE--------R . . . . . . . . . . . . movq (%r12), %rdx [0,29] . . D=====eeeeeeeeeER. . . . . . . . . . . . mulxq 56(%rsi), %rdx, %rdi [0,30] . . D============eER. . . . . . . . . . . . adcq %rdx, %r8 [0,31] . . D=============eER . . . . . . . . . . . adcq $0, %rdi [0,32] . . D---------------R . . . . . . . . . . . xorl %r13d, %r13d [0,33] . . DeeeeeE---------R . . . . . . . . . . . movq 8(%r12), %rdx [0,34] . . D====eeeeeeeeeER . . . . . . . . . . . mulxq (%rsi), %r15, %r14 [0,35] . . D=======eE-----R . . . . . . . . . . . adoxq %r15, %rcx [0,36] . . D=============eER . . . . . . . . . . . adcxq %r14, %rax [0,37] . . .D=======eE-----R . . . . . . . . . . . movq %rcx, -64(%rbp) [0,38] . . .D====eeeeeeeeeER . . . . . . . . . . . mulxq 8(%rsi), %r14, %rcx [0,39] . . .D=============eER . . . . . . . . . . . adoxq %r14, %rax [0,40] . . . D=============eER . . . . . . . . . . . adcxq %rcx, %rbx [0,41] . . . D====eeeeeeeeeE-R . . . . . . . . . . . mulxq 16(%rsi), %r14, %rcx [0,42] . . . D==============eER. . . . . . . . . . . adoxq %r14, %rbx [0,43] . . . D==============eER . . . . . . . . . . adcxq %rcx, %r11 [0,44] . . . D====eeeeeeeeeE--R . . . . . . . . . . mulxq 24(%rsi), %r14, %rcx [0,45] . . . D===============eER . . . . . . . . . . adoxq %r14, %r11 [0,46] . . . D===============eER . . . . . . . . . . adcxq %rcx, %r10 [0,47] . . . D====eeeeeeeeeE---R . . . . . . . . . . mulxq 32(%rsi), %r14, %rcx [0,48] . . . D================eER . . . . . . . . . . adoxq %r14, %r10 [0,49] . . . D================eER. . . . . . . . . . adcxq %rcx, %r9 [0,50] . . . D====eeeeeeeeeE----R. . . . . . . . . . mulxq 40(%rsi), %r14, %rcx [0,51] . . . D=================eER . . . . . . . . . adoxq %r14, %r9 [0,52] . . . .D=================eER . . . . . . . . . adcxq %rcx, %r8 [0,53] . . . .D====eeeeeeeeeE-----R . . . . . . . . . mulxq 48(%rsi), %r14, %rcx [0,54] . . . .D==================eER . . . . . . . . . adoxq %r14, %r8 [0,55] . . . . D==================eER . . . . . . . . . adcxq %rcx, %rdi [0,56] . . . . D====eeeeeeeeeE------R . . . . . . . . . mulxq 56(%rsi), %rdx, %rcx [0,57] . . . . D===================eER. . . . . . . . . adoxq %rdx, %rdi [0,58] . . . . D===================eER . . . . . . . . adcxq %r13, %rcx [0,59] . . . . DeeeeeE---------------R . . . . . . . . movq 16(%r12), %rdx [0,60] . . . . D====================eER . . . . . . . . adoxq %r13, %rcx [0,61] . . . . 
D====eeeeeeeeeE-------R . . . . . . . . mulxq (%rsi), %r15, %r14 [0,62] . . . . D---------------------R . . . . . . . . xorl %r13d, %r13d [0,63] . . . . D=======eE------------R . . . . . . . . adoxq %r15, %rax [0,64] . . . . D============eE------R . . . . . . . . adcxq %r14, %rbx [0,65] . . . . D=======eE-----------R . . . . . . . . movq %rax, -72(%rbp) [0,66] . . . . D====eeeeeeeeeE------R . . . . . . . . mulxq 8(%rsi), %r14, %rax [0,67] . . . . .D============eE-----R . . . . . . . . adoxq %r14, %rbx [0,68] . . . . .D=============eE----R . . . . . . . . adcxq %rax, %r11 [0,69] . . . . .D====eeeeeeeeeE-----R . . . . . . . . mulxq 16(%rsi), %r14, %rax [0,70] . . . . . D=============eE---R . . . . . . . . adoxq %r14, %r11 [0,71] . . . . . D==============eE--R . . . . . . . . adcxq %rax, %r10 [0,72] . . . . . D====eeeeeeeeeE----R . . . . . . . . mulxq 24(%rsi), %r14, %rax [0,73] . . . . . D==============eE-R . . . . . . . . adoxq %r14, %r10 [0,74] . . . . . D===============eER . . . . . . . . adcxq %rax, %r9 [0,75] . . . . . D====eeeeeeeeeE---R . . . . . . . . mulxq 32(%rsi), %r14, %rax [0,76] . . . . . D===============eER . . . . . . . . adoxq %r14, %r9 [0,77] . . . . . D================eER . . . . . . . . adcxq %rax, %r8 [0,78] . . . . . D====eeeeeeeeeE----R . . . . . . . . mulxq 40(%rsi), %r14, %rax [0,79] . . . . . D================eER. . . . . . . . adoxq %r14, %r8 [0,80] . . . . . D=================eER . . . . . . . adcxq %rax, %rdi [0,81] . . . . . D====eeeeeeeeeE-----R . . . . . . . mulxq 48(%rsi), %r14, %rax [0,82] . . . . . .D=================eER . . . . . . . adoxq %r14, %rdi [0,83] . . . . . .D==================eER . . . . . . . adcxq %rax, %rcx [0,84] . . . . . .D====eeeeeeeeeE------R . . . . . . . mulxq 56(%rsi), %rdx, %rax [0,85] . . . . . . D==================eER . . . . . . . adoxq %rdx, %rcx [0,86] . . . . . . D===================eER. . . . . . . adcxq %r13, %rax [0,87] . . . . . . D====================eER . . . . . . adoxq %r13, %rax [0,88] . . . . . . D----------------------R . . . . . . xorl %r13d, %r13d [0,89] . . . . . . DeeeeeE----------------R . . . . . . movq 24(%r12), %rdx [0,90] . . . . . . D====eeeeeeeeeE-------R . . . . . . mulxq (%rsi), %r15, %r14 [0,91] . . . . . . D===========eE--------R . . . . . . adoxq %r15, %rbx [0,92] . . . . . . D=============eE------R . . . . . . adcxq %r14, %r11 [0,93] . . . . . . D===========eE-------R . . . . . . movq %rbx, -80(%rbp) [0,94] . . . . . . D=============eE-----R . . . . . . movq %r11, %r15 [0,95] . . . . . . D====eeeeeeeeeE------R . . . . . . mulxq 8(%rsi), %r14, %rbx [0,96] . . . . . . D=============eE----R . . . . . . adoxq %r14, %r15 [0,97] . . . . . . D==============eE---R . . . . . . adcxq %rbx, %r10 [0,98] . . . . . . D====eeeeeeeeeE-----R . . . . . . mulxq 16(%rsi), %rbx, %r11 [0,99] . . . . . . .D==============eE--R . . . . . . adoxq %rbx, %r10 [0,100] . . . . . . .D===============eE-R . . . . . . adcxq %r11, %r9 [0,101] . . . . . . .D====eeeeeeeeeE----R . . . . . . mulxq 24(%rsi), %rbx, %r11 [0,102] . . . . . . . D===============eER . . . . . . adoxq %rbx, %r9 [0,103] . . . . . . . D================eER . . . . . . adcxq %r11, %r8 [0,104] . . . . . . . D====eeeeeeeeeE----R . . . . . . mulxq 32(%rsi), %rbx, %r11 [0,105] . . . . . . . D================eER . . . . . . adoxq %rbx, %r8 [0,106] . . . . . . . D=================eER . . . . . . adcxq %r11, %rdi [0,107] . . . . . . . D====eeeeeeeeeE-----R . . . . . . mulxq 40(%rsi), %rbx, %r11 [0,108] . . . . . . . D=================eER. . . . . . 
adoxq %rbx, %rdi [0,109] . . . . . . . D==================eER . . . . . adcxq %r11, %rcx [0,110] . . . . . . . D====eeeeeeeeeE------R . . . . . mulxq 48(%rsi), %rbx, %r11 [0,111] . . . . . . . D==================eER . . . . . adoxq %rbx, %rcx [0,112] . . . . . . . D===================eER . . . . . adcxq %r11, %rax [0,113] . . . . . . . D====eeeeeeeeeE-------R . . . . . mulxq 56(%rsi), %rdx, %r11 [0,114] . . . . . . . .D===================eER . . . . . adoxq %rdx, %rax [0,115] . . . . . . . .D====================eER. . . . . adcxq %r13, %r11 [0,116] . . . . . . . .DeeeeeE----------------R. . . . . movq 32(%r12), %rdx [0,117] . . . . . . . .D=====================eER . . . . adoxq %r13, %r11 [0,118] . . . . . . . .D=====E-----------------R . . . . xorl %ebx, %ebx [0,119] . . . . . . . . D====eeeeeeeeeE--------R . . . . mulxq (%rsi), %r14, %r13 [0,120] . . . . . . . . D===========eE---------R . . . . adoxq %r14, %r15 [0,121] . . . . . . . . D=============eE-------R . . . . adcxq %r13, %r10 [0,122] . . . . . . . . D===========eE--------R . . . . movq %r15, -88(%rbp) [0,123] . . . . . . . . D====eeeeeeeeeE-------R . . . . mulxq 8(%rsi), %r14, %r13 [0,124] . . . . . . . . D=============eE------R . . . . movq %r10, %r15 [0,125] . . . . . . . . D============eE------R . . . . adcxq %r13, %r9 [0,126] . . . . . . . . D=============eE-----R . . . . adoxq %r14, %r15 [0,127] . . . . . . . . D====eeeeeeeeeE------R . . . . mulxq 16(%rsi), %r13, %r10 [0,128] . . . . . . . . D=============eE----R . . . . adoxq %r13, %r9 [0,129] . . . . . . . . D==============eE---R . . . . adcxq %r10, %r8 [0,130] . . . . . . . . D====eeeeeeeeeE-----R . . . . mulxq 24(%rsi), %r13, %r10 [0,131] . . . . . . . . .D==============eE--R . . . . adcxq %r10, %rdi [0,132] . . . . . . . . .D===============eE-R . . . . adoxq %r13, %r8 [0,133] . . . . . . . . .D====eeeeeeeeeE----R . . . . mulxq 32(%rsi), %r13, %r10 [0,134] . . . . . . . . . D===============eER . . . . adoxq %r13, %rdi [0,135] . . . . . . . . . D================eER . . . . adcxq %r10, %rcx [0,136] . . . . . . . . . D====eeeeeeeeeE----R . . . . mulxq 40(%rsi), %r13, %r10 [0,137] . . . . . . . . . D================eER . . . . adoxq %r13, %rcx [0,138] . . . . . . . . . D=================eER . . . . adcxq %r10, %rax [0,139] . . . . . . . . . D====eeeeeeeeeE-----R . . . . mulxq 48(%rsi), %r13, %r10 [0,140] . . . . . . . . . D=================eER. . . . adoxq %r13, %rax [0,141] . . . . . . . . . D==================eER . . . adcxq %r10, %r11 [0,142] . . . . . . . . . D====eeeeeeeeeE------R . . . mulxq 56(%rsi), %rdx, %r10 [0,143] . . . . . . . . . D==================eER . . . adoxq %rdx, %r11 [0,144] . . . . . . . . . D===================eER . . . adcxq %rbx, %r10 [0,145] . . . . . . . . . DeeeeeE---------------R . . . movq 40(%r12), %rdx [0,146] . . . . . . . . . D====================eER . . . adoxq %rbx, %r10 [0,147] . . . . . . . . . .D====eeeeeeeeeE-------R . . . mulxq (%rsi), %r14, %r13 [0,148] . . . . . . . . . .D---------------------R . . . xorl %ebx, %ebx [0,149] . . . . . . . . . .D============eE-------R . . . adoxq %r14, %r15 [0,150] . . . . . . . . . . D============eE------R . . . movq %r15, -96(%rbp) [0,151] . . . . . . . . . . D============eE------R . . . adcxq %r13, %r9 [0,152] . . . . . . . . . . D=====eeeeeeeeeE-----R . . . mulxq 8(%rsi), %r14, %r13 [0,153] . . . . . . . . . . D============eE-----R . . . movq %r9, %r15 [0,154] . . . . . . . . . . D=============eE----R . . . adoxq %r14, %r15 [0,155] . . . . . . . . . . D==============eE---R . . . 
adcxq %r13, %r8 [0,156] . . . . . . . . . . D====eeeeeeeeeE----R . . . mulxq 16(%rsi), %r13, %r9 [0,157] . . . . . . . . . . D==============eE--R . . . adoxq %r13, %r8 [0,158] . . . . . . . . . . D===============eE-R . . . adcxq %r9, %rdi [0,159] . . . . . . . . . . D====eeeeeeeeeE---R . . . mulxq 24(%rsi), %r13, %r9 [0,160] . . . . . . . . . . D===============eER . . . adoxq %r13, %rdi [0,161] . . . . . . . . . . D================eER. . . adcxq %r9, %rcx [0,162] . . . . . . . . . . .D====eeeeeeeeeE---R. . . mulxq 32(%rsi), %r13, %r9 [0,163] . . . . . . . . . . .D================eER . . adoxq %r13, %rcx [0,164] . . . . . . . . . . .D=================eER . . adcxq %r9, %rax [0,165] . . . . . . . . . . . D====eeeeeeeeeE----R . . mulxq 40(%rsi), %r13, %r9 [0,166] . . . . . . . . . . . D=================eER . . adoxq %r13, %rax [0,167] . . . . . . . . . . . D==================eER . . adcxq %r9, %r11 [0,168] . . . . . . . . . . . D====eeeeeeeeeE-----R . . mulxq 48(%rsi), %r13, %r9 [0,169] . . . . . . . . . . . D==================eER. . adoxq %r13, %r11 [0,170] . . . . . . . . . . . D===================eER . adcxq %r9, %r10 [0,171] . . . . . . . . . . . D====eeeeeeeeeE------R . mulxq 56(%rsi), %rdx, %r9 [0,172] . . . . . . . . . . . D===================eER . adoxq %rdx, %r10 [0,173] . . . . . . . . . . . D====================eER . adcxq %rbx, %r9 [0,174] . . . . . . . . . . . D====================eER. adoxq %rbx, %r9 [0,175] . . . . . . . . . . . D----------------------R. xorl %ebx, %ebx [0,176] . . . . . . . . . . . DeeeeeE----------------R. movq 48(%r12), %rdx [0,177] . . . . . . . . . . . .D=====eeeeeeeeeE------R. mulxq (%rsi), %r14, %r13 [0,178] . . . . . . . . . . . .D==========eE---------R. adoxq %r14, %r15 [0,179] . . . . . . . . . . . .D==============eE-----R. adcxq %r13, %r8 [0,180] . . . . . . . . . . . . D==========eE--------R. movq %r15, -104(%rbp) [0,181] . . . . . . . . . . . . D=====eeeeeeeeeE-----R. mulxq 8(%rsi), %r14, %r13 [0,182] . . . . . . . . . . . . D==============eE----R. movq %r8, %r15 [0,183] . . . . . . . . . . . . D==============eE---R. adcxq %r13, %rdi [0,184] . . . . . . . . . . . . D===============eE--R. adoxq %r14, %r15 [0,185] . . . . . . . . . . . . D=====eeeeeeeeeE----R. mulxq 16(%rsi), %r13, %r8 [0,186] . . . . . . . . . . . . D===============eE-R. adoxq %r13, %rdi [0,187] . . . . . . . . . . . . D================eER. adcxq %r8, %rcx [0,188] . . . . . . . . . . . . D=====eeeeeeeeeE---R. mulxq 24(%rsi), %r13, %r8 [0,189] . . . . . . . . . . . . D================eER adoxq %r13, %rcx
28,838
496
jart/cosmopolitan
false
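Mul8x8Adx above is heavily scheduled, but what it computes is plain schoolbook multiplication over 64-bit limbs. Here is a reference model in C, using the GCC/Clang unsigned __int128 extension, that should produce the same 16 little-endian limbs the ADX kernel computes with its two interleaved carry chains (CF via adcx, OF via adox):

#include <stdint.h>

/* 8x8-limb schoolbook multiply: 512-bit x 512-bit -> 1024-bit. */
static void Mul8x8Ref(uint64_t o[16], const uint64_t a[8],
                      const uint64_t b[8]) {
  for (int i = 0; i < 16; ++i) o[i] = 0;
  for (int i = 0; i < 8; ++i) {
    uint64_t carry = 0;
    for (int j = 0; j < 8; ++j) {
      unsigned __int128 t =
          (unsigned __int128)a[i] * b[j] + o[i + j] + carry;
      o[i + j] = (uint64_t)t;        /* low 64 bits stay in place */
      carry = (uint64_t)(t >> 64);   /* high 64 bits ripple up */
    }
    o[i + 8] = carry;
  }
}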
cosmopolitan/libc/nexgen32e/siglongjmp.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2022 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" // Loads previously saved processor state. // // @param rdi points to the jmp_buf // @param esi is returned by setjmp() invocation (coerced nonzero) // @noreturn // @asyncsignalsafe siglongjmp: jmp longjmp .endfn siglongjmp,globl
2,103
30
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/envp.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 sw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" #include "libc/notice.inc" .initbss 300,_init_envp // Global variable holding _start(envp) parameter. __envp: .quad 0 .endobj __envp,globl .previous .init.start 300,_init_envp mov %r14,%rax stosq .init.end 300,_init_envp
2,100
32
jart/cosmopolitan
false
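envp.S above saves the _start(envp) pointer, a NULL-terminated array of "NAME=value" strings, the same shape as environ. A sketch of consuming it directly; the extern declaration is an assumption made for illustration:

#include <string.h>

extern char **__envp;

const char *MyGetEnv(const char *name) {
  size_t n = strlen(name);
  for (char **p = __envp; p && *p; ++p) {
    if (!strncmp(*p, name, n) && (*p)[n] == '=') return *p + n + 1;
  }
  return 0;
}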
cosmopolitan/libc/nexgen32e/kcpuids.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 sw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/runtime/pc.internal.h" #include "libc/dce.h" #include "libc/macros.internal.h" #include "libc/nexgen32e/kcpuids.h" #include "libc/nexgen32e/x86feature.h" .initbss 201,_init_kCpuids // Globally precomputed CPUID. // // This module lets us check CPUID in 0.06ns rather than 51.00ns. // If every piece of native software linked this module, then the // world would be a much better place; since all the alternatives // are quite toilsome. // // @see www.felixcloutier.com/x86/cpuid kCpuids:.long 0,0,0,0 # EAX=0 (Basic Processor Info) .long 0,0,0,0 # EAX=1 (Processor Info) .long 0,0,0,0 # EAX=2 .long 0,0,0,0 # EAX=7 (Extended Features) .long 0,0,0,0 # EAX=0x80000001 (NexGen32e) .long 0,0,0,0 # EAX=0x80000007 (APM) .long 0,0,0,0 # EAX=16h (CPU Frequency) .endobj kCpuids,globl .previous .init.start 201,_init_kCpuids push %rbx push $0 push $0x16 push $0xffffffff80000007 push $0xffffffff80000001 push $7 push $2 push $1 mov %rdi,%r8 xor %eax,%eax 1: xor %ecx,%ecx #ifdef FEATURELESS // It's been reported that GDB reverse debugging doesn't // understand VEX encoding. The workaround is to put: // // CPPFLAGS = -DFEATURELESS // // Inside your ~/.cosmo.mk file. xor %eax,%eax xor %ebx,%ebx xor %edx,%edx #else cpuid #endif stosl xchg %eax,%ebx stosl xchg %eax,%ecx stosl xchg %eax,%edx stosl 2: pop %rax test %eax,%eax # EAX = stacklist->pop() jz 3f # EAX ≠ 0 (EOL sentinel) cmp KCPUIDS(0H,EAX)(%r8),%al # EAX ≤ CPUID.0 max leaf jbe 1b # CPUID too new to probe add $4*4,%rdi jmp 2b 3: nop #if !X86_NEED(AVX2) testb X86_HAVE(AVX)(%r8) jz 5f testb X86_HAVE(OSXSAVE)(%r8) jz 4f xor %ecx,%ecx xgetbv and $XCR0_SSE|XCR0_AVX,%eax cmp $XCR0_SSE|XCR0_AVX,%eax je 5f 4: btr $X86_BIT(AVX),X86_WORD(AVX)(%r8) btr $X86_BIT(AVX2),X86_WORD(AVX2)(%r8) #endif 5: pop %rbx .init.end 201,_init_kCpuids
3,721
99
jart/cosmopolitan
false
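kcpuids.S above trades one serializing CPUID instruction per query for a table load at a known offset. For comparison, this is the cold-path equivalent of a single probe from the init loop, written with the compiler-provided <cpuid.h> helper available on GCC/Clang:

#include <cpuid.h>
#include <stdint.h>

/* One probe of leaf 1, the slow way the init code above avoids
   repeating at runtime. */
static uint32_t CpuidLeaf1Ecx(void) {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 0;
  return ecx;  /* e.g. bit 23 = POPCNT, bit 28 = AVX */
}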
cosmopolitan/libc/nexgen32e/x86info.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_X86INFO_H_ #define COSMOPOLITAN_LIBC_NEXGEN32E_X86INFO_H_ #include "libc/nexgen32e/kcpuids.h" #define kX86CpuStepping ((KCPUIDS(1H, EAX) >> 0) & 15) #define kX86CpuModelid ((KCPUIDS(1H, EAX) >> 4) & 15) #define kX86CpuFamilyid ((KCPUIDS(1H, EAX) >> 8) & 15) #define kX86CpuType ((KCPUIDS(1H, EAX) >> 12) & 3) #define kX86CpuExtmodelid ((KCPUIDS(1H, EAX) >> 16) & 15) #define kX86CpuExtfamilyid ((KCPUIDS(1H, EAX) >> 20) & 255) #define kX86CpuFamily \ (kX86CpuFamilyid + (kX86CpuFamilyid == 15 ? kX86CpuExtfamilyid : 0)) #define kX86CpuModel \ (kX86CpuModelid | \ (kX86CpuFamilyid == 6 || kX86CpuFamilyid == 15 ? kX86CpuExtmodelid : 0) \ << 4) #define kX86ProcessorModelKey \ (kX86CpuExtfamilyid << 12 | kX86CpuFamilyid << 8 | kX86CpuExtmodelid << 4 | \ kX86CpuModelid) #define X86_MARCH_UNKNOWN 0 #define X86_MARCH_CORE2 1 #define X86_MARCH_NEHALEM 2 #define X86_MARCH_WESTMERE 3 #define X86_MARCH_SANDYBRIDGE 4 #define X86_MARCH_IVYBRIDGE 5 #define X86_MARCH_HASWELL 6 #define X86_MARCH_BROADWELL 7 #define X86_MARCH_SKYLAKE 8 #define X86_MARCH_KABYLAKE 9 #define X86_MARCH_CANNONLAKE 10 #define X86_MARCH_ICELAKE 11 #define X86_MARCH_TIGERLAKE 12 #define X86_MARCH_BONNELL 13 #define X86_MARCH_SALTWELL 14 #define X86_MARCH_SILVERMONT 15 #define X86_MARCH_AIRMONT 16 #define X86_MARCH_GOLDMONT 17 #define X86_MARCH_GOLDMONTPLUS 18 #define X86_MARCH_TREMONT 19 #define X86_MARCH_KNIGHTSLANDING 20 #define X86_MARCH_KNIGHTSMILL 21 #define X86_GRADE_UNKNOWN 0 #define X86_GRADE_APPLIANCE 1 #define X86_GRADE_MOBILE 2 #define X86_GRADE_TABLET 3 #define X86_GRADE_DESKTOP 4 #define X86_GRADE_CLIENT 5 #define X86_GRADE_DENSITY 6 #define X86_GRADE_SERVER 7 #define X86_GRADE_SCIENCE 8 struct X86ProcessorModel { short key; unsigned char march; unsigned char grade; }; extern const size_t kX86ProcessorModelCount; extern const struct X86ProcessorModel kX86ProcessorModels[]; const struct X86ProcessorModel *getx86processormodel(short) nosideeffect; #endif /* COSMOPOLITAN_LIBC_NEXGEN32E_X86INFO_H_ */
2,382
69
jart/cosmopolitan
false
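A worked example of the leaf-1 EAX decoding that x86info.h's macros perform; the constant 0x000306C3 is a well-known Haswell signature (base family 6, extended model 3, base model 0xC, stepping 3, so effective model 0x3C):

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as the kX86CpuModel macro above. */
static unsigned EffectiveModel(uint32_t eax) {
  unsigned modelid = (eax >> 4) & 15;
  unsigned familyid = (eax >> 8) & 15;
  unsigned extmodel = (eax >> 16) & 15;
  return modelid | ((familyid == 6 || familyid == 15) ? extmodel : 0) << 4;
}

int main(void) {
  assert(EffectiveModel(0x000306C3) == 0x3C);  /* Haswell */
  return 0;
}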
cosmopolitan/libc/nexgen32e/lolendian.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" // @fileoverview Byte-order conversion functions. // // Endianness is deceptively complicated to the uninitiated. Many // helpers have been written by our top minds to address perceived // difficulties. These ones got through standardization processes. // To protect their legacy, all 19 functions have been implemented // in just 17 bytes. // // @see READ32LE(), READ32BE(), etc. // @asyncsignalsafe bswap_64: htobe64: htole64: be64toh: le64toh:mov %rdi,%rax bswap %rax ret .endfn le64toh,globl .endfn be64toh,globl .endfn htole64,globl .endfn htobe64,globl .endfn bswap_64,globl bswap_32: htobe32: htole32: be32toh: le32toh: ntohl: htonl: mov %edi,%eax bswap %eax ret .endfn htonl,globl .endfn htole32,globl .endfn le32toh,globl .endfn be32toh,globl .endfn htobe32,globl .endfn ntohl,globl .endfn bswap_32,globl bswap_16: htobe16: htole16: be16toh: le16toh: ntohs: htons: movzwl %di,%eax xchg %al,%ah ret .endfn htobe16,globl .endfn htons,globl .endfn le16toh,globl .endfn be16toh,globl .endfn htole16,globl .endfn ntohs,globl .endfn bswap_16,globl
2,950
78
jart/cosmopolitan
false
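lolendian.S above does full-width byte reversal with a single bswap or xchg. The portable C statement of the same operation, which modern compilers recognize and compile back down to one bswap, is:

#include <stdint.h>

/* Reverse the four bytes of x: b0.b1.b2.b3 -> b3.b2.b1.b0. */
static uint32_t Swap32(uint32_t x) {
  return x << 24 | (x & 0xff00) << 8 | (x >> 8 & 0xff00) | x >> 24;
}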
cosmopolitan/libc/nexgen32e/setlongerjmp.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2022 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" // Saves caller CPU state to cacheline. // // @param rdi points to jmp_buf // @return eax contains 0 when set, and 1 if jumped // @return rdx contains value passed to longerjmp() // @returnstwice setlongerjmp: lea 8(%rsp),%rax mov %rax,(%rdi) mov %rbx,8(%rdi) mov %rbp,16(%rdi) mov %r12,24(%rdi) mov %r13,32(%rdi) mov %r14,40(%rdi) mov %r15,48(%rdi) mov (%rsp),%rax mov %rax,56(%rdi) xor %eax,%eax xor %edx,%edx ret .endfn setlongerjmp,globl
2,327
42
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/msr.internal.h
#ifndef COSMOPOLITAN_LIBC_MSR_H_ #define COSMOPOLITAN_LIBC_MSR_H_ /*─────────────────────────────────────────────────────────────────────────────╗ │ cosmopolitan § model specific registers │ ╚─────────────────────────────────────────────────────────────────────────────*/ #define MSR_P5_TSC 0x10 /* time stamp register */ #define MSR_P5_CESR 0x11 /* control and event select register */ #define MSR_P5_CTR0 0x12 /* counter #0 */ #define MSR_P5_CTR1 0x13 /* counter #1 */ #define MSR_P5_CESR_PC 0x0200 /* pin control */ #define MSR_P5_CESR_CC 0x01C0 /* counter control mask */ #define MSR_P5_CESR_ES 0x003F /* event control mask */ #define MSR_P5_CESR_SHIFT 16 /* shift to get counter 1 */ #define MSR_P5_CESR_MASK /* mask Counter */ \ (MSR_P5_CESR_PC | MSR_P5_CESR_CC | MSR_P5_CESR_ES) #define MSR_CORE_THREAD_COUNT 0x35 #define MSR_FLEX_RATIO 0x194 #define MSR_IA32_APERF 0xE8 #define MSR_IA32_APIC_BASE 0x1b #define MSR_IA32_APIC_BASE_BASE (0xfffff << 12) #define MSR_IA32_APIC_BASE_BSP (1 << 8) #define MSR_IA32_APIC_BASE_ENABLE (1 << 11) #define MSR_IA32_APIC_BASE_EXTENDED (1 << 10) #define MSR_IA32_BBL_CR_CTL 0x119 #define MSR_IA32_BIOS_SIGN_ID 0x8b #define MSR_IA32_CLOCK_MODULATION 0x19a #define MSR_IA32_CORE_C3_RESIDENCY 0x3FC #define MSR_IA32_CORE_C6_RESIDENCY 0x3FD #define MSR_IA32_CORE_C7_RESIDENCY 0x3FE #define MSR_IA32_CR_PAT 0x277 #define MSR_IA32_CSTAR 0xC0000083 #define MSR_IA32_DDR_ENERGY_STATUS 0x619 #define MSR_IA32_DEBUGCTLMSR 0x1d9 #define MSR_IA32_DS_AREA 0x600 #define MSR_IA32_EBL_CR_POWERON 0x2a #define MSR_IA32_EFER 0xC0000080 #define MSR_IA32_EFER_LMA 0x00000400 #define MSR_IA32_EFER_LME 0x00000100 #define MSR_IA32_EFER_NXE 0x00000800 #define MSR_IA32_EFER_SCE 0x00000001 #define MSR_IA32_EVNTSEL0 0x186 #define MSR_IA32_EVNTSEL1 0x187 #define MSR_IA32_EVNTSEL2 0x188 #define MSR_IA32_EVNTSEL3 0x189 #define MSR_IA32_FEATCTL_CSTATE_SMI (1 << 16) #define MSR_IA32_FEATCTL_LOCK (1 << 0) #define MSR_IA32_FEATCTL_VMXON (1 << 2) #define MSR_IA32_FEATCTL_VMXON_SMX (1 << 1) #define MSR_IA32_FEATURE_CONTROL 0x3a #define MSR_IA32_FMASK 0xC0000084 #define MSR_IA32_FS_BASE 0xC0000100 #define MSR_IA32_GS_BASE 0xC0000101 #define MSR_IA32_GS_BASE_KERNEL 0xC0000102 #define MSR_IA32_GT_PERF_LIMIT_REASONS 0x6B0 #define MSR_IA32_IA_PERF_LIMIT_REASONS 0x690 #define MSR_IA32_IA_PERF_LIMIT_REASONS_SKL 0x64F #define MSR_IA32_LASTBRANCHFROMIP 0x1db #define MSR_IA32_LASTBRANCHTOIP 0x1dc #define MSR_IA32_LASTINTFROMIP 0x1dd #define MSR_IA32_LASTINTTOIP 0x1de #define MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER 0x61D #define MSR_IA32_LSTAR 0xC0000082 #define MSR_IA32_MC0_ADDR 0x402 #define MSR_IA32_MC0_CTL 0x400 #define MSR_IA32_MC0_MISC 0x403 #define MSR_IA32_MC0_STATUS 0x401 #define MSR_IA32_MCG_CAP 0x179 #define MSR_IA32_MCG_CTL 0x17b #define MSR_IA32_MCG_STATUS 0x17a #define MSR_IA32_MISC_ENABLE 0x1a0 #define MSR_IA32_MPERF 0xE7 #define MSR_IA32_MTRRCAP 0xfe #define MSR_IA32_MTRR_DEF_TYPE 0x2ff #define MSR_IA32_MTRR_FIX16K_80000 0x258 #define MSR_IA32_MTRR_FIX16K_A0000 0x259 #define MSR_IA32_MTRR_FIX4K_C0000 0x268 #define MSR_IA32_MTRR_FIX4K_C8000 0x269 #define MSR_IA32_MTRR_FIX4K_D0000 0x26a #define MSR_IA32_MTRR_FIX4K_D8000 0x26b #define MSR_IA32_MTRR_FIX4K_E0000 0x26c #define MSR_IA32_MTRR_FIX4K_E8000 0x26d #define MSR_IA32_MTRR_FIX4K_F0000 0x26e #define MSR_IA32_MTRR_FIX4K_F8000 0x26f #define MSR_IA32_MTRR_FIX64K_00000 0x250 #define MSR_IA32_MTRR_PHYSBASE(n) (0x200 + 2 * (n)) #define MSR_IA32_MTRR_PHYSMASK(n) (0x200 + 2 * (n) + 1) #define MSR_IA32_P5_MC_ADDR 0 #define MSR_IA32_P5_MC_TYPE 1 #define 
MSR_IA32_PACKAGE_THERM_INTERRUPT 0x1b2 #define MSR_IA32_PACKAGE_THERM_STATUS 0x1b1 #define MSR_IA32_PERFCTR0 0xc1 #define MSR_IA32_PERFCTR1 0xc2 #define MSR_IA32_PERFCTR3 0xc3 #define MSR_IA32_PERFCTR4 0xc4 #define MSR_IA32_PERF_CTL 0x199 #define MSR_IA32_PERF_FIXED_CTR0 0x309 #define MSR_IA32_PERF_FIXED_CTR_CTRL 0x38D #define MSR_IA32_PERF_GLOBAL_CTRL 0x38F #define MSR_IA32_PERF_GLOBAL_OVF_CTRL 0x390 #define MSR_IA32_PERF_GLOBAL_STATUS 0x38E #define MSR_IA32_PERF_STS 0x198 #define MSR_IA32_PKG_C10_RESIDENCY 0x632 #define MSR_IA32_PKG_C2_RESIDENCY 0x60D #define MSR_IA32_PKG_C3_RESIDENCY 0x3F8 #define MSR_IA32_PKG_C6_RESIDENCY 0x3F9 #define MSR_IA32_PKG_C7_RESIDENCY 0x3FA #define MSR_IA32_PKG_C8_RESIDENCY 0x630 #define MSR_IA32_PKG_C9_RESIDENCY 0x631 #define MSR_IA32_PKG_ENERGY_STATUS 0x611 #define MSR_IA32_PKG_POWER_SKU_UNIT 0x606 #define MSR_IA32_PLATFORM_ID 0x17 #define MSR_IA32_PP0_ENERGY_STATUS 0x639 #define MSR_IA32_PP1_ENERGY_STATUS 0x641 #define MSR_IA32_RING_PERF_STATUS 0x621 #define MSR_IA32_STAR 0xC0000081 #define MSR_IA32_SYSENTER_CS 0x174 #define MSR_IA32_SYSENTER_EIP 0x176 #define MSR_IA32_SYSENTER_ESP 0x175 #define MSR_IA32_TSC_AUX 0xC0000103 #define MSR_IA32_TSC_DEADLINE 0x6e0 #define MSR_IA32_UCODE_REV MSR_IA32_BIOS_SIGN_ID #define MSR_IA32_UCODE_WRITE MSR_IA32_UPDT_TRIG #define MSR_IA32_UPDT_TRIG 0x79 #define MSR_IA32_VMX_BASE 0x480 #define MSR_IA32_VMX_BASIC MSR_IA32_VMX_BASE #define MSR_IA32_VMX_CR0_FIXED0 MSR_IA32_VMX_BASE + 6 #define MSR_IA32_VMX_CR0_FIXED1 MSR_IA32_VMX_BASE + 7 #define MSR_IA32_VMX_CR4_FIXED0 MSR_IA32_VMX_BASE + 8 #define MSR_IA32_VMX_CR4_FIXED1 MSR_IA32_VMX_BASE + 9 #define MSR_IA32_VMX_ENTRY_CTLS MSR_IA32_VMX_BASE + 4 #define MSR_IA32_VMX_EPT_VPID_CAP MSR_IA32_VMX_BASE + 12 #define MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT 21 #define MSR_IA32_VMX_EXIT_CTLS MSR_IA32_VMX_BASE + 3 #define MSR_IA32_VMX_MISC MSR_IA32_VMX_BASE + 5 #define MSR_IA32_VMX_PINBASED_CTLS MSR_IA32_VMX_BASE + 1 #define MSR_IA32_VMX_PROCBASED_CTLS MSR_IA32_VMX_BASE + 2 #define MSR_IA32_VMX_PROCBASED_CTLS2 MSR_IA32_VMX_BASE + 11 #define MSR_IA32_VMX_TRUE_PINBASED_CTLS MSR_IA32_VMX_BASE + 13 #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS MSR_IA32_VMX_BASE + 14 #define MSR_IA32_VMX_TRUE_VMENTRY_CTLS MSR_IA32_VMX_BASE + 16 #define MSR_IA32_VMX_TRUE_VMEXIT_CTLS MSR_IA32_VMX_BASE + 15 #define MSR_IA32_VMX_VMCS_ENUM MSR_IA32_VMX_BASE + 10 #define MSR_IA32_VMX_VMFUNC MSR_IA32_VMX_BASE + 17 #define MSR_P5_CESR_CC_CLOCK 0x0100 /* Clock Counting (otherwise Event) */ #define MSR_P5_CESR_CC_CPL 0x00C0 /* Count regardless of the CPL */ #define MSR_P5_CESR_CC_CPL012 0x0040 /* Count if the CPL == 0, 1, 2 */ #define MSR_P5_CESR_CC_CPL3 0x0080 /* Count if the CPL == 3 */ #define MSR_P5_CESR_CC_DISABLE 0x0000 /* Disable counter */ #define MSR_P5_CESR_ES_AGI 0x011111 /* Stall because of AGI */ #define MSR_P5_CESR_ES_BANK_CONFLICTS 0x001010 /* Bank conflicts */ #define MSR_P5_CESR_ES_BRANCHE 0x010010 /* Branches */ #define MSR_P5_CESR_ES_BRANCHE_BTB 0x010100 /* Taken branch or BTB Hit */ #define MSR_P5_CESR_ES_BREAK_DR0 0x100011 /* Breakpoint matches on DR0 */ #define MSR_P5_CESR_ES_BREAK_DR1 0x100100 /* Breakpoint matches on DR1 */ #define MSR_P5_CESR_ES_BREAK_DR2 0x100101 /* Breakpoint matches on DR2 */ #define MSR_P5_CESR_ES_BREAK_DR3 0x100110 /* Breakpoint matches on DR3 */ #define MSR_P5_CESR_ES_BTB_HIT 0x010011 /* BTB Hits */ #define MSR_P5_CESR_ES_BUS_CYCLE 0x011000 /* Clocks while bus cycle */ #define MSR_P5_CESR_ES_CACHE_SNOOP_HIT 0x001000 /* Data cache snoop hits */ #define MSR_P5_CESR_ES_CODE_CACHE_MISS 
0x001110 /* Code Cache miss */ #define MSR_P5_CESR_ES_CODE_READ 0x001100 /* Code Read */ #define MSR_P5_CESR_ES_CODE_TLB_MISS 0x001101 /* Code TLB miss */ #define MSR_P5_CESR_ES_DATA_CACHE_WB 0x000110 /* Cache lines written back */ #define MSR_P5_CESR_ES_DATA_MEM_READ 0x011010 /* Pipeline waiting for read */ #define MSR_P5_CESR_ES_DATA_READ 0x000000 /* Data Read */ #define MSR_P5_CESR_ES_DATA_READ_MISS 0x000011 /* Data Read Miss */ #define MSR_P5_CESR_ES_DATA_RW 0x101000 /* Data Read or Write */ #define MSR_P5_CESR_ES_DATA_RW_MISS 0x101001 /* Data Read or Write Miss */ #define MSR_P5_CESR_ES_DATA_TLB_MISS 0x000010 /* Data TLB Miss */ #define MSR_P5_CESR_ES_DATA_WRITE 0x000001 /* Data Write */ #define MSR_P5_CESR_ES_DATA_WRITE_MISS 0x000100 /* Data Write Miss */ #define MSR_P5_CESR_ES_EXTERNAL_SNOOP 0x000111 /* External Snoop */ #define MSR_P5_CESR_ES_FLOP 0x100010 /* Floating Point operations */ #define MSR_P5_CESR_ES_FULL_WRITE_BUF 0x011001 /* Clocks while full wrt buf */ #define MSR_P5_CESR_ES_HARDWARE_IT 0x100111 /* Hardware interrupts */ #define MSR_P5_CESR_ES_HIT_EM 0x000101 /* Write (hit) to M|E state */ #define MSR_P5_CESR_ES_INSTRUCTION 0x010110 /* Instruction executed */ #define MSR_P5_CESR_ES_INSTRUCTION_V 0x010111 /* Inst. executed (v-pipe) */ #define MSR_P5_CESR_ES_IO_CYCLE 0x011101 /* I/O Read or Write cycles */ #define MSR_P5_CESR_ES_LOCKED_CYCLE 0x011100 /* Locked bus cycles */ #define MSR_P5_CESR_ES_MEM_ACCESS_PIPE 0x001001 /* mem access both pipes */ #define MSR_P5_CESR_ES_MISALIGNED 0x001011 /* Misaligned Memory or I/O */ #define MSR_P5_CESR_ES_NON_CACHEABLE 0x011110 /* Non-cacheable Mem. read */ #define MSR_P5_CESR_ES_PIPELINE_FLUSH 0x010101 /* Pipeline Flushes */ #define MSR_P5_CESR_ES_SEGMENT_LOADED 0x001111 /* Any segment reg. loaded */ #define MSR_P5_CESR_ES_WRITE_EM 0x011011 /* Stall on write E|M state */ #define MSR_PLATFORM_INFO 0xce #endif /* COSMOPOLITAN_LIBC_MSR_H_ */
11,699
192
jart/cosmopolitan
false
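The constants in msr.internal.h above name registers accessed with the rdmsr/wrmsr instructions, which raise #GP outside ring 0; they are only usable from kernel or bare-metal contexts (or indirectly via OS interfaces such as Linux's /dev/cpu/*/msr). A minimal inline-asm sketch of the read side:

#include <stdint.h>

/* Ring-0 only: ECX selects the MSR, result comes back in EDX:EAX.
   E.g. RdMsr(0x1b) reads MSR_IA32_APIC_BASE. */
static inline uint64_t RdMsr(uint32_t msr) {
  uint32_t lo, hi;
  asm volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
  return (uint64_t)hi << 32 | lo;
}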
cosmopolitan/libc/nexgen32e/khalfcache3.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" #ifdef __x86_64__ .initbss 202,_init_kHalfCache3 // Half size of level 3 cache in bytes. kHalfCache3: .quad 0 .endobj kHalfCache3,globl .previous .init.start 202,_init_kHalfCache3 cmpl $3,kCpuids(%rip) jbe 3f xor %r8d,%r8d mov $4,%r8d 1: mov %r8d,%eax mov %r8d,%ecx push %rbx cpuid mov %ebx,%r9d pop %rbx test $31,%al je 3f cmp $99,%al jne 2f mov %r9d,%eax mov %r9d,%edx inc %ecx shr $12,%r9d shr $22,%eax and $0x0fff,%edx and $0x03ff,%r9d inc %eax inc %edx imul %edx,%eax imul %ecx,%eax lea 1(%r9),%ecx imul %ecx,%eax jmp 4f 2: inc %r8d jmp 1b 3: mov $0x00400000,%eax 4: shr %eax stosq .init.end 202,_init_kHalfCache3 #else .rodata .balign 8 kHalfCache3: .quad 4 * 1024 * 1024 .endobj kHalfCache3,globl .previous #endif /* __x86_64__ */
2,654
76
jart/cosmopolitan
false
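khalfcache3.S above walks CPUID leaf 4 subleaves until it finds the unified level-3 cache (type/level byte 99 == 0x63), multiplies out the geometry, then halves the result (the final shr). The same computation in C via <cpuid.h>; note this sketch matches on level 3 of any cache type, a slight relaxation of the assembly's exact unified-cache check:

#include <cpuid.h>
#include <stdint.h>

/* Intel leaf 4 formula: ways * partitions * line size * sets. */
static uint64_t L3CacheSize(void) {
  for (unsigned i = 0;; ++i) {
    unsigned eax, ebx, ecx, edx;
    if (!__get_cpuid_count(4, i, &eax, &ebx, &ecx, &edx)) return 0;
    if (!(eax & 31)) return 0;            /* type 0: no more caches */
    if (((eax >> 5) & 7) != 3) continue;  /* want cache level 3 */
    uint64_t ways  = (ebx >> 22 & 0x3ff) + 1;
    uint64_t parts = (ebx >> 12 & 0x3ff) + 1;
    uint64_t line  = (ebx & 0xfff) + 1;
    return ways * parts * line * ((uint64_t)ecx + 1);
  }
}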
cosmopolitan/libc/nexgen32e/tinywcsnlen.greg.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" // 32-bit strnlen that's tiny and near optimal if data's tiny. // // @param RDI is wchar_t *s // @param RSI is size_t n // @param EAX is unsigned length // @see libc/nexgen32e/strsak32.S tinywcsnlen: .leafprologue .profilable xor %eax,%eax 1: cmp %esi,%eax jae 2f cmpl $0,(%rdi,%rax,4) jz 2f inc %eax jmp 1b 2: .leafepilogue .endfn tinywcsnlen,globl
2,230
39
jart/cosmopolitan
false
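tinywcsnlen above, rendered as C for reference; the sketch assumes a 32-bit wchar_t, which is what cosmopolitan uses:

#include <stddef.h>
#include <stdint.h>

/* Count 32-bit characters until a NUL or the limit, whichever first. */
static unsigned TinyWcsnLen(const int32_t *s, size_t n) {
  unsigned i = 0;
  while (i < n && s[i]) ++i;
  return i;
}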
cosmopolitan/libc/nexgen32e/nt2sysv.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" .text.windows // Translates function call from code built w/ MS-style compiler. // // This wraps WinMain() and callback functions passed to Win32 API. // Please note an intermediary jump slot is needed to set %rax. // // @param %rax is function address // @param %rcx,%rdx,%r8,%r9 // @return %rax,%xmm0 // @note slower than __sysv2nt // @see NT2SYSV() macro __nt2sysv: push %rbp mov %rsp,%rbp // TODO(jart): We should probably find some way to use our own // stack when Windows delivers signals ;_; .profilable sub $0x100,%rsp push %rbx push %rdi push %rsi pushf # TODO(jart): Do we need it? lea -0x80(%rbp),%rdi call _savexmm mov %rcx,%rdi mov %rdx,%rsi mov %r8,%rdx mov %r9,%rcx call *%rax lea -0x80(%rbp),%rdi call _loadxmm popf pop %rsi pop %rdi pop %rbx leave ret .endfn __nt2sysv,globl,hidden
2,710
59
jart/cosmopolitan
false
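__nt2sysv above is register shuffling plus state save/restore. A conceptual C model of just the argument remapping follows; the xmm and flags preservation has no C analogue, and the names here are illustrative rather than part of any API. A Microsoft-ABI caller passes its first four arguments in (RCX, RDX, R8, R9), while a SysV callee expects them in (RDI, RSI, RDX, RCX); the thunk forwards the same values under the new register names.

typedef long sysv_fn_t(long, long, long, long);

/* Model only: the real thunk receives f in %rax via a jump slot. */
long Nt2SysvModel(sysv_fn_t *f, long rcx, long rdx, long r8, long r9) {
  return f(rcx, rdx, r8, r9);  /* same values, renamed registers */
}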
cosmopolitan/libc/nexgen32e/sha512.S
///////////////////////////////////////////////////////////////////////// // Implement fast SHA-512 with AVX2 instructions. (x86_64) // // Copyright (C) 2013 Intel Corporation. // // Authors: // James Guilford <[email protected]> // Kirk Yap <[email protected]> // David Cote <[email protected]> // Tim Chen <[email protected]> // // This software is available to you under a choice of one of two // licenses. You may choose to be licensed under the terms of the GNU // General Public License (GPL) Version 2, available from the file // COPYING in the main directory of this source tree, or the // OpenIB.org BSD license below: // // Redistribution and use in source and binary forms, with or // without modification, are permitted provided that the following // conditions are met: // // - Redistributions of source code must retain the above // copyright notice, this list of conditions and the following // disclaimer. // // - Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials // provided with the distribution. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS // BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // ///////////////////////////////////////////////////////////////////////// // // This code is described in an Intel White-Paper: // "Fast SHA-512 Implementations on Intel Architecture Processors" // // To find it, surf to http://www.intel.com/p/en_US/embedded // and search for that title. 
// ///////////////////////////////////////////////////////////////////////// // This code schedules 1 blocks at a time, with 4 lanes per block ///////////////////////////////////////////////////////////////////////// #include "libc/macros.internal.h" .ident "\n\ AVX2 SHA2 (BSD-2 License)\n\ Copyright 2013 Intel Corporation\n" .include "libc/disclaimer.inc" # Virtual Registers Y_0 = %ymm4 Y_1 = %ymm5 Y_2 = %ymm6 Y_3 = %ymm7 YTMP0 = %ymm0 YTMP1 = %ymm1 YTMP2 = %ymm2 YTMP3 = %ymm3 YTMP4 = %ymm8 XFER = YTMP0 BYTE_FLIP_MASK = %ymm9 # 1st arg is %rdi, which is saved to the stack and accessed later via %r12 CTX1 = %rdi CTX2 = %r12 # 2nd arg INP = %rsi # 3rd arg NUM_BLKS = %rdx c = %rcx d = %r8 e = %rdx y3 = %rsi TBL = %rdi # clobbers CTX1 a = %rax b = %rbx f = %r9 g = %r10 h = %r11 old_h = %r11 T1 = %r12 # clobbers CTX2 y0 = %r13 y1 = %r14 y2 = %r15 # Local variables (stack frame) XFER_SIZE = 4*8 SRND_SIZE = 1*8 INP_SIZE = 1*8 INPEND_SIZE = 1*8 CTX_SIZE = 1*8 RSPSAVE_SIZE = 1*8 GPRSAVE_SIZE = 5*8 frame_XFER = 0 frame_SRND = frame_XFER + XFER_SIZE frame_INP = frame_SRND + SRND_SIZE frame_INPEND = frame_INP + INP_SIZE frame_CTX = frame_INPEND + INPEND_SIZE frame_RSPSAVE = frame_CTX + CTX_SIZE frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE frame_size = frame_GPRSAVE + GPRSAVE_SIZE ## assume buffers not aligned #define VMOVDQ vmovdqu # addm [mem], reg # Add reg to mem using reg-mem add and store .macro addm p1 p2 add \p1, \p2 mov \p2, \p1 .endm # COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask # Load ymm with mem and byte swap each dword .macro COPY_YMM_AND_BSWAP p1 p2 p3 VMOVDQ \p2, \p1 vpshufb \p3, \p1, \p1 .endm # rotate_Ys # Rotate values of symbols Y0...Y3 .macro rotate_Ys Y_ = Y_0 Y_0 = Y_1 Y_1 = Y_2 Y_2 = Y_3 Y_3 = Y_ .endm # RotateState .macro RotateState # Rotate symbols a..h right old_h = h TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm # macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL # YDST = {YSRC1, YSRC2} >> RVAL*8 .macro MY_VPALIGNR YDST YSRC1 YSRC2 RVAL vperm2f128 $0x3, \YSRC2, \YSRC1, \YDST # YDST = {YS1_LO, YS2_HI} vpalignr $\RVAL, \YSRC2, \YDST, \YDST # YDST = {YDS1, YS2} >> RVAL*8 .endm .macro FOUR_ROUNDS_AND_SCHED ################################### RND N + 0 ######################################### # Extract w[t-7] MY_VPALIGNR YTMP0, Y_3, Y_2, 8 # YTMP0 = W[-7] # Calculate w[t-16] + w[t-7] vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16] # Extract w[t-15] MY_VPALIGNR YTMP1, Y_1, Y_0, 8 # YTMP1 = W[-15] # Calculate sigma0 # Calculate w[t-15] ror 1 vpsrlq $1, YTMP1, YTMP2 vpsllq $(64-1), YTMP1, YTMP3 vpor YTMP2, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 # Calculate w[t-15] shr 7 vpsrlq $7, YTMP1, YTMP4 # YTMP4 = W[-15] >> 7 mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B add frame_XFER(%rsp),h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 xor g, y2 # y2 = f^g # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $39, a, y1 # y1 = a >> 39 # S0A add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # 
d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState ################################### RND N + 1 ######################################### # Calculate w[t-15] ror 8 vpsrlq $8, YTMP1, YTMP2 vpsllq $(64-8), YTMP1, YTMP1 vpor YTMP2, YTMP1, YTMP1 # YTMP1 = W[-15] ror 8 # XOR the three components vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7 vpxor YTMP1, YTMP3, YTMP1 # YTMP1 = s0 # Add three components, w[t-16], w[t-7] and sigma0 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 # Move to appropriate lanes for calculating w[16] and w[17] vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA} # Move to appropriate lanes for calculating w[18] and w[19] vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00} # Calculate w[16] and w[17] in both 128 bit lanes # Calculate sigma1 for w[16] and w[17] on both 128 bit lanes vperm2f128 $0x11, Y_3, Y_3, YTMP2 # YTMP2 = W[-2] {BABA} vpsrlq $6, YTMP2, YTMP4 # YTMP4 = W[-2] >> 6 {BABA} mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B add 1*8+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 xor g, y2 # y2 = f^g # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $39, a, y1 # y1 = a >> 39 # S0A and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState ################################### RND N + 2 ######################################### vpsrlq $19, YTMP2, YTMP3 # YTMP3 = W[-2] >> 19 {BABA} vpsllq $(64-19), YTMP2, YTMP1 # YTMP1 = W[-2] << 19 {BABA} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {BABA} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA} vpsrlq $61, YTMP2, YTMP3 # YTMP3 = W[-2] >> 61 {BABA} vpsllq $(64-61), YTMP2, YTMP1 # YTMP1 = W[-2] << 61 {BABA} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {BABA} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^ # (W[-2] ror 61) ^ (W[-2] >> 6) {BABA} # Add sigma1 to the other compunents to get w[16] and w[17] vpaddq YTMP4, Y_0, Y_0 # Y_0 = {W[1], W[0], W[1], W[0]} # Calculate sigma1 for w[18] and w[19] for upper 128 bit lane vpsrlq $6, Y_0, YTMP4 # YTMP4 = W[-2] >> 6 {DC--} mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A add 2*8+frame_XFER(%rsp), h # h = k + w + h # -- rorx $18, e, y1 # y1 = e >> 18 # S1B or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH xor g, y2 # y2 = f^g # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 and e, y2 # y2 = (f^g)&e # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $39, a, y1 # y1 = a >> 39 # S0A xor g, y2 # y2 = CH = ((f^g)&e)^g # CH xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) 
# S0 xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState ################################### RND N + 3 ######################################### vpsrlq $19, Y_0, YTMP3 # YTMP3 = W[-2] >> 19 {DC--} vpsllq $(64-19), Y_0, YTMP1 # YTMP1 = W[-2] << 19 {DC--} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {DC--} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--} vpsrlq $61, Y_0, YTMP3 # YTMP3 = W[-2] >> 61 {DC--} vpsllq $(64-61), Y_0, YTMP1 # YTMP1 = W[-2] << 61 {DC--} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {DC--} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^ # (W[-2] ror 61) ^ (W[-2] >> 6) {DC--} # Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19] # to newly calculated sigma1 to get w[18] and w[19] vpaddq YTMP4, YTMP0, YTMP2 # YTMP2 = {W[3], W[2], --, --} # Form w[19, w[18], w17], w[16] vpblendd $0xF0, YTMP2, Y_0, Y_0 # Y_0 = {W[3], W[2], W[1], W[0]} mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B add 3*8+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 xor g, y2 # y2 = f^g # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A add y0, y2 # y2 = S1 + CH # -- xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- rorx $28, a, T1 # T1 = (a >> 28) # S0 xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState rotate_Ys .endm .macro DO_4ROUNDS ################################### RND N + 0 ######################################### mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- RotateState ################################### RND N + 1 ######################################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH 
xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add 8*1+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- RotateState ################################### RND N + 2 ######################################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add 8*2+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- RotateState ################################### RND N + 3 ######################################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add 8*3+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState .endm ######################################################################## # void sha512_transform_rorx(sha512_state *state, const u8 *data, int blocks) # Purpose: Updates the SHA512 digest stored at "state" with the message # stored in "data". # The size of the message pointed to by "data" must be an integer multiple # of SHA512 message blocks. 
# "blocks" is the message length in SHA512 blocks ######################################################################## sha512_transform_rorx: push %rbp mov %rsp,%rbp .profilable # Allocate Stack Space mov %rsp, %rax sub $frame_size, %rsp and $~(0x20 - 1), %rsp mov %rax, frame_RSPSAVE(%rsp) # Save GPRs mov %rbx, 8*0+frame_GPRSAVE(%rsp) mov %r12, 8*1+frame_GPRSAVE(%rsp) mov %r13, 8*2+frame_GPRSAVE(%rsp) mov %r14, 8*3+frame_GPRSAVE(%rsp) mov %r15, 8*4+frame_GPRSAVE(%rsp) shl $7, NUM_BLKS # convert to bytes jz .Ldone_hash add INP, NUM_BLKS # pointer to end of data mov NUM_BLKS, frame_INPEND(%rsp) ## load initial digest mov 8*0(CTX1), a mov 8*1(CTX1), b mov 8*2(CTX1), c mov 8*3(CTX1), d mov 8*4(CTX1), e mov 8*5(CTX1), f mov 8*6(CTX1), g mov 8*7(CTX1), h # save %rdi (CTX) before it gets clobbered mov %rdi, frame_CTX(%rsp) vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK .Loop0: lea kSha512(%rip), TBL ## byte swap first 16 dwords COPY_YMM_AND_BSWAP Y_0, (INP), BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_1, 1*32(INP), BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_2, 2*32(INP), BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_3, 3*32(INP), BYTE_FLIP_MASK mov INP, frame_INP(%rsp) ## schedule 64 input dwords, by doing 12 rounds of 4 each movq $4, frame_SRND(%rsp) .balign 16 .Loop1: vpaddq (TBL), Y_0, XFER vmovdqa XFER, frame_XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddq 1*32(TBL), Y_0, XFER vmovdqa XFER, frame_XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddq 2*32(TBL), Y_0, XFER vmovdqa XFER, frame_XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddq 3*32(TBL), Y_0, XFER vmovdqa XFER, frame_XFER(%rsp) add $(4*32), TBL FOUR_ROUNDS_AND_SCHED subq $1, frame_SRND(%rsp) jne .Loop1 movq $2, frame_SRND(%rsp) .Loop2: vpaddq (TBL), Y_0, XFER vmovdqa XFER, frame_XFER(%rsp) DO_4ROUNDS vpaddq 1*32(TBL), Y_1, XFER vmovdqa XFER, frame_XFER(%rsp) add $(2*32), TBL DO_4ROUNDS vmovdqa Y_2, Y_0 vmovdqa Y_3, Y_1 subq $1, frame_SRND(%rsp) jne .Loop2 mov frame_CTX(%rsp), CTX2 addm 8*0(CTX2), a addm 8*1(CTX2), b addm 8*2(CTX2), c addm 8*3(CTX2), d addm 8*4(CTX2), e addm 8*5(CTX2), f addm 8*6(CTX2), g addm 8*7(CTX2), h mov frame_INP(%rsp), INP add $128, INP cmp frame_INPEND(%rsp), INP jne .Loop0 .Ldone_hash: # Restore GPRs mov 8*0+frame_GPRSAVE(%rsp), %rbx mov 8*1+frame_GPRSAVE(%rsp), %r12 mov 8*2+frame_GPRSAVE(%rsp), %r13 mov 8*3+frame_GPRSAVE(%rsp), %r14 mov 8*4+frame_GPRSAVE(%rsp), %r15 # Restore Stack Pointer mov frame_RSPSAVE(%rsp), %rsp pop %rbp ret .endfn sha512_transform_rorx,globl ######################################################################## ### Binary Data .rodata.cst32 # Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607 .octa 0x18191a1b1c1d1e1f1011121314151617 .rodata.cst32 MASK_YMM_LO: .octa 0x00000000000000000000000000000000 .octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
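// Illustrative usage sketch (assumptions flagged below; not part of the
// original file): the function consumes whole 128-byte blocks and updates
// the digest in place. The loads from 8*0(CTX1)..8*7(CTX1) above show the
// eight u64 digest words sit at offset 0 of the context, so a caller in C
// only needs those up front. The struct name and layout here are assumed
// for illustration, not a documented API.
//
//	struct sha512_state { uint64_t state[8]; };  /* assumed layout */
//	void sha512_transform_rorx(struct sha512_state *, const uint8_t *, int);
//
//	struct sha512_state st = {{0x6a09e667f3bcc908, /* ...SHA-512 IV... */}};
//	sha512_transform_rorx(&st, buf, 2);  /* buf holds 2 padded 128-byte blocks */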
22,943
706
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/missingno.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
│vi: set et ft=asm ts=8 sw=8 fenc=utf-8                                     :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for        │
│ any purpose with or without fee is hereby granted, provided that the        │
│ above copyright notice and this permission notice appear in all copies.     │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL               │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED               │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE            │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL        │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR       │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER              │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR            │
│ PERFORMANCE OF THIS SOFTWARE.                                               │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/macros.internal.h"
.real

// Optional function stub.
_missingno:
#ifdef __x86__
	xor	%eax,%eax
#elif defined(__aarch64__)
	mov	x0,#0
#endif
	ret
	.endfn	_missingno,globl,hidden
2,028
31
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/trampoline.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_TRAMPOLINE_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_TRAMPOLINE_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)

#define TRAMPOLINE(FUNCTION, THUNK)       \
  ({                                      \
    typeof(FUNCTION) *Tramp;              \
    asm(".section .text.trampoline\n"     \
        "183:\n\t"                        \
        "mov\t%1,%%eax\n\t"               \
        "jmp\t" #THUNK "\n\t"             \
        ".previous\n\t"                   \
        "mov\t$183b,%k0"                  \
        : "=r"(Tramp)                     \
        : "i"(FUNCTION));                 \
    Tramp;                                \
  })

#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_TRAMPOLINE_H_ */
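/* Illustrative usage sketch (not part of the original header): TRAMPOLINE()
   emits a tiny stub in .text.trampoline that loads the 32-bit link-time
   address of FUNCTION into %eax ("i" constraint), tail-jumps to THUNK, and
   hands back the stub's address as a function pointer. The thunk name below
   is a hypothetical placeholder.

       void OnTimer(void);
       typeof(OnTimer) *p = TRAMPOLINE(OnTimer, MyThunk);  // MyThunk is yours
       p();  // enters MyThunk with %eax = &OnTimer

   Because of the "i" constraint, FUNCTION must be a constant symbol, not a
   runtime-computed pointer. */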
727
21
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/kbase36.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8                                :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2023 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for        │
│ any purpose with or without fee is hereby granted, provided that the        │
│ above copyright notice and this permission notice appear in all copies.     │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL               │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED               │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE            │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL        │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR       │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER              │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR            │
│ PERFORMANCE OF THIS SOFTWARE.                                               │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/str/tab.internal.h"

_Alignas(uint8_t) const uint8_t kBase36[256] = {
    ['0'] = 1,   //
    ['1'] = 2,   //
    ['2'] = 3,   //
    ['3'] = 4,   //
    ['4'] = 5,   //
    ['5'] = 6,   //
    ['6'] = 7,   //
    ['7'] = 8,   //
    ['8'] = 9,   //
    ['9'] = 10,  //
    ['A'] = 11,  //
    ['B'] = 12,  //
    ['C'] = 13,  //
    ['D'] = 14,  //
    ['E'] = 15,  //
    ['F'] = 16,  //
    ['G'] = 17,  //
    ['H'] = 18,  //
    ['I'] = 19,  //
    ['J'] = 20,  //
    ['K'] = 21,  //
    ['L'] = 22,  //
    ['M'] = 23,  //
    ['N'] = 24,  //
    ['O'] = 25,  //
    ['P'] = 26,  //
    ['Q'] = 27,  //
    ['R'] = 28,  //
    ['S'] = 29,  //
    ['T'] = 30,  //
    ['U'] = 31,  //
    ['V'] = 32,  //
    ['W'] = 33,  //
    ['X'] = 34,  //
    ['Y'] = 35,  //
    ['Z'] = 36,  //
    ['a'] = 11,  //
    ['b'] = 12,  //
    ['c'] = 13,  //
    ['d'] = 14,  //
    ['e'] = 15,  //
    ['f'] = 16,  //
    ['g'] = 17,  //
    ['h'] = 18,  //
    ['i'] = 19,  //
    ['j'] = 20,  //
    ['k'] = 21,  //
    ['l'] = 22,  //
    ['m'] = 23,  //
    ['n'] = 24,  //
    ['o'] = 25,  //
    ['p'] = 26,  //
    ['q'] = 27,  //
    ['r'] = 28,  //
    ['s'] = 29,  //
    ['t'] = 30,  //
    ['u'] = 31,  //
    ['v'] = 32,  //
    ['w'] = 33,  //
    ['x'] = 34,  //
    ['y'] = 35,  //
    ['z'] = 36,  //
};
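/*
 * Illustrative usage sketch (not part of the original file): entries are
 * biased by +1 so that zero means "not a base-36 digit", which lets a
 * parser test validity and fetch the digit value with one table load:
 *
 *     int digit = kBase36[(unsigned char)c];  // 0 when c is invalid
 *     if (digit) value = value * 36 + (digit - 1);
 */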
3,164
85
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/mcount.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
│vi: set et ft=asm ts=8 tw=8 fenc=utf-8                                     :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for        │
│ any purpose with or without fee is hereby granted, provided that the        │
│ above copyright notice and this permission notice appear in all copies.     │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL               │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED               │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE            │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL        │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR       │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER              │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR            │
│ PERFORMANCE OF THIS SOFTWARE.                                               │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/macros.internal.h"

// Function Profiling Hook.
// cc -pg adds this to the start of global functions.
mcount:	ret
	.endfn	mcount,globl,weak
	.alias	mcount,_mcount	// aarch64 weirdness?
	.alias	mcount,.mcount	// freebsd weirdness?
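// Illustrative note (not part of the original file): with profiling
// enabled, the compiler inserts a call to this hook in each function
// prologue, so a unit built via `cc -pg -c foo.c` begins roughly like
//
//	foo:	push	%rbp
//		mov	%rsp,%rbp
//		call	mcount
//
// and this weak no-op stub keeps such binaries running even when no
// profiler has been wired up to record the calls.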
2,083
27
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/sha256.S
///////////////////////////////////////////////////////////////////////// // Implement fast SHA-256 with AVX2 instructions. (x86_64) // // Copyright (C) 2013 Intel Corporation. // // Authors: // James Guilford <[email protected]> // Kirk Yap <[email protected]> // Tim Chen <[email protected]> // // This software is available to you under a choice of one of two // licenses. You may choose to be licensed under the terms of the GNU // General Public License (GPL) Version 2, available from the file // COPYING in the main directory of this source tree, or the // OpenIB.org BSD license below: // // Redistribution and use in source and binary forms, with or // without modification, are permitted provided that the following // conditions are met: // // - Redistributions of source code must retain the above // copyright notice, this list of conditions and the following // disclaimer. // // - Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials // provided with the distribution. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS // BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // ///////////////////////////////////////////////////////////////////////// // // This code is described in an Intel White-Paper: // "Fast SHA-256 Implementations on Intel Architecture Processors" // // To find it, surf to http://www.intel.com/p/en_US/embedded // and search for that title. 
// ///////////////////////////////////////////////////////////////////////// // This code schedules 2 blocks at a time, with 4 lanes per block ///////////////////////////////////////////////////////////////////////// #include "libc/macros.internal.h" .ident "\n\ AVX2 SHA2 (BSD-2 License)\n\ Copyright 2013 Intel Corporation\n" .include "libc/disclaimer.inc" ## assume buffers not aligned #define VMOVDQ vmovdqu ################################ Define Macros # addm [mem], reg # Add reg to mem using reg-mem add and store .macro addm p1 p2 add \p1, \p2 mov \p2, \p1 .endm ################################ X0 = %ymm4 X1 = %ymm5 X2 = %ymm6 X3 = %ymm7 # XMM versions of above XWORD0 = %xmm4 XWORD1 = %xmm5 XWORD2 = %xmm6 XWORD3 = %xmm7 XTMP0 = %ymm0 XTMP1 = %ymm1 XTMP2 = %ymm2 XTMP3 = %ymm3 XTMP4 = %ymm8 XFER = %ymm9 XTMP5 = %ymm11 SHUF_00BA = %ymm10 # shuffle xBxA -> 00BA SHUF_DC00 = %ymm12 # shuffle xDxC -> DC00 BYTE_FLIP_MASK = %ymm13 X_BYTE_FLIP_MASK = %xmm13 # XMM version of BYTE_FLIP_MASK NUM_BLKS = %rdx # 3rd arg INP = %rsi # 2nd arg CTX = %rdi # 1st arg c = %ecx d = %r8d e = %edx # clobbers NUM_BLKS y3 = %esi # clobbers INP SRND = CTX # SRND is same register as CTX a = %eax b = %ebx f = %r9d g = %r10d h = %r11d old_h = %r11d T1 = %r12d y0 = %r13d y1 = %r14d y2 = %r15d _XFER_SIZE = 2*64*4 # 2 blocks, 64 rounds, 4 bytes/round _XMM_SAVE_SIZE = 0 _INP_END_SIZE = 8 _INP_SIZE = 8 _CTX_SIZE = 8 _RSP_SIZE = 8 _XFER = 0 _XMM_SAVE = _XFER + _XFER_SIZE _INP_END = _XMM_SAVE + _XMM_SAVE_SIZE _INP = _INP_END + _INP_END_SIZE _CTX = _INP + _INP_SIZE _RSP = _CTX + _CTX_SIZE STACK_SIZE = _RSP + _RSP_SIZE # rotate_Xs # Rotate values of symbols X0...X3 .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm # ROTATE_ARGS # Rotate values of symbols a...h .macro ROTATE_ARGS old_h = h TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED disp ################################### RND N + 0 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B addl \disp(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7] mov f, y2 # y2 = f # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 xor g, y2 # y2 = f^g # CH vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]# y1 = (e >> 6)# S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $22, a, y1 # y1 = a >> 22 # S0A add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15] xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpsrld $7, XTMP1, XTMP2 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- vpslld $(32-7), XTMP1, XTMP3 or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 vpsrld $18, XTMP1, XTMP2 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- ROTATE_ARGS ################################### RND N + 1 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B offset = \disp + 1*4 addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA vpsrld $3, XTMP1, XTMP4 
# XTMP4 = W[-15] >> 3 mov f, y2 # y2 = f # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 xor g, y2 # y2 = f^g # CH rorx $6, e, y1 # y1 = (e >> 6) # S1 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $22, a, y1 # y1 = a >> 22 # S0A and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- vpslld $(32-18), XTMP1, XTMP1 and b, y3 # y3 = (a|c)&b # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 vpxor XTMP1, XTMP3, XTMP3 rorx $2, a, T1 # T1 = (a >> 2) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA} or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA} ROTATE_ARGS ################################### RND N + 2 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A offset = \disp + 2*4 addl offset(%rsp, SRND), h # h = k + w + h # -- vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA} rorx $11, e, y1 # y1 = e >> 11 # S1B or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH xor g, y2 # y2 = f^g # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA} and e, y2 # y2 = (f^g)&e # CH rorx $6, e, y1 # y1 = (e >> 6) # S1 vpxor XTMP3, XTMP2, XTMP2 add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $22, a, y1 # y1 = a >> 22 # S0A vpxor XTMP2, XTMP4, XTMP4 # XTMP4 = s1 {xBxA} xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpshufb SHUF_00BA, XTMP4, XTMP4 # XTMP4 = s1 {00BA} xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a ,T1 # T1 = (a >> 2) # S0 vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]} xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC} or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1,h # h = k + w + h + S0 # -- add y2,d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2,h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3,h # h = t1 + S0 + MAJ # -- ROTATE_ARGS ################################### RND N + 3 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B offset = \disp + 3*4 addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC} mov f, y2 # y2 = f # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 xor g, y2 # y2 = f^g # CH vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC} rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC} xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpxor XTMP3, XTMP2, XTMP2 rorx $22, a, y1 # y1 = a >> 22 # S0A add y0, y2 # y2 = S1 + CH # -- vpxor XTMP2, XTMP5, XTMP5 # XTMP5 = s1 
{xDxC} xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- rorx $2, a, T1 # T1 = (a >> 2) # S0 vpshufb SHUF_DC00, XTMP5, XTMP5 # XTMP5 = s1 {DC00} vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]} xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- ROTATE_ARGS rotate_Xs .endm .macro DO_4ROUNDS disp ################################### RND N + 0 ########################### mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 addl \disp(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- ROTATE_ARGS ################################### RND N + 1 ########################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 offset = 4*1 + \disp addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- ROTATE_ARGS ################################### RND N + 2 ############################## add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 offset = 4*2 + \disp addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and 
c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- ROTATE_ARGS ################################### RND N + 3 ########################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 offset = 4*3 + \disp addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- ROTATE_ARGS .endm ######################################################################## ## void sha256_transform_rorx(struct sha256_state *state, const u8 *data, int blocks) ## arg 1 : pointer to state ## arg 2 : pointer to input data ## arg 3 : Num blocks ######################################################################## .text .balign 32 sha256_transform_rorx: push %rbp mov %rsp,%rbp .profilable pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 mov %rsp, %rax subq $STACK_SIZE, %rsp and $-32, %rsp # align rsp to 32 byte boundary mov %rax, _RSP(%rsp) shl $6, NUM_BLKS # convert to bytes jz .Ldone_hash lea -64(INP, NUM_BLKS), NUM_BLKS # pointer to last block mov NUM_BLKS, _INP_END(%rsp) cmp NUM_BLKS, INP je .Lonly_one_block ## load initial digest mov (CTX), a mov 4*1(CTX), b mov 4*2(CTX), c mov 4*3(CTX), d mov 4*4(CTX), e mov 4*5(CTX), f mov 4*6(CTX), g mov 4*7(CTX), h vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK vmovdqa _SHUF_00BA(%rip), SHUF_00BA vmovdqa _SHUF_DC00(%rip), SHUF_DC00 mov CTX, _CTX(%rsp) .Loop0: ## Load first 16 dwords from two blocks VMOVDQ 0*32(INP),XTMP0 VMOVDQ 1*32(INP),XTMP1 VMOVDQ 2*32(INP),XTMP2 VMOVDQ 3*32(INP),XTMP3 ## byte swap data vpshufb BYTE_FLIP_MASK, XTMP0, XTMP0 vpshufb BYTE_FLIP_MASK, XTMP1, XTMP1 vpshufb BYTE_FLIP_MASK, XTMP2, XTMP2 vpshufb BYTE_FLIP_MASK, XTMP3, XTMP3 ## transpose data into high/low halves vperm2i128 $0x20, XTMP2, XTMP0, X0 vperm2i128 $0x31, XTMP2, XTMP0, X1 vperm2i128 $0x20, XTMP3, XTMP1, X2 vperm2i128 $0x31, XTMP3, XTMP1, X3 .Llast_block_enter: add $64, INP mov INP, _INP(%rsp) ## schedule 48 input dwords, by doing 3 rounds of 12 each xor SRND, SRND .balign 16 .Loop1: vpaddd kSha256x2+0*32(SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 0*32 vpaddd kSha256x2+1*32(SRND), X0, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 1*32 vpaddd kSha256x2+2*32(SRND), X0, XFER vmovdqa XFER, 2*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 2*32 vpaddd kSha256x2+3*32(SRND), X0, XFER vmovdqa XFER, 3*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 3*32 add $4*32, SRND cmp 
$3*4*32, SRND jb .Loop1 .Loop2: ## Do last 16 rounds with no scheduling vpaddd kSha256x2+0*32(SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) DO_4ROUNDS _XFER + 0*32 vpaddd kSha256x2+1*32(SRND), X1, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) DO_4ROUNDS _XFER + 1*32 add $2*32, SRND vmovdqa X2, X0 vmovdqa X3, X1 cmp $4*4*32, SRND jb .Loop2 mov _CTX(%rsp), CTX mov _INP(%rsp), INP addm (4*0)(CTX),a addm (4*1)(CTX),b addm (4*2)(CTX),c addm (4*3)(CTX),d addm (4*4)(CTX),e addm (4*5)(CTX),f addm (4*6)(CTX),g addm (4*7)(CTX),h cmp _INP_END(%rsp), INP ja .Ldone_hash #### Do second block using previously scheduled results xor SRND, SRND .balign 16 .Loop3: DO_4ROUNDS _XFER + 0*32 + 16 DO_4ROUNDS _XFER + 1*32 + 16 add $2*32, SRND cmp $4*4*32, SRND jb .Loop3 mov _CTX(%rsp), CTX mov _INP(%rsp), INP add $64, INP addm (4*0)(CTX),a addm (4*1)(CTX),b addm (4*2)(CTX),c addm (4*3)(CTX),d addm (4*4)(CTX),e addm (4*5)(CTX),f addm (4*6)(CTX),g addm (4*7)(CTX),h cmp _INP_END(%rsp), INP jb .Loop0 ja .Ldone_hash .Ldo_last_block: VMOVDQ 0*16(INP),XWORD0 VMOVDQ 1*16(INP),XWORD1 VMOVDQ 2*16(INP),XWORD2 VMOVDQ 3*16(INP),XWORD3 vpshufb X_BYTE_FLIP_MASK, XWORD0, XWORD0 vpshufb X_BYTE_FLIP_MASK, XWORD1, XWORD1 vpshufb X_BYTE_FLIP_MASK, XWORD2, XWORD2 vpshufb X_BYTE_FLIP_MASK, XWORD3, XWORD3 jmp .Llast_block_enter .Lonly_one_block: ## load initial digest mov (4*0)(CTX),a mov (4*1)(CTX),b mov (4*2)(CTX),c mov (4*3)(CTX),d mov (4*4)(CTX),e mov (4*5)(CTX),f mov (4*6)(CTX),g mov (4*7)(CTX),h vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK vmovdqa _SHUF_00BA(%rip), SHUF_00BA vmovdqa _SHUF_DC00(%rip), SHUF_DC00 mov CTX, _CTX(%rsp) jmp .Ldo_last_block .Ldone_hash: mov _RSP(%rsp), %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx pop %rbp ret .endfn sha256_transform_rorx,globl .rodata.cst32 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 .octa 0x0c0d0e0f08090a0b0405060700010203 # shuffle xBxA -> 00BA .rodata.cst32 _SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 # shuffle xDxC -> DC00 .rodata.cst32 _SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF .bss .balign 64 kSha256x2: .zero 512 .endobj kSha256x2,globl .previous .init.start 201,_init_kSha256x2 push $64 pop %rcx ezlea kSha256,dx ezlea kSha256x2,ax 0: movaps -16(%rdx,%rcx,4),%xmm0 movaps %xmm0,-16(%rax,%rcx,8) movaps %xmm0,-32(%rax,%rcx,8) sub $4,%ecx jnz 0b .init.end 201,_init_kSha256x2
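// Illustrative usage sketch (assumptions flagged below; not part of the
// original file): as with the SHA-512 flavor, the digest words are loaded
// from offset 0 of the context, per the mov (CTX)..4*7(CTX) loads above,
// so a caller only needs the eight u32 state words up front. The struct
// below is assumed for illustration, not a documented API.
//
//	struct sha256_state { uint32_t state[8]; };  /* assumed layout */
//	void sha256_transform_rorx(struct sha256_state *, const uint8_t *, int);
//
//	struct sha256_state st = {{0x6a09e667, 0xbb67ae85, /* ...SHA-256 IV... */}};
//	sha256_transform_rorx(&st, block, 1);  /* one padded 64-byte block */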
22,431
759
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/lz4.h
#ifndef COSMOPOLITAN_LIBC_LZ4_H_
#define COSMOPOLITAN_LIBC_LZ4_H_
#include "libc/intrin/bits.h"
/*───────────────────────────────────────────────────────────────────────────│─╗
│ cosmopolitan § lz4                                                       ─╬─│┼
╚────────────────────────────────────────────────────────────────────────────│─╝

  LZ4 is a framing format for REP MOVSB designed by Yann Collet.

  @see https://github.com/lz4/lz4/blob/master/doc/lz4_Frame_format.md
  @see https://github.com/lz4/lz4/blob/master/doc/lz4_Block_format.md
  @see http://ticki.github.io/blog/how-lz4-works/ */

#define LZ4_EOF           0
#define LZ4_VERSION       1
#define LZ4_MAGICNUMBER   0x184D2204
#define LZ4_SKIPPABLE0    0x184D2A50
#define LZ4_SKIPPABLEMASK 0xFFFFFFF0
#define LZ4_MAXHEADERSIZE (MAGICNUMBER_SIZE + 2 + 8 + 4 + 1)

#define LZ4_BLOCKMAXSIZE_64KB  4
#define LZ4_BLOCKMAXSIZE_256KB 5
#define LZ4_BLOCKMAXSIZE_1MB   6
#define LZ4_BLOCKMAXSIZE_4MB   7

#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

/*───────────────────────────────────────────────────────────────────────────│─╗
│ cosmopolitan § lz4 » frames                                              ─╬─│┼
╚────────────────────────────────────────────────────────────────────────────│*/

#define LZ4_MAGIC(FRAME)                   READ32LE(FRAME)
#define LZ4_FRAME_VERSION(FRAME)           ((_LZ4_FRAME_FLG(FRAME) >> 6) & 3)
#define LZ4_FRAME_BLOCKINDEPENDENCE(FRAME) ((_LZ4_FRAME_FLG(FRAME) >> 5) & 1)
#define LZ4_FRAME_BLOCKCHECKSUMFLAG(FRAME) ((_LZ4_FRAME_FLG(FRAME) >> 4) & 1)
#define LZ4_FRAME_BLOCKCONTENTSIZEFLAG(FRAME) ((_LZ4_FRAME_FLG(FRAME) >> 3) & 1)
#define LZ4_FRAME_BLOCKCONTENTCHECKSUMFLAG(FRAME) \
  ((_LZ4_FRAME_FLG(FRAME) >> 2) & 1)
#define LZ4_FRAME_DICTIONARYIDFLAG(FRAME) ((_LZ4_FRAME_FLG(FRAME) >> 0) & 1)
#define LZ4_FRAME_BLOCKMAXSIZE(FRAME)     ((_LZ4_FRAME_BD(FRAME) >> 4) & 7)
#define LZ4_FRAME_RESERVED1(FRAME)        ((_LZ4_FRAME_FLG(FRAME) >> 1) & 1)
#define LZ4_FRAME_RESERVED2(FRAME)        ((_LZ4_FRAME_BD(FRAME) >> 7) & 1)
#define LZ4_FRAME_RESERVED3(FRAME)        ((_LZ4_FRAME_BD(FRAME) >> 0) & 15)
#define LZ4_FRAME_BLOCKCONTENTSIZE(FRAME) \
  (LZ4_FRAME_BLOCKCONTENTSIZEFLAG(FRAME) ? READ64LE((FRAME) + 4 + 1 + 1) : 0)
#define LZ4_FRAME_DICTIONARYID(FRAME)                          \
  (LZ4_FRAME_DICTIONARYIDFLAG(FRAME)                           \
       ? READ32LE(((FRAME) + 4 + 1 + 1 +                       \
                   8 * LZ4_FRAME_BLOCKCONTENTSIZEFLAG(FRAME))) \
       : 0)
#define LZ4_FRAME_HEADERCHECKSUM(FRAME)                                 \
  (*((FRAME) + 4 + 1 + 1 + 8 * LZ4_FRAME_BLOCKCONTENTSIZEFLAG(FRAME) + \
     4 * LZ4_FRAME_DICTIONARYIDFLAG(FRAME)))
#define LZ4_FRAME_HEADERSIZE(FRAME)                        \
  (4 + 1 + 1 + 8 * LZ4_FRAME_BLOCKCONTENTSIZEFLAG(FRAME) + \
   4 * LZ4_FRAME_DICTIONARYIDFLAG(FRAME) + 1)
#define _LZ4_FRAME_FLG(FRAME) (*((FRAME) + 4))
#define _LZ4_FRAME_BD(FRAME)  (*((FRAME) + 5))

/*───────────────────────────────────────────────────────────────────────────│─╗
│ cosmopolitan § lz4 » blocks                                              ─╬─│┼
╚────────────────────────────────────────────────────────────────────────────│*/

#define LZ4_BLOCK_DATA(block)         (block + sizeof(uint32_t))
#define LZ4_BLOCK_DATASIZE(block)     (READ32LE(block) & 0x7fffffff)
#define LZ4_BLOCK_ISEOF(block)        (READ32LE(block) == LZ4_EOF)
#define LZ4_BLOCK_ISCOMPRESSED(block) ((READ32LE(block) & 0x80000000) == 0)
#define LZ4_BLOCK_SIZE(frame, block)              \
  (sizeof(uint32_t) + LZ4_BLOCK_DATASIZE(block) + \
   (LZ4_FRAME_BLOCKCHECKSUMFLAG(frame) ? sizeof(uint8_t) : 0))

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_LZ4_H_ */
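/* Decoding sketch (illustrative, not part of the original header): the
   macros above suffice to walk a frame's block chain without a parser.
   Error handling and the actual LZ4 sequence decoder are elided.

       const unsigned char *p = frame;
       if (LZ4_MAGIC(p) == LZ4_MAGICNUMBER) {
         const unsigned char *block = p + LZ4_FRAME_HEADERSIZE(p);
         while (!LZ4_BLOCK_ISEOF(block)) {
           // LZ4_BLOCK_DATA(block) spans LZ4_BLOCK_DATASIZE(block) bytes,
           // stored raw when !LZ4_BLOCK_ISCOMPRESSED(block)
           block += LZ4_BLOCK_SIZE(p, block);
         }
       }
*/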
4,775
73
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/ktoupper.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8                                :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2023 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for        │
│ any purpose with or without fee is hereby granted, provided that the        │
│ above copyright notice and this permission notice appear in all copies.     │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL               │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED               │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE            │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL        │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR       │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER              │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR            │
│ PERFORMANCE OF THIS SOFTWARE.                                               │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/str/tab.internal.h"

_Alignas(uint8_t) const uint8_t kToUpper[256] = {
    0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,
    14,  15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,
    28,  29,  30,  31,
    ' ', '!', '\"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-',
    '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';',
    '<', '=', '>', '?', '@',
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
    'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    '[', '\\', ']', '^', '_', '`',
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
    'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    '{', '|', '}', '~', 127,
    128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
    142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
    156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
    170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
    184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
    198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
    212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
    226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
    240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253,
    254, 255,
};
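/*
 * Illustrative usage sketch (not part of the original file): because the
 * table is total over all 256 byte values, case conversion needs no range
 * checks or branches:
 *
 *     c = kToUpper[(unsigned char)c];
 *
 * kToLower below follows the same pattern in the other direction.
 */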
3,333
42
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/ktolower.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8                                :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2023 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for        │
│ any purpose with or without fee is hereby granted, provided that the        │
│ above copyright notice and this permission notice appear in all copies.     │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL               │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED               │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE            │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL        │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR       │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER              │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR            │
│ PERFORMANCE OF THIS SOFTWARE.                                               │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/str/tab.internal.h"

_Alignas(uint8_t) const uint8_t kToLower[256] = {
    0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,
    14,  15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,
    28,  29,  30,  31,
    ' ', '!', '\"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-',
    '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';',
    '<', '=', '>', '?', '@',
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
    'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
    '[', '\\', ']', '^', '_', '`',
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
    'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
    '{', '|', '}', '~', 127,
    128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
    142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
    156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
    170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
    184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
    198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
    212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
    226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
    240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253,
    254, 255,
};
3,333
42
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/cachesize.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_CACHESIZE_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_CACHESIZE_H_

#define kCpuCacheTypeData        1
#define kCpuCacheTypeInstruction 2
#define kCpuCacheTypeUnified     3

#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

unsigned _getcachesize(int, int);

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_CACHESIZE_H_ */
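/* Illustrative usage sketch (not part of the original header): the exact
   semantics of _getcachesize() aren't documented here; the call below
   assumes the two parameters are (type, level) as suggested by the
   constants above, and that the result is a size in bytes suitable for
   sizing cache-aware buffers. Both points are assumptions.

       unsigned l1d = _getcachesize(kCpuCacheTypeData, 1);  // assumed */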
422
16
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/kcp437.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
│vi: set et ft=asm ts=8 tw=8 fenc=utf-8                                     :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for        │
│ any purpose with or without fee is hereby granted, provided that the        │
│ above copyright notice and this permission notice appear in all copies.     │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL               │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED               │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE            │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL        │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR       │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER              │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR            │
│ PERFORMANCE OF THIS SOFTWARE.                                               │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/macros.internal.h"

	.rodata
	.balign	2

//	ibm cp437 unicode table w/ string literal safety
//
//	░▄██▒▄█ ▐██ ░░░ ▀▀████▒▀█▄
//	▐███▓██░ ██▌ ▀████▄■█▄
//	▐█▓███▀█░██▀ ░ ░▀█████▓▄
//	▐█▓██▀▄█▒██▀ ▄▄░ ▄▄▄ ░░░ ░▀████▒▄
//	▐████▀▄█■█▀ ▀▀ ░█████░
//	▐█▓█▀████▀ ░ ▐▓███▒
//	█░███▀▀ ░░░ ▄█ ░░░ █████
//	▐█▓█░▀▀ ░░▄█▄▄▄▄▄ ▀▄ ▌▄▄▄░▄▄▄▄▄ ▐████░
//	▐███▌ ▄▀█████████▄ ▌▐▄████████▄ ▐▓███░
//	▐███░░░▀▄█▀▄▄████▄▀░ ▐████████▒ ▀ ░███░
//	░████░ ▓▀ ▄███████▀▌ ▀▄■████▀▀█▀ ██▀█
//	▓███░ ░▄▀▀░░░ ▀ ░░▌ ▄▀▀▄░░▀░▄▀▄ ▐██▀▄
//	░███░ ▄▓▓▄▄░▀▀█▀█ ▌░░ ▀█▀█▀▀ ▐██▀
//	█▀▄▐██ ▀░░ ▄▀ ▐ █ ▀ ▄▄▄░ ░▀▄█▄▀█
//	▌▄ █▓ ▒ ░ █▄█▄▀▄▄▄███▄▀▄ ░░ ░ ▀ █▌
//	█▌▄░▌ ░░░▄▀█▀███████▄▀▄▀▄▀▀▄▄▄ █▀█░▐
//	██▄ ░░░▄█▄▀██▄█■██████▄█▄█▄■▀█░ ▐░▐
//	▀██░ ░▄██████████████████▄█▄█ ░█ ░ ▄▀
//	▀▓█▄▓░░ ▒█▀█████████████████████▒ ██▀
//	▀███ ▓▒ ██████████████▀▀▀▀█▄▀ ░▄█▒
//	▀███ ▀█▄▀▄█████▀▀ ▓▓▓▄░ ▐ ░▄██
//	▀██ ▄███████▄████████▀░░ ░▄██
//	▄██▀▀▄ █▄▀▄██▒▒███████████▀▀▀▄░ ░███░
//	▄██▀▄▄░░▀▐▄████▄ █████▀▄░░█▀▄▀░░ ▄██░
//	█████▄▄▄███▀░█▌██▄▀▀█████▄▄░░░▄▄███▀██▄ ▄▀▀▀▄▄
//	▀██████▀■▄█▄▄ ░▀███████████████▓▓░░▄██▀▄████▄▄▀▄
//
//	█▀█ █ █▀█ █▀█ █▄▀ ▐▀█▀▌█▀█ █▀█ █▄ █ ▀█▀ █▀█ █▀▀
//	█▀▄ █ █ █ █ █ ▀▄ █ █▀▄ █ █ █ ▀█ █ █ ▀▀█
//	█▄█ █▄▌█▄█ █▄█ █ █ █ █ █ █▄█ █ █ ▄█▄ █▄█ █▄█
//
//	THERE WILL BE BLOCKS	march 01 2017
//
//	@see	libc/str/str.h
//	@see	kCp437i[]
kCp437:
	.short	0x00a0,0x263a,0x263b,0x2665,0x2666,0x2663,0x2660,0x2022 //00: ☺☻♥♦♣♠•
	.short	0x25d8,0x25cb,0x25d9,0x2642,0x2640,0x266a,0x266b,0x263c //08:◘○◙♂♀♪♫☼
	.short	0x25ba,0x25c4,0x2195,0x203c,0x00b6,0x00a7,0x25ac,0x21a8 //10:►◄↕‼¶§▬↨
	.short	0x2191,0x2193,0x2192,0x2190,0x221f,0x2194,0x25b2,0x25bc //18:↑↓→←∟↔▲▼
	.short	0x0020,0x0021,0x201c,0x0023,0x0024,0x0025,0x0026,0x2018 //20: !“//$%&‘
	.short	0x0028,0x0029,0x002a,0x002b,0x002c,0x002d,0x002e,0x002f //28:()*+,-./
	.short	0x0030,0x0031,0x0032,0x0033,0x0034,0x0035,0x0036,0x0037 //30:01234567
	.short	0x0038,0x0039,0x003a,0x003b,0x003c,0x003d,0x003e,0x2047 //38:89:;<=>⁇
	.short	0x0040,0x0041,0x0042,0x0043,0x0044,0x0045,0x0046,0x0047 //40:@ABCDEFG
	.short	0x0048,0x0049,0x004a,0x004b,0x004c,0x004d,0x004e,0x004f //48:HIJKLMNO
	.short	0x0050,0x0051,0x0052,0x0053,0x0054,0x0055,0x0056,0x0057 //50:PQRSTUVW
	.short	0x0058,0x0059,0x005a,0x005b,0x005c,0x005d,0x005e,0x005f //58:XYZ[\]^_
	.short	0x0060,0x0061,0x0062,0x0063,0x0064,0x0065,0x0066,0x0067 //60:`abcdefg
	.short	0x0068,0x0069,0x006a,0x006b,0x006c,0x006d,0x006e,0x006f //68:hijklmno
	.short	0x0070,0x0071,0x0072,0x0073,0x0074,0x0075,0x0076,0x0077 //70:pqrstuvw
	.short	0x0078,0x0079,0x007a,0x007b,0x007c,0x007d,0x007e,0x2302 //78:xyz{|}~⌂
	.short	0x00c7,0x00fc,0x00e9,0x00e2,0x00e4,0x00e0,0x00e5,0x00e7 //80:Çüéâäàåç
	.short	0x00ea,0x00eb,0x00e8,0x00ef,0x00ee,0x00ec,0x00c4,0x00c5 //88:êëèïîìÄÅ
	.short	0x00c9,0x00e6,0x00c6,0x00f4,0x00f6,0x00f2,0x00fb,0x00f9 //90:ÉæÆôöòûù
	.short	0x00ff,0x00d6,0x00dc,0x00a2,0x00a3,0x00a5,0x20ac,0x0192 //98:ÿÖÜ¢£¥€ƒ
	.short	0x00e1,0x00ed,0x00f3,0x00fa,0x00f1,0x00d1,0x00aa,0x00ba //a0:áíóúñѪº
	.short	0x00bf,0x2310,0x00ac,0x00bd,0x00bc,0x00a1,0x00ab,0x00bb //a8:¿⌐¬½¼¡«»
	.short	0x2591,0x2592,0x2593,0x2502,0x2524,0x2561,0x2562,0x2556 //b0:░▒▓│┤╡╢╖
	.short	0x2555,0x2563,0x2551,0x2557,0x255d,0x255c,0x255b,0x2510 //b8:╕╣║╗╝╜╛┐
	.short	0x2514,0x2534,0x252c,0x251c,0x2500,0x253c,0x255e,0x255f //c0:└┴┬├─┼╞╟
	.short	0x255a,0x2554,0x2569,0x2566,0x2560,0x2550,0x256c,0x2567 //c8:╚╔╩╦╠═╬╧
	.short	0x2568,0x2564,0x2565,0x2559,0x2558,0x2552,0x2553,0x256b //d0:╨╤╥╙╘╒╓╫
	.short	0x256a,0x2518,0x250c,0x2588,0x2584,0x258c,0x2590,0x2580 //d8:╪┘┌█▄▌▐▀
	.short	0x03b1,0x00df,0x0393,0x03c0,0x03a3,0x03c3,0x03bc,0x03c4 //e0:αßΓπΣσμτ
	.short	0x03a6,0x0398,0x03a9,0x03b4,0x221e,0x03c6,0x03b5,0x2229 //e8:ΦΘΩδ∞φε∩
	.short	0x2261,0x00b1,0x2265,0x2264,0x2320,0x2321,0x00f7,0x2248 //f0:≡±≥≤⌠⌡÷≈
	.short	0x00b0,0x2219,0x00d7,0x221a,0x207f,0x00b2,0x25a0,0x03bb //f8:°∙×√ⁿ²■λ
	.endobj	kCp437,globl
	.previous
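// Illustrative usage sketch (not part of the original file): kCp437 maps
// each of the 256 code page 437 byte values to a 16-bit Unicode code unit,
// so rendering a legacy byte b is a single indexed load, e.g. in C:
//
//	char16_t c = kCp437[b];  /* declared in libc/str/str.h per above */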
8,038
95
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/crc32init.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8                                :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2023 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for        │
│ any purpose with or without fee is hereby granted, provided that the        │
│ above copyright notice and this permission notice appear in all copies.     │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL               │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED               │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE            │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL        │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR       │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER              │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR            │
│ PERFORMANCE OF THIS SOFTWARE.                                               │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/nexgen32e/nexgen32e.h"

void crc32init(uint32_t table[256], uint32_t polynomial) {
  uint32_t d, i, r;
  for (d = 0; d < 256; ++d) {
    r = d;
    for (i = 0; i < 8; ++i) {
      r = r >> 1 ^ (r & 1 ? polynomial : 0);
    }
    table[d] = r;
  }
}
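/*
 * Worked example (illustrative, not part of the original file): with the
 * reflected CRC-32 polynomial 0xEDB88320 the table drives the classic
 * byte-at-a-time update loop, shown here under the usual ~0 pre- and
 * post-whitening.
 *
 *     uint32_t table[256], crc = 0xFFFFFFFF;
 *     crc32init(table, 0xEDB88320);
 *     for (size_t i = 0; i < n; ++i)
 *       crc = table[(crc ^ data[i]) & 0xFF] ^ (crc >> 8);
 *     crc ^= 0xFFFFFFFF;  // yields the zlib/PNG CRC-32 of data[0..n)
 */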
2,100
31
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/djbsort-avx2.S
#include "libc/macros.internal.h" // D.J. Bernstein's outrageously fast integer sorting algorithm. // // @param rdi is int32 array // @param rsi is number of elements in rdi // @note public domain // @see en.wikipedia.org/wiki/Sorting_network djbsort_avx2: push %rbp mov %rsp,%rbp push %r15 push %r14 push %r13 mov %rsi,%r13 push %r12 mov %rdi,%r12 push %rbx andq $-32,%rsp sub $1056,%rsp cmp $8,%rsi jg .L148 jne .L149 mov (%rdi),%eax mov 4(%rdi),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,(%rdi) mov 8(%rdi),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,4(%rdi) mov 12(%rdi),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,8(%rdi) mov 16(%rdi),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,12(%rdi) mov 20(%rdi),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,16(%rdi) mov 24(%rdi),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,20(%rdi) mov 28(%rdi),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,24(%rdi) mov %edx,28(%rdi) jmp .L150 .L149: cmp $7,%rsi jne .L151 .L150: mov (%r12),%edx mov 4(%r12),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,(%r12) mov 8(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,4(%r12) mov 12(%r12),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,8(%r12) mov 16(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,12(%r12) mov 20(%r12),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,16(%r12) mov 24(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,20(%r12) mov %edx,24(%r12) jmp .L152 .L151: cmp $6,%rsi jne .L153 .L152: mov (%r12),%eax mov 4(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,(%r12) mov 8(%r12),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,4(%r12) mov 12(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,8(%r12) mov 16(%r12),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,12(%r12) mov 20(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,16(%r12) mov %edx,20(%r12) jmp .L154 .L153: cmp $5,%rsi jne .L155 .L154: mov (%r12),%edx mov 4(%r12),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,(%r12) mov 8(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,4(%r12) mov 12(%r12),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,8(%r12) mov 16(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,12(%r12) mov %edx,16(%r12) jmp .L156 .L155: cmp $4,%rsi jne .L157 .L156: mov (%r12),%eax mov 4(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,(%r12) mov 8(%r12),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,4(%r12) mov 12(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,8(%r12) mov %edx,12(%r12) jmp .L158 .L157: cmp $3,%rsi jne .L159 .L158: mov (%r12),%edx mov 4(%r12),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,(%r12) mov 8(%r12),%edx cmp %edx,%eax mov %eax,%ecx cmovg %edx,%eax cmovg %ecx,%edx mov %eax,4(%r12) mov %edx,8(%r12) jmp .L160 .L159: cmp $2,%rsi jne .L147 .L160: mov (%r12),%edx mov 4(%r12),%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov 
%edx,(%r12) mov %eax,4(%r12) jmp .L147 .L148: lea -1(%rsi),%rax mov $8,%ebx test %rsi,%rax jne .L162 xor %edx,%edx call int32_sort_2power jmp .L147 .L162: mov %r13,%r14 sub %rbx,%r14 cmp %rbx,%r14 jle .L199 add %rbx,%rbx jmp .L162 .L199: cmp $128,%rbx jg .L164 mov %rbx,%rax mov %rbx,%rdx vmovdqa .LC4(%rip),%ymm0 sar $3,%rax sar $2,%rdx .L165: cmp %rdx,%rax jge .L200 mov %rax,%rcx incq %rax salq $5,%rcx vmovdqa %ymm0,32(%rsp,%rcx) jmp .L165 .L200: xor %eax,%eax .L167: mov (%r12,%rax,4),%edx mov %rax,%r14 mov %edx,32(%rsp,%rax,4) lea 1(%rax),%rax cmp %rax,%r13 jne .L167 lea (%rbx,%rbx),%rsi xor %edx,%edx lea 32(%rsp),%rdi call int32_sort_2power xor %eax,%eax .L168: mov 32(%rsp,%rax,4),%ecx mov %rax,%rdx mov %ecx,(%r12,%rax,4) incq %rax cmp %rdx,%r14 jne .L168 jmp .L147 .L164: mov %rbx,%rsi mov %r12,%rdi mov $1,%edx call int32_sort_2power lea (%r12,%rbx,4),%rdi mov %r14,%rsi call djbsort_avx2 .L175: mov %rbx,%r14 mov %r13,%rsi mov %r12,%rdi sar $2,%r14 mov %r14,%rdx call int32_threestages lea 0(,%r14,4),%r10 mov %r13,%rdx lea (%r10,%rax),%r11 sub %r10,%rdx lea (%r12,%rax,4),%rdi mov %rax,%r9 sub %rax,%rdx lea (%r12,%r11,4),%rsi call minmax_vector lea (%r14,%r14),%rax mov %rax,24(%rsp) cmp %r13,%r11 jg .L169 imul $-8,%r14,%rax lea (%r12,%r10),%rdx lea (%rdx,%r10),%rcx lea (%r14,%r9),%r15 lea (%rcx,%r10),%rdi add %rdi,%rax lea (%rax,%r10),%rsi lea (%rsi,%r10),%r8 .L170: cmp %r9,%r15 jle .L201 vmovdqu (%rcx,%r9,4),%ymm7 vmovdqu (%rdi,%r9,4),%ymm6 vpminsd (%r12,%r9,4),%ymm7,%ymm2 vpminsd (%rdx,%r9,4),%ymm6,%ymm3 vpmaxsd (%r12,%r9,4),%ymm7,%ymm0 vpmaxsd (%rdx,%r9,4),%ymm6,%ymm1 vpminsd %ymm3,%ymm2,%ymm4 vpmaxsd %ymm3,%ymm2,%ymm2 vpminsd %ymm1,%ymm0,%ymm3 vpmaxsd %ymm1,%ymm0,%ymm0 vmovdqu %ymm4,(%r12,%r9,4) vmovdqu %ymm2,(%rax,%r9,4) vmovdqu %ymm3,(%rsi,%r9,4) vmovdqu %ymm0,(%r8,%r9,4) add $8,%r9 jmp .L170 .L201: mov %r11,%r9 .L169: mov 24(%rsp),%rax lea (%r14,%r14),%r15 mov %r13,%r11 lea (%r12,%r9,4),%rdi sub %r15,%r11 add %r9,%rax mov %r11,%rdx lea (%r12,%rax,4),%rsi sub %r9,%rdx call minmax_vector lea (%r15,%r9),%rax cmp %r13,%rax jg .L172 mov %rax,%rdx add %r12,%r10 sub %r14,%rdx .L173: cmp %r9,%rdx jle .L202 vmovdqu (%r10,%r9,4),%ymm6 vpminsd (%r12,%r9,4),%ymm6,%ymm1 vpmaxsd (%r12,%r9,4),%ymm6,%ymm0 vmovdqu %ymm1,(%r12,%r9,4) vmovdqu %ymm0,(%r10,%r9,4) add $8,%r9 jmp .L173 .L202: mov %rax,%r9 .L172: lea (%r11,%r14),%rdx add %r9,%r14 lea (%r12,%r9,4),%rdi sar $3,%rbx sub %r9,%rdx lea (%r12,%r14,4),%rsi call minmax_vector cmp $63,%rbx jg .L175 cmp $32,%rbx jne .L176 mov %r12,%rax mov $63,%edx .L177: cmp %r13,%rdx jge .L203 vmovdqu (%rax),%ymm6 add $64,%rdx add $256,%rax vpminsd -128(%rax),%ymm6,%ymm10 vpmaxsd -128(%rax),%ymm6,%ymm8 vmovdqu -224(%rax),%ymm6 vpminsd -96(%rax),%ymm6,%ymm3 vpmaxsd -96(%rax),%ymm6,%ymm0 vmovdqu -192(%rax),%ymm6 vpminsd -64(%rax),%ymm6,%ymm2 vpmaxsd -64(%rax),%ymm6,%ymm1 vmovdqu -160(%rax),%ymm6 vpmaxsd -32(%rax),%ymm6,%ymm4 vpminsd -32(%rax),%ymm6,%ymm13 vpminsd %ymm2,%ymm10,%ymm15 vpminsd %ymm1,%ymm8,%ymm12 vpminsd %ymm13,%ymm3,%ymm11 vpminsd %ymm4,%ymm0,%ymm5 vpmaxsd %ymm1,%ymm8,%ymm1 vpmaxsd %ymm2,%ymm10,%ymm2 vpmaxsd %ymm13,%ymm3,%ymm13 vpmaxsd %ymm4,%ymm0,%ymm0 vpminsd %ymm13,%ymm2,%ymm10 vpminsd %ymm0,%ymm1,%ymm4 vpminsd %ymm5,%ymm12,%ymm9 vpminsd %ymm11,%ymm15,%ymm14 vpmaxsd %ymm13,%ymm2,%ymm13 vpmaxsd %ymm0,%ymm1,%ymm0 vpmaxsd %ymm11,%ymm15,%ymm15 vpmaxsd %ymm5,%ymm12,%ymm12 vperm2i128 $32,%ymm13,%ymm10,%ymm6 vperm2i128 $32,%ymm12,%ymm9,%ymm5 vperm2i128 $32,%ymm0,%ymm4,%ymm8 vperm2i128 $32,%ymm15,%ymm14,%ymm11 vperm2i128 $49,%ymm0,%ymm4,%ymm0 vperm2i128 
$49,%ymm12,%ymm9,%ymm12 vperm2i128 $49,%ymm15,%ymm14,%ymm14 vperm2i128 $49,%ymm13,%ymm10,%ymm13 vpminsd %ymm14,%ymm11,%ymm3 vpminsd %ymm12,%ymm5,%ymm1 vpminsd %ymm13,%ymm6,%ymm2 vpmaxsd %ymm12,%ymm5,%ymm9 vpmaxsd %ymm14,%ymm11,%ymm11 vpminsd %ymm0,%ymm8,%ymm12 vperm2i128 $32,%ymm9,%ymm1,%ymm5 vpmaxsd %ymm0,%ymm8,%ymm8 vpmaxsd %ymm13,%ymm6,%ymm10 vperm2i128 $32,%ymm11,%ymm3,%ymm7 vperm2i128 $32,%ymm10,%ymm2,%ymm6 vperm2i128 $49,%ymm11,%ymm3,%ymm11 vperm2i128 $49,%ymm10,%ymm2,%ymm10 vperm2i128 $49,%ymm9,%ymm1,%ymm9 vperm2i128 $32,%ymm8,%ymm12,%ymm4 vperm2i128 $49,%ymm8,%ymm12,%ymm8 vpunpcklqdq %ymm11,%ymm7,%ymm3 vpunpcklqdq %ymm10,%ymm6,%ymm2 vpunpcklqdq %ymm9,%ymm5,%ymm1 vpunpcklqdq %ymm8,%ymm4,%ymm0 vpunpckhqdq %ymm11,%ymm7,%ymm7 vpunpckhqdq %ymm10,%ymm6,%ymm6 vpunpckhqdq %ymm9,%ymm5,%ymm5 vpunpckhqdq %ymm8,%ymm4,%ymm4 vpminsd %ymm3,%ymm7,%ymm11 vpminsd %ymm2,%ymm6,%ymm10 vpminsd %ymm1,%ymm5,%ymm9 vpminsd %ymm0,%ymm4,%ymm8 vpmaxsd %ymm3,%ymm7,%ymm7 vpmaxsd %ymm2,%ymm6,%ymm6 vpmaxsd %ymm1,%ymm5,%ymm5 vpunpckldq %ymm7,%ymm11,%ymm3 vpmaxsd %ymm0,%ymm4,%ymm4 vpunpckhdq %ymm7,%ymm11,%ymm7 vpunpckldq %ymm6,%ymm10,%ymm2 vpunpckldq %ymm5,%ymm9,%ymm1 vpunpckhdq %ymm6,%ymm10,%ymm6 vpunpckhdq %ymm5,%ymm9,%ymm5 vpunpckldq %ymm4,%ymm8,%ymm0 vpunpckhdq %ymm4,%ymm8,%ymm4 vpunpcklqdq %ymm7,%ymm3,%ymm10 vpunpcklqdq %ymm5,%ymm1,%ymm8 vpunpckhqdq %ymm7,%ymm3,%ymm3 vpunpcklqdq %ymm6,%ymm2,%ymm9 vpunpcklqdq %ymm4,%ymm0,%ymm7 vpunpckhqdq %ymm6,%ymm2,%ymm2 vpunpckhqdq %ymm5,%ymm1,%ymm1 vpunpckhqdq %ymm4,%ymm0,%ymm0 vpminsd %ymm8,%ymm1,%ymm5 vpminsd %ymm9,%ymm2,%ymm6 vpminsd %ymm7,%ymm0,%ymm4 vpminsd %ymm10,%ymm3,%ymm11 vpmaxsd %ymm8,%ymm1,%ymm1 vpmaxsd %ymm7,%ymm0,%ymm0 vpmaxsd %ymm10,%ymm3,%ymm3 vpmaxsd %ymm9,%ymm2,%ymm2 vpunpckldq %ymm2,%ymm6,%ymm7 vpunpckldq %ymm3,%ymm11,%ymm8 vpunpckhdq %ymm2,%ymm6,%ymm2 vpunpckhdq %ymm3,%ymm11,%ymm3 vpunpckldq %ymm1,%ymm5,%ymm6 vpunpckhdq %ymm1,%ymm5,%ymm1 vmovdqu %ymm8,-256(%rax) vpunpckldq %ymm0,%ymm4,%ymm5 vpunpckhdq %ymm0,%ymm4,%ymm0 vmovdqu %ymm3,-224(%rax) vmovdqu %ymm7,-192(%rax) vmovdqu %ymm2,-160(%rax) vmovdqu %ymm6,-128(%rax) vmovdqu %ymm1,-96(%rax) vmovdqu %ymm5,-64(%rax) vmovdqu %ymm0,-32(%rax) jmp .L177 .L203: mov %r13,%rdi mov %r13,%r9 lea -32(%r13),%rdx shr $6,%rdi andq $-64,%r9 salq $8,%rdi sub %r9,%rdx lea 128(%r12,%rdi),%rsi add %r12,%rdi call minmax_vector jmp .L180 .L176: xor %r10d,%r10d cmp $16,%rbx jne .L181 xor %r9d,%r9d .L180: lea 31(%r9),%rax .L179: cmp %r13,%rax jge .L204 vmovdqu -124(%r12,%rax,4),%ymm6 vpminsd -60(%r12,%rax,4),%ymm6,%ymm5 vpmaxsd -60(%r12,%rax,4),%ymm6,%ymm0 vmovdqu -92(%r12,%rax,4),%ymm6 vpminsd -28(%r12,%rax,4),%ymm6,%ymm1 vpmaxsd -28(%r12,%rax,4),%ymm6,%ymm2 vpminsd %ymm1,%ymm5,%ymm3 vpminsd %ymm2,%ymm0,%ymm4 vpmaxsd %ymm1,%ymm5,%ymm5 vpmaxsd %ymm2,%ymm0,%ymm0 vperm2i128 $32,%ymm0,%ymm4,%ymm2 vperm2i128 $32,%ymm5,%ymm3,%ymm1 vperm2i128 $49,%ymm0,%ymm4,%ymm0 vperm2i128 $49,%ymm5,%ymm3,%ymm3 vpminsd %ymm0,%ymm2,%ymm4 vpmaxsd %ymm0,%ymm2,%ymm0 vpminsd %ymm3,%ymm1,%ymm5 vpmaxsd %ymm3,%ymm1,%ymm1 vperm2i128 $32,%ymm0,%ymm4,%ymm2 vperm2i128 $32,%ymm1,%ymm5,%ymm3 vperm2i128 $49,%ymm0,%ymm4,%ymm4 vperm2i128 $49,%ymm1,%ymm5,%ymm5 vpunpcklqdq %ymm5,%ymm3,%ymm1 vpunpcklqdq %ymm4,%ymm2,%ymm0 vpunpckhqdq %ymm5,%ymm3,%ymm3 vpunpckhqdq %ymm4,%ymm2,%ymm2 vpminsd %ymm3,%ymm1,%ymm5 vpmaxsd %ymm3,%ymm1,%ymm1 vpminsd %ymm2,%ymm0,%ymm4 vpmaxsd %ymm2,%ymm0,%ymm0 vpunpckldq %ymm1,%ymm5,%ymm3 vpunpckldq %ymm0,%ymm4,%ymm2 vpunpckhdq %ymm1,%ymm5,%ymm5 vpunpckhdq %ymm0,%ymm4,%ymm4 vpunpcklqdq %ymm5,%ymm3,%ymm1 vpunpcklqdq %ymm4,%ymm2,%ymm0 vpunpckhqdq 
%ymm5,%ymm3,%ymm3 vpunpckhqdq %ymm4,%ymm2,%ymm2 vpminsd %ymm3,%ymm1,%ymm4 vpmaxsd %ymm3,%ymm1,%ymm1 vpminsd %ymm2,%ymm0,%ymm3 vpmaxsd %ymm2,%ymm0,%ymm0 vpunpckldq %ymm1,%ymm4,%ymm5 vpunpckldq %ymm0,%ymm3,%ymm2 vpunpckhdq %ymm1,%ymm4,%ymm1 vpunpckhdq %ymm0,%ymm3,%ymm0 vmovdqu %ymm5,-124(%r12,%rax,4) vmovdqu %ymm1,-92(%r12,%rax,4) vmovdqu %ymm2,-60(%r12,%rax,4) vmovdqu %ymm0,-28(%r12,%rax,4) add $32,%rax jmp .L179 .L204: mov %r13,%r10 xor %edx,%edx lea 0(,%r9,4),%rax sub %r9,%r10 mov %r10,%rdi andq $-32,%r10 shr $5,%rdi cmp %r9,%r13 cmovl %rdx,%r10 salq $7,%rdi add %r9,%r10 cmp %r9,%r13 cmovl %rdx,%rdi lea -16(%r13),%rdx sub %r10,%rdx lea 64(%rax,%rdi),%rsi add %rax,%rdi add %r12,%rsi add %r12,%rdi call minmax_vector .L181: lea 15(%r10),%rax .L183: cmp %r13,%rax jge .L205 vmovdqu -60(%r12,%rax,4),%ymm6 vpmaxsd -28(%r12,%rax,4),%ymm6,%ymm2 vpminsd -28(%r12,%rax,4),%ymm6,%ymm1 vperm2i128 $32,%ymm2,%ymm1,%ymm0 vperm2i128 $49,%ymm2,%ymm1,%ymm1 vpminsd %ymm1,%ymm0,%ymm2 vpmaxsd %ymm1,%ymm0,%ymm0 vperm2i128 $32,%ymm0,%ymm2,%ymm1 vperm2i128 $49,%ymm0,%ymm2,%ymm2 vpunpcklqdq %ymm2,%ymm1,%ymm0 vpunpckhqdq %ymm2,%ymm1,%ymm1 vpminsd %ymm1,%ymm0,%ymm2 vpmaxsd %ymm1,%ymm0,%ymm0 vpunpckldq %ymm0,%ymm2,%ymm1 vpunpckhdq %ymm0,%ymm2,%ymm2 vpunpcklqdq %ymm2,%ymm1,%ymm0 vpunpckhqdq %ymm2,%ymm1,%ymm1 vpminsd %ymm1,%ymm0,%ymm2 vpmaxsd %ymm1,%ymm0,%ymm0 vpunpckldq %ymm0,%ymm2,%ymm1 vpunpckhdq %ymm0,%ymm2,%ymm0 vmovdqu %ymm1,-60(%r12,%rax,4) vmovdqu %ymm0,-28(%r12,%rax,4) add $16,%rax jmp .L183 .L205: mov %r13,%r9 xor %edx,%edx lea 0(,%r10,4),%rcx sub %r10,%r9 mov %r9,%rax andq $-16,%r9 shr $4,%rax cmp %r10,%r13 cmovl %rdx,%r9 salq $6,%rax add %r10,%r9 cmp %r10,%r13 cmovl %rdx,%rax lea -8(%r13),%rdx sub %r9,%rdx lea (%rax,%rcx),%r10 lea 32(%rcx,%rax),%rsi add %r12,%r10 add %r12,%rsi mov %r10,%rdi call minmax_vector lea 7(%r9),%rax cmp %r13,%rax jge .L185 lea 16(,%r9,4),%rax mov (%r10),%ecx add $8,%r9 lea -12(%r12,%rax),%r14 lea (%r12,%rax),%rbx lea 4(%r12,%rax),%r11 mov (%rbx),%edx lea 8(%r12,%rax),%r8 cmp %edx,%ecx mov %ecx,%esi cmovg %edx,%ecx cmovg %esi,%edx mov %ecx,(%r10) mov %edx,(%rbx) mov (%r14),%ecx mov (%r11),%edx cmp %edx,%ecx mov %ecx,%esi cmovg %edx,%ecx cmovg %esi,%edx lea -8(%r12,%rax),%rsi mov %ecx,(%r14) mov %edx,(%r11) mov (%rsi),%ecx mov (%r8),%edx cmp %edx,%ecx mov %ecx,%edi cmovg %edx,%ecx cmovg %edi,%edx lea 12(%r12,%rax),%rdi mov %ecx,(%rsi) lea -4(%r12,%rax),%rcx mov %edx,(%r8) mov (%rcx),%edx mov (%rdi),%eax cmp %eax,%edx mov %edx,%r15d cmovg %eax,%edx cmovg %r15d,%eax mov %edx,(%rcx) mov %eax,(%rdi) mov (%r10),%edx mov (%rsi),%eax cmp %eax,%edx mov %edx,%r15d cmovg %eax,%edx cmovg %r15d,%eax mov %edx,(%r10) mov %eax,(%rsi) mov (%rcx),%eax mov (%r14),%edx cmp %eax,%edx mov %edx,%r15d cmovg %eax,%edx cmovg %r15d,%eax mov %edx,(%r14) mov %eax,(%rcx) mov (%r10),%edx mov (%r14),%eax cmp %eax,%edx mov %edx,%r15d cmovg %eax,%edx cmovg %r15d,%eax mov %edx,(%r10) mov %eax,(%r14) mov (%rsi),%edx mov (%rcx),%eax cmp %eax,%edx mov %edx,%r10d cmovg %eax,%edx cmovg %r10d,%eax mov %edx,(%rsi) mov %eax,(%rcx) mov (%rbx),%edx mov (%r8),%esi mov (%rdi),%ecx cmp %esi,%edx mov %edx,%eax cmovg %esi,%edx cmovg %eax,%esi mov (%r11),%eax cmp %ecx,%eax mov %eax,%r10d cmovg %ecx,%eax cmovg %r10d,%ecx cmp %eax,%edx mov %edx,%r10d cmovg %eax,%edx cmovg %r10d,%eax mov %edx,(%rbx) mov %esi,%edx mov %eax,(%r11) mov %ecx,%eax cmp %eax,%edx mov %edx,%ecx cmovg %eax,%edx cmovg %ecx,%eax mov %edx,(%r8) mov %eax,(%rdi) .L185: lea 4(%r9),%r10 lea -4(%r13),%rdx lea 0(,%r10,4),%rbx sub %r9,%rdx lea -16(%r12,%rbx),%r11 lea 
(%r12,%rbx),%rsi mov %r11,%rdi call minmax_vector lea 3(%r9),%rax cmp %r13,%rax jge .L186 lea -8(%r12,%rbx),%rcx mov (%r11),%edx lea -12(%r12,%rbx),%rdi mov %r10,%r9 mov (%rcx),%eax cmp %eax,%edx mov %edx,%esi cmovg %eax,%edx cmovg %esi,%eax lea -4(%r12,%rbx),%rsi mov %edx,(%r11) mov %eax,(%rcx) mov (%rdi),%edx mov (%rsi),%eax cmp %eax,%edx mov %edx,%r8d cmovg %eax,%edx cmovg %r8d,%eax mov %edx,(%rdi) mov %eax,(%rsi) mov (%rdi),%eax mov (%r11),%edx cmp %eax,%edx mov %edx,%r8d cmovg %eax,%edx cmovg %r8d,%eax mov %edx,(%r11) mov %eax,(%rdi) mov (%rcx),%edx mov (%rsi),%eax cmp %eax,%edx mov %edx,%edi cmovg %eax,%edx cmovg %edi,%eax mov %edx,(%rcx) mov %eax,(%rsi) .L186: lea 2(%r9),%rax cmp %r13,%rax jge .L187 lea 0(,%r9,4),%rax lea (%r12,%rax),%rsi lea 8(%r12,%rax),%rcx mov (%rsi),%edx mov (%rcx),%eax cmp %eax,%edx mov %edx,%edi cmovg %eax,%edx cmovg %edi,%eax mov %edx,(%rsi) mov %eax,(%rcx) .L187: lea 1(%r9),%rax cmp %r13,%rax jge .L147 salq $2,%r9 lea (%r12,%r9),%rsi lea 4(%r12,%r9),%rcx mov (%rsi),%edx mov (%rcx),%eax cmp %eax,%edx mov %edx,%edi cmovg %eax,%edx cmovg %edi,%eax mov %edx,(%rsi) mov %eax,(%rcx) .L147: lea -40(%rbp),%rsp pop %rbx pop %r12 pop %r13 pop %r14 pop %r15 pop %rbp ret .endfn djbsort_avx2,globl,hidden minmax_vector: cmp $7,%rdx jg .L13 .L2: test %rdx,%rdx jle .L15 mov (%rdi),%ecx mov (%rsi),%eax add $4,%rdi add $4,%rsi cmp %eax,%ecx mov %ecx,%r8d cmovg %eax,%ecx cmovg %r8d,%eax decq %rdx mov %ecx,-4(%rdi) mov %eax,-4(%rsi) jmp .L2 .L15: ret .L13: testb $7,%dl je .L6 lea -32(,%rdx,4),%rax andq $-8,%rdx lea (%rdi,%rax),%rcx add %rsi,%rax vmovdqu (%rax),%ymm2 vpminsd (%rcx),%ymm2,%ymm1 vpmaxsd (%rcx),%ymm2,%ymm0 vmovdqu %ymm1,(%rcx) vmovdqu %ymm0,(%rax) .L6: xor %eax,%eax .L7: vmovdqu (%rdi,%rax),%ymm4 vpminsd (%rsi,%rax),%ymm4,%ymm1 vpmaxsd (%rsi,%rax),%ymm4,%ymm0 vmovdqu %ymm1,(%rdi,%rax) vmovdqu %ymm0,(%rsi,%rax) add $32,%rax sub $8,%rdx jne .L7 ret .endfn minmax_vector int32_twostages_32: sub $-128,%rdi .L17: lea -128(%rdi),%rax test %rsi,%rsi jle .L21 .L18: vmovdqu (%rax),%ymm5 vmovdqu 128(%rax),%ymm7 add $32,%rax vpminsd 352(%rax),%ymm7,%ymm3 vpminsd 224(%rax),%ymm5,%ymm2 vpmaxsd 224(%rax),%ymm5,%ymm0 vpmaxsd 352(%rax),%ymm7,%ymm1 vpminsd %ymm3,%ymm2,%ymm4 vpmaxsd %ymm3,%ymm2,%ymm2 vpminsd %ymm1,%ymm0,%ymm3 vpmaxsd %ymm1,%ymm0,%ymm0 vmovdqu %ymm4,-32(%rax) vmovdqu %ymm2,96(%rax) vmovdqu %ymm3,224(%rax) vmovdqu %ymm0,352(%rax) cmp %rax,%rdi jne .L18 add $-128,%rsi add $512,%rdi jmp .L17 .L21: ret .endfn int32_twostages_32 int32_threestages: push %rbp imul $-24,%rdx,%r8 lea 0(,%rdx,8),%rax mov %rsp,%rbp push %r15 push %r14 push %r13 push %r12 push %rbx andq $-32,%rsp sub $64,%rsp mov %rax,56(%rsp) lea 0(,%rdx,4),%rax lea (%rdi,%rax),%rcx mov %rsi,8(%rsp) lea (%rcx,%rax),%rsi lea (%rsi,%rax),%r9 lea (%r9,%rax),%r11 lea (%r11,%rax),%r12 lea (%r12,%rax),%r14 lea (%r14,%rax),%r15 lea (%r15,%r8),%rbx mov %rbx,40(%rsp) add %rax,%rbx lea (%rbx,%rax),%r10 mov %rbx,32(%rsp) lea (%r10,%rax),%rbx lea (%rbx,%rax),%r13 lea 0(%r13,%rax),%r8 mov %r8,24(%rsp) add %r8,%rax mov %rax,16(%rsp) xor %eax,%eax .L23: mov 56(%rsp),%r8 add %rax,%r8 mov %r8,48(%rsp) cmp 8(%rsp),%r8 jg .L28 .L25: cmp %rdx,%rax jge .L29 vmovdqu (%rdi,%rax,4),%ymm3 vmovdqu (%rsi,%rax,4),%ymm6 vpminsd (%r11,%rax,4),%ymm3,%ymm7 vpmaxsd (%r11,%rax,4),%ymm3,%ymm4 vpmaxsd (%r14,%rax,4),%ymm6,%ymm0 vmovdqu (%rcx,%rax,4),%ymm3 vmovdqu (%rsi,%rax,4),%ymm5 vpminsd (%r12,%rax,4),%ymm3,%ymm2 vpmaxsd (%r12,%rax,4),%ymm3,%ymm1 vpminsd (%r14,%rax,4),%ymm5,%ymm5 vmovdqu (%r9,%rax,4),%ymm3 vpminsd (%r15,%rax,4),%ymm3,%ymm6 
vpmaxsd (%r15,%rax,4),%ymm3,%ymm3 vpminsd %ymm5,%ymm7,%ymm8 mov 40(%rsp),%r8 vpmaxsd %ymm5,%ymm7,%ymm5 vpminsd %ymm6,%ymm2,%ymm7 vpminsd %ymm7,%ymm8,%ymm9 vpmaxsd %ymm6,%ymm2,%ymm2 vpminsd %ymm0,%ymm4,%ymm6 vpmaxsd %ymm0,%ymm4,%ymm0 vmovdqu %ymm9,(%rdi,%rax,4) vpminsd %ymm3,%ymm1,%ymm4 vpmaxsd %ymm3,%ymm1,%ymm1 vpmaxsd %ymm7,%ymm8,%ymm3 vpminsd %ymm2,%ymm5,%ymm7 vmovdqu %ymm3,(%r8,%rax,4) mov 32(%rsp),%r8 vpmaxsd %ymm2,%ymm5,%ymm2 vpminsd %ymm4,%ymm6,%ymm5 vpmaxsd %ymm4,%ymm6,%ymm6 vpminsd %ymm1,%ymm0,%ymm4 vmovdqu %ymm7,(%r8,%rax,4) mov 24(%rsp),%r8 vpmaxsd %ymm1,%ymm0,%ymm0 vmovdqu %ymm2,(%r10,%rax,4) vmovdqu %ymm5,(%rbx,%rax,4) vmovdqu %ymm6,0(%r13,%rax,4) vmovdqu %ymm4,(%r8,%rax,4) mov 16(%rsp),%r8 vmovdqu %ymm0,(%r8,%rax,4) add $8,%rax jmp .L25 .L29: mov 48(%rsp),%rax add 56(%rsp),%rdx jmp .L23 .L28: lea -40(%rbp),%rsp pop %rbx pop %r12 pop %r13 pop %r14 pop %r15 pop %rbp ret .endfn int32_threestages merge16_finish: vpminsd %ymm1,%ymm0,%ymm3 vpmaxsd %ymm1,%ymm0,%ymm0 vperm2i128 $32,%ymm0,%ymm3,%ymm2 vperm2i128 $49,%ymm0,%ymm3,%ymm0 vpminsd %ymm0,%ymm2,%ymm1 vpmaxsd %ymm0,%ymm2,%ymm0 vpunpcklqdq %ymm0,%ymm1,%ymm2 vpunpckhqdq %ymm0,%ymm1,%ymm0 vpminsd %ymm0,%ymm2,%ymm1 vpmaxsd %ymm0,%ymm2,%ymm2 vpunpckldq %ymm2,%ymm1,%ymm0 vpunpckhdq %ymm2,%ymm1,%ymm1 vpunpcklqdq %ymm1,%ymm0,%ymm3 vpunpckhqdq %ymm1,%ymm0,%ymm0 vpminsd %ymm3,%ymm0,%ymm2 vpmaxsd %ymm3,%ymm0,%ymm0 vpunpckldq %ymm0,%ymm2,%ymm1 vpunpckhdq %ymm0,%ymm2,%ymm0 vperm2i128 $32,%ymm0,%ymm1,%ymm2 vperm2i128 $49,%ymm0,%ymm1,%ymm0 test %esi,%esi je .L31 vpcmpeqd %ymm1,%ymm1,%ymm1 vpxor %ymm1,%ymm2,%ymm2 vpxor %ymm1,%ymm0,%ymm0 .L31: vmovdqu %ymm2,(%rdi) vmovdqu %ymm0,32(%rdi) ret .endfn merge16_finish int32_sort_2power: push %r13 lea 16(%rsp),%r13 andq $-32,%rsp push -8(%r13) push %rbp mov %rsp,%rbp push %r15 push %r14 push %r13 push %r12 mov %rdi,%r12 push %rbx sub $264,%rsp mov %edx,-116(%rbp) cmp $8,%rsi jne .L36 mov 4(%rdi),%edx mov (%rdi),%r8d mov 8(%rdi),%ecx mov 28(%r12),%r9d cmp %r8d,%edx mov %edx,%eax cmovg %r8d,%edx cmovg %eax,%r8d mov 12(%rdi),%eax cmp %ecx,%eax mov %eax,%esi cmovg %ecx,%eax cmovg %esi,%ecx cmp %r8d,%ecx mov %ecx,%esi cmovg %r8d,%ecx cmovg %esi,%r8d cmp %edx,%eax mov %eax,%esi cmovg %edx,%eax cmovg %esi,%edx mov 20(%rdi),%esi mov %edx,%r10d mov 16(%rdi),%edi cmp %r10d,%ecx mov %ecx,%edx cmovg %r10d,%ecx cmovg %edx,%r10d cmp %edi,%esi mov %esi,%edx cmovg %edi,%esi cmovg %edx,%edi mov 24(%r12),%edx cmp %edx,%r9d mov %r9d,%r11d cmovg %edx,%r9d cmovg %r11d,%edx cmp %edi,%edx mov %edx,%r11d cmovg %edi,%edx cmovg %r11d,%edi cmp %esi,%r9d mov %r9d,%r11d cmovg %esi,%r9d cmovg %r11d,%esi cmp %esi,%edx mov %edx,%r11d cmovg %esi,%edx cmovg %r11d,%esi cmp %r8d,%edi mov %edi,%r11d cmovg %r8d,%edi cmovg %r11d,%r8d cmp %ecx,%edx mov %edx,%r11d cmovg %ecx,%edx cmovg %r11d,%ecx mov %r8d,(%r12) cmp %ecx,%edi mov %edi,%r11d cmovg %ecx,%edi cmovg %r11d,%ecx cmp %r10d,%esi mov %esi,%r11d cmovg %r10d,%esi cmovg %r11d,%r10d cmp %eax,%r9d mov %r9d,%r11d cmovg %eax,%r9d cmovg %r11d,%eax cmp %eax,%esi mov %esi,%r11d cmovg %eax,%esi cmovg %r11d,%eax mov %r9d,28(%r12) cmp %r10d,%ecx mov %ecx,%r11d cmovg %r10d,%ecx cmovg %r11d,%r10d cmp %eax,%edi mov %edi,%r11d cmovg %eax,%edi cmovg %r11d,%eax mov %r10d,4(%r12) cmp %esi,%edx mov %edx,%r11d cmovg %esi,%edx cmovg %r11d,%esi mov %ecx,8(%r12) mov %eax,12(%r12) mov %edi,16(%r12) mov %esi,20(%r12) mov %edx,24(%r12) jmp .L35 .L36: mov %rsi,%r15 cmp $16,%rsi jne .L38 vmovdqa .LC0(%rip),%ymm0 vpxor 32(%rdi),%ymm0,%ymm2 vpxor (%rdi),%ymm0,%ymm0 vmovdqa .LC1(%rip),%ymm4 cmpl $0,-116(%rbp) 
vpunpckldq %ymm2,%ymm0,%ymm1 vpunpckhdq %ymm2,%ymm0,%ymm0 vpunpcklqdq %ymm0,%ymm1,%ymm3 vpunpckhqdq %ymm0,%ymm1,%ymm1 vpminsd %ymm3,%ymm1,%ymm2 vpmaxsd %ymm3,%ymm1,%ymm1 vpxor %ymm4,%ymm2,%ymm2 vpxor %ymm4,%ymm1,%ymm1 vpunpckldq %ymm1,%ymm2,%ymm0 vpunpckhdq %ymm1,%ymm2,%ymm1 vpminsd %ymm1,%ymm0,%ymm3 vpmaxsd %ymm1,%ymm0,%ymm1 vpunpcklqdq %ymm1,%ymm3,%ymm2 vpunpckhqdq %ymm1,%ymm3,%ymm3 vpunpckldq %ymm3,%ymm2,%ymm1 vpunpckhdq %ymm3,%ymm2,%ymm2 vpunpcklqdq %ymm2,%ymm1,%ymm0 vpunpckhqdq %ymm2,%ymm1,%ymm1 vpminsd %ymm0,%ymm1,%ymm2 vpmaxsd %ymm0,%ymm1,%ymm1 vpunpckldq %ymm1,%ymm2,%ymm0 vpunpckhdq %ymm1,%ymm2,%ymm1 vpxor %ymm4,%ymm1,%ymm1 vpxor %ymm4,%ymm0,%ymm0 vperm2i128 $32,%ymm1,%ymm0,%ymm3 vperm2i128 $49,%ymm1,%ymm0,%ymm0 vpminsd %ymm3,%ymm0,%ymm2 vpmaxsd %ymm3,%ymm0,%ymm0 vperm2i128 $32,%ymm0,%ymm2,%ymm1 vperm2i128 $49,%ymm0,%ymm2,%ymm0 vpminsd %ymm1,%ymm0,%ymm3 vpmaxsd %ymm1,%ymm0,%ymm2 vpunpcklqdq %ymm2,%ymm3,%ymm1 vpunpckhqdq %ymm2,%ymm3,%ymm2 vpunpckldq %ymm2,%ymm1,%ymm0 vpunpckhdq %ymm2,%ymm1,%ymm2 vpunpcklqdq %ymm2,%ymm0,%ymm1 vpunpckhqdq %ymm2,%ymm0,%ymm0 vpminsd %ymm1,%ymm0,%ymm2 vpmaxsd %ymm1,%ymm0,%ymm0 vpunpckldq %ymm0,%ymm2,%ymm1 vpunpckhdq %ymm0,%ymm2,%ymm0 vpunpcklqdq %ymm0,%ymm1,%ymm2 vpunpckhqdq %ymm0,%ymm1,%ymm1 vpcmpeqd %ymm0,%ymm0,%ymm0 je .L39 vpxor %ymm0,%ymm1,%ymm1 jmp .L40 .L39: vpxor %ymm0,%ymm2,%ymm2 .L40: mov -116(%rbp),%esi vmovdqa %ymm2,%ymm0 mov %r12,%rdi jmp .L134 .L38: cmp $32,%rsi jne .L41 mov $1,%edx mov $16,%esi lea 64(%r12),%r13 call int32_sort_2power xor %edx,%edx mov $16,%esi mov %r13,%rdi call int32_sort_2power cmpl $0,-116(%rbp) vmovdqu (%r12),%ymm4 vmovdqu 32(%r12),%ymm1 vmovdqu 64(%r12),%ymm2 vmovdqu 96(%r12),%ymm3 je .L42 vpcmpeqd %ymm0,%ymm0,%ymm0 vpxor %ymm0,%ymm4,%ymm4 vpxor %ymm0,%ymm1,%ymm1 vpxor %ymm0,%ymm2,%ymm2 vpxor %ymm0,%ymm3,%ymm3 .L42: mov -116(%rbp),%esi vpmaxsd %ymm1,%ymm3,%ymm5 vpminsd %ymm4,%ymm2,%ymm0 mov %r12,%rdi vpmaxsd %ymm4,%ymm2,%ymm4 vpminsd %ymm1,%ymm3,%ymm1 vmovdqa %ymm5,-80(%rbp) vmovdqa %ymm4,-112(%rbp) call merge16_finish vmovdqa -80(%rbp),%ymm5 mov -116(%rbp),%esi mov %r13,%rdi vmovdqa -112(%rbp),%ymm4 vmovdqa %ymm5,%ymm1 vmovdqa %ymm4,%ymm0 .L134: add $264,%rsp pop %rbx pop %r12 pop %r13 pop %r14 pop %r15 pop %rbp lea -16(%r13),%rsp pop %r13 jmp merge16_finish .L41: mov %rsi,%rax sar $3,%rax mov %rax,-80(%rbp) lea 0(,%rax,4),%r13 salq $3,%rax imul $-20,-80(%rbp),%rdx lea (%rdi,%rax),%rdi lea (%rdi,%rax),%rsi lea (%rsi,%rax),%rcx add %rcx,%rdx lea (%rdx,%rax),%r9 lea (%r9,%rax),%r8 add %r8,%rax mov %rax,-136(%rbp) mov %rax,%r10 xor %eax,%eax .L43: cmp -80(%rbp),%rax jge .L135 add $32,%rdi add $32,%rsi add $32,%rcx add $32,%rdx vmovdqu (%r12,%rax,4),%ymm5 add $32,%r9 add $32,%r8 add $32,%r10 vpminsd -32(%rsi),%ymm5,%ymm4 vpmaxsd -32(%rsi),%ymm5,%ymm2 vmovdqu -32(%rdi),%ymm5 vpminsd -32(%rcx),%ymm5,%ymm1 vpmaxsd -32(%rcx),%ymm5,%ymm0 vpminsd %ymm2,%ymm0,%ymm3 vpmaxsd %ymm2,%ymm0,%ymm0 vpminsd %ymm4,%ymm1,%ymm2 vpmaxsd %ymm4,%ymm1,%ymm1 vmovdqu %ymm0,(%r12,%rax,4) add $8,%rax vpminsd %ymm1,%ymm3,%ymm4 vpmaxsd %ymm1,%ymm3,%ymm1 vmovdqu %ymm4,-32(%rdi) vmovdqu %ymm1,-32(%rsi) vmovdqu %ymm2,-32(%rcx) vmovdqu -32(%r8),%ymm5 vmovdqu -32(%r10),%ymm6 vpminsd -32(%rdx),%ymm5,%ymm1 vpminsd -32(%r9),%ymm6,%ymm3 vpmaxsd -32(%r9),%ymm6,%ymm2 vpmaxsd -32(%rdx),%ymm5,%ymm0 vpminsd %ymm3,%ymm1,%ymm4 vpmaxsd %ymm3,%ymm1,%ymm1 vpminsd %ymm2,%ymm0,%ymm3 vpmaxsd %ymm2,%ymm0,%ymm0 vmovdqu %ymm4,-32(%rdx) vpminsd %ymm1,%ymm3,%ymm2 vpmaxsd %ymm1,%ymm3,%ymm1 vmovdqu %ymm1,-32(%r9) vmovdqu %ymm2,-32(%r8) vmovdqu %ymm0,-32(%r10) jmp .L43 .L135: 
imul $-24,-80(%rbp),%rax mov %rax,-128(%rbp) cmp $127,%r15 jg .L105 .L63: lea (%r12,%r15,4),%rax vmovdqa .LC1(%rip),%ymm10 movl $3,-272(%rbp) mov $4,%r14d mov %rax,-144(%rbp) mov %r15,%rax vmovdqa .LC3(%rip),%ymm11 sar $4,%rax vmovdqa .LC2(%rip),%ymm12 mov %rax,-112(%rbp) mov -136(%rbp),%rax add -128(%rbp),%rax mov %rax,-200(%rbp) add %r13,%rax mov %rax,-192(%rbp) add %r13,%rax mov %rax,-184(%rbp) add %r13,%rax mov %rax,-176(%rbp) add %r13,%rax mov %rax,-168(%rbp) add %r13,%rax mov %rax,-160(%rbp) add %r13,%rax mov %rax,-152(%rbp) jmp .L46 .L105: xor %eax,%eax vpcmpeqd %ymm0,%ymm0,%ymm0 .L45: vpxor 64(%r12,%rax,4),%ymm0,%ymm1 vpxor (%r12,%rax,4),%ymm0,%ymm2 vmovdqu %ymm1,64(%r12,%rax,4) vmovdqu %ymm2,(%r12,%rax,4) add $32,%rax cmp %rax,%r15 jg .L45 mov -136(%rbp),%r14 add -128(%rbp),%r14 mov $8,%ebx vpcmpeqd %ymm10,%ymm10,%ymm10 lea (%r14,%r13),%rax mov %rax,-296(%rbp) add %r13,%rax lea (%rax,%r13),%r11 mov %rax,-176(%rbp) lea (%r11,%r13),%rax mov %rax,-288(%rbp) add %r13,%rax mov %rax,-144(%rbp) add %r13,%rax mov %rax,-112(%rbp) add -128(%rbp),%rax mov %rax,-200(%rbp) add %r13,%rax mov %rax,-192(%rbp) add %r13,%rax mov %rax,-184(%rbp) add %r13,%rax mov %rax,-168(%rbp) add %r13,%rax mov %rax,-160(%rbp) add %r13,%rax mov %rax,-152(%rbp) add %r13,%rax mov %rax,-280(%rbp) .L64: mov %rbx,%rcx sarq %rcx .L47: cmp $127,%rcx jle .L136 mov %rcx,%rdx mov %r15,%rsi mov %r12,%rdi mov %r11,-272(%rbp) sar $2,%rdx mov %rcx,-240(%rbp) call int32_threestages mov -240(%rbp),%rcx mov -272(%rbp),%r11 vpcmpeqd %ymm10,%ymm10,%ymm10 sar $3,%rcx jmp .L47 .L136: cmp $64,%rcx jne .L49 mov %r15,%rsi mov %r12,%rdi mov %r11,-240(%rbp) call int32_twostages_32 mov -240(%rbp),%r11 vpcmpeqd %ymm10,%ymm10,%ymm10 .L54: xor %eax,%eax jmp .L50 .L49: cmp $32,%rcx jne .L51 mov %r12,%rax xor %edx,%edx .L52: vmovdqu (%rax),%ymm7 vmovdqu 32(%rax),%ymm5 add $64,%rdx add $256,%rax vpminsd -128(%rax),%ymm7,%ymm8 vpmaxsd -128(%rax),%ymm7,%ymm4 vpminsd -96(%rax),%ymm5,%ymm1 vpmaxsd -96(%rax),%ymm5,%ymm0 vmovdqu -192(%rax),%ymm6 vmovdqu -160(%rax),%ymm7 vpminsd -64(%rax),%ymm6,%ymm5 vpmaxsd -32(%rax),%ymm7,%ymm2 vpmaxsd -64(%rax),%ymm6,%ymm3 vmovdqu -160(%rax),%ymm6 vpminsd -32(%rax),%ymm6,%ymm6 vpminsd %ymm5,%ymm8,%ymm7 vpmaxsd %ymm5,%ymm8,%ymm5 vpminsd %ymm6,%ymm1,%ymm8 vpmaxsd %ymm6,%ymm1,%ymm1 vpminsd %ymm3,%ymm4,%ymm6 vpmaxsd %ymm3,%ymm4,%ymm3 vpminsd %ymm2,%ymm0,%ymm4 vpmaxsd %ymm2,%ymm0,%ymm0 vpminsd %ymm8,%ymm7,%ymm9 vpmaxsd %ymm8,%ymm7,%ymm2 vpminsd %ymm1,%ymm5,%ymm7 vpmaxsd %ymm1,%ymm5,%ymm1 vmovdqu %ymm9,-256(%rax) vpminsd %ymm4,%ymm6,%ymm5 vpmaxsd %ymm4,%ymm6,%ymm6 vmovdqu %ymm2,-224(%rax) vpminsd %ymm0,%ymm3,%ymm4 vpmaxsd %ymm0,%ymm3,%ymm3 vmovdqu %ymm5,-128(%rax) vmovdqu %ymm7,-192(%rax) vmovdqu %ymm1,-160(%rax) vmovdqu %ymm6,-96(%rax) vmovdqu %ymm4,-64(%rax) vmovdqu %ymm3,-32(%rax) cmp %rdx,%r15 jg .L52 .L56: lea (%rbx,%rbx),%rdx xor %ecx,%ecx cmp -80(%rbp),%rdx setne %al sete %cl mov %rdx,%r8 xor %esi,%esi movzbl %al,%eax mov %eax,-204(%rbp) jmp .L53 .L51: cmp $16,%rcx jne .L131 jmp .L54 .L50: vmovdqu (%r12,%rax,4),%ymm5 vmovdqu 32(%r12,%rax,4),%ymm6 vpminsd 64(%r12,%rax,4),%ymm5,%ymm2 vpminsd 96(%r12,%rax,4),%ymm6,%ymm3 vpmaxsd 64(%r12,%rax,4),%ymm5,%ymm0 vpmaxsd 96(%r12,%rax,4),%ymm6,%ymm1 vpminsd %ymm3,%ymm2,%ymm4 vpmaxsd %ymm3,%ymm2,%ymm2 vpminsd %ymm1,%ymm0,%ymm3 vpmaxsd %ymm1,%ymm0,%ymm0 vmovdqu %ymm4,(%r12,%rax,4) vmovdqu %ymm2,32(%r12,%rax,4) vmovdqu %ymm3,64(%r12,%rax,4) vmovdqu %ymm0,96(%r12,%rax,4) add $32,%rax cmp %rax,%r15 jg .L50 jmp .L56 .L131: cmp $8,%rcx jne .L56 xor %eax,%eax .L57: vmovdqu 
32(%r12,%rax,4),%ymm7 vpmaxsd (%r12,%rax,4),%ymm7,%ymm0 vpminsd (%r12,%rax,4),%ymm7,%ymm1 vmovdqu %ymm0,32(%r12,%rax,4) vmovdqu %ymm1,(%r12,%rax,4) add $16,%rax cmp %rax,%r15 jg .L57 jmp .L56 .L59: mov -176(%rbp),%r10 vmovdqu (%r12,%rax,4),%ymm5 vpminsd (%r14,%rax,4),%ymm5,%ymm6 vpmaxsd (%r14,%rax,4),%ymm5,%ymm15 vmovdqu (%r10,%rax,4),%ymm5 mov -296(%rbp),%r10 vmovdqu (%r10,%rax,4),%ymm7 mov -288(%rbp),%r10 vmovdqa %ymm5,-240(%rbp) vmovdqa %ymm7,-272(%rbp) vmovdqu (%r10,%rax,4),%ymm7 mov -112(%rbp),%r10 vmovdqa -272(%rbp),%ymm5 vpminsd -240(%rbp),%ymm5,%ymm1 vpmaxsd -240(%rbp),%ymm5,%ymm5 vmovdqa %ymm7,-240(%rbp) vmovdqa -240(%rbp),%ymm4 vpmaxsd (%r11,%rax,4),%ymm4,%ymm0 vmovdqu (%r10,%rax,4),%ymm4 vpminsd %ymm1,%ymm6,%ymm8 mov -144(%rbp),%r10 vmovdqa -240(%rbp),%ymm7 vpmaxsd %ymm1,%ymm6,%ymm6 vpminsd %ymm5,%ymm15,%ymm1 vmovdqa %ymm4,-240(%rbp) vpminsd (%r11,%rax,4),%ymm7,%ymm7 vpmaxsd %ymm5,%ymm15,%ymm15 vmovdqu (%r10,%rax,4),%ymm4 vmovdqa %ymm4,-272(%rbp) vmovdqa -272(%rbp),%ymm4 vpminsd -240(%rbp),%ymm4,%ymm3 vpmaxsd -240(%rbp),%ymm4,%ymm4 vpminsd %ymm3,%ymm7,%ymm2 vpmaxsd %ymm3,%ymm7,%ymm3 vpminsd %ymm4,%ymm0,%ymm7 vpmaxsd %ymm4,%ymm0,%ymm0 vpminsd %ymm2,%ymm8,%ymm14 vpminsd %ymm7,%ymm1,%ymm13 vpminsd %ymm3,%ymm6,%ymm12 vpminsd %ymm0,%ymm15,%ymm11 vmovdqa %ymm14,%ymm9 vpmaxsd %ymm3,%ymm6,%ymm6 vpmaxsd %ymm2,%ymm8,%ymm2 vmovdqa %ymm13,%ymm8 vpmaxsd %ymm7,%ymm1,%ymm1 vpmaxsd %ymm0,%ymm15,%ymm0 vmovdqa %ymm6,-240(%rbp) vmovdqa %ymm2,%ymm5 vmovdqa -240(%rbp),%ymm3 vmovdqa %ymm1,%ymm4 vmovdqa %ymm12,%ymm7 vmovdqa %ymm11,%ymm6 vmovdqa %ymm0,%ymm15 test %ecx,%ecx je .L58 vpxor %ymm14,%ymm10,%ymm9 vpxor %ymm13,%ymm10,%ymm8 vpxor %ymm12,%ymm10,%ymm7 vpxor %ymm11,%ymm10,%ymm6 vpxor %ymm2,%ymm10,%ymm5 vpxor %ymm1,%ymm10,%ymm4 vpxor %ymm3,%ymm10,%ymm3 vpxor %ymm0,%ymm10,%ymm15 .L58: mov -200(%rbp),%r10 vmovdqu %ymm9,(%r12,%rax,4) vmovdqu %ymm8,(%r10,%rax,4) mov -192(%rbp),%r10 vmovdqu %ymm7,(%r10,%rax,4) mov -184(%rbp),%r10 vmovdqu %ymm6,(%r10,%rax,4) mov -168(%rbp),%r10 vmovdqu %ymm5,(%r10,%rax,4) mov -160(%rbp),%r10 vmovdqu %ymm4,(%r10,%rax,4) mov -152(%rbp),%r10 vmovdqu %ymm3,(%r10,%rax,4) mov -280(%rbp),%r10 vmovdqu %ymm15,(%r10,%rax,4) add $8,%rax .L60: cmp %rax,%rdi jg .L59 xor $1,%ecx lea (%rdx,%r9),%rdi .L62: mov %rdi,%r9 sub %rbx,%r9 mov %r9,%rax cmp %r9,%r8 jg .L60 xor -204(%rbp),%ecx add %rdx,%rsi add %rdx,%r8 .L53: cmp -80(%rbp),%rsi jge .L61 lea (%rsi,%rbx),%rdi jmp .L62 .L61: salq $4,%rbx cmp %r15,%rbx je .L63 mov %rdx,%rbx jmp .L64 .L46: cmp $4,%r14 jne .L132 mov %r12,%rax .L65: cmp -144(%rbp),%rax je .L72 vpxor 32(%rax),%ymm12,%ymm0 vpxor (%rax),%ymm12,%ymm1 add $64,%rax vmovdqu %ymm1,-64(%rax) vmovdqu %ymm0,-32(%rax) jmp .L65 .L72: mov -112(%rbp),%rbx jmp .L68 .L132: mov %r12,%rax cmp $2,%r14 jne .L70 .L69: cmp -144(%rbp),%rax je .L72 vpxor 32(%rax),%ymm10,%ymm2 vpxor (%rax),%ymm10,%ymm1 add $64,%rax vperm2i128 $32,%ymm2,%ymm1,%ymm0 vperm2i128 $49,%ymm2,%ymm1,%ymm1 vpminsd %ymm1,%ymm0,%ymm2 vpmaxsd %ymm1,%ymm0,%ymm0 vperm2i128 $32,%ymm0,%ymm2,%ymm1 vperm2i128 $49,%ymm0,%ymm2,%ymm0 vmovdqu %ymm1,-64(%rax) vmovdqu %ymm0,-32(%rax) jmp .L69 .L70: cmp -144(%rbp),%rax je .L72 vpxor 32(%rax),%ymm11,%ymm2 vpxor (%rax),%ymm11,%ymm1 add $64,%rax vperm2i128 $32,%ymm2,%ymm1,%ymm0 vperm2i128 $49,%ymm2,%ymm1,%ymm1 vpunpcklqdq %ymm1,%ymm0,%ymm2 vpunpckhqdq %ymm1,%ymm0,%ymm0 vpminsd %ymm0,%ymm2,%ymm1 vpmaxsd %ymm0,%ymm2,%ymm2 vpunpcklqdq %ymm2,%ymm1,%ymm0 vpunpckhqdq %ymm2,%ymm1,%ymm1 vpminsd %ymm1,%ymm0,%ymm2 vpmaxsd %ymm1,%ymm0,%ymm0 vperm2i128 $32,%ymm0,%ymm2,%ymm1 vperm2i128 
$49,%ymm0,%ymm2,%ymm0 vmovdqu %ymm1,-64(%rax) vmovdqu %ymm0,-32(%rax) jmp .L70 .L137: cmp $32,%rbx jne .L75 .L74: mov %rbx,%rdx mov %r15,%rsi mov %r12,%rdi sar $3,%rbx sar $2,%rdx call int32_threestages vmovdqa .LC2(%rip),%ymm12 vmovdqa .LC3(%rip),%ymm11 vmovdqa .LC1(%rip),%ymm10 .L68: cmp $127,%rbx jle .L137 jmp .L74 .L139: sar $2,%rbx .L75: cmp $15,%rbx jle .L138 mov %rbx,%rcx xor %esi,%esi sarq %rcx imul $-8,%rcx,%rdi lea 0(,%rcx,4),%rdx lea (%r12,%rdx),%r11 lea (%r11,%rdx),%r10 lea (%r10,%rdx),%r8 lea (%rdi,%r8),%rax lea (%rax,%rdx),%r9 mov %rax,-136(%rbp) lea (%r9,%rdx),%rax mov %rax,-240(%rbp) .L76: cmp %r15,%rsi jge .L139 mov %rsi,%rax .L78: cmp %rcx,%rax jge .L140 vmovdqu (%r12,%rax,4),%ymm6 vmovdqu (%r11,%rax,4),%ymm5 vpminsd (%r10,%rax,4),%ymm6,%ymm2 vpminsd (%r8,%rax,4),%ymm5,%ymm3 mov -136(%rbp),%rdi vpmaxsd (%r10,%rax,4),%ymm6,%ymm0 vpmaxsd (%r8,%rax,4),%ymm5,%ymm1 vpminsd %ymm3,%ymm2,%ymm4 vpmaxsd %ymm3,%ymm2,%ymm2 vmovdqu %ymm4,(%r12,%rax,4) vmovdqu %ymm2,(%rdi,%rax,4) mov -240(%rbp),%rdi vpminsd %ymm1,%ymm0,%ymm3 vpmaxsd %ymm1,%ymm0,%ymm0 vmovdqu %ymm3,(%r9,%rax,4) vmovdqu %ymm0,(%rdi,%rax,4) add $8,%rax jmp .L78 .L140: add %rdx,%rsi add %rdx,%rcx jmp .L76 .L138: cmp $8,%rbx je .L109 .L83: mov -152(%rbp),%rdx mov -160(%rbp),%rcx xor %eax,%eax mov -168(%rbp),%rsi mov -176(%rbp),%rdi mov -184(%rbp),%r8 mov -192(%rbp),%r9 mov -200(%rbp),%r10 jmp .L81 .L109: xor %eax,%eax .L80: cmp %r15,%rax jge .L83 vmovdqu (%r12,%rax,4),%ymm5 vpminsd 32(%r12,%rax,4),%ymm5,%ymm1 vpmaxsd 32(%r12,%rax,4),%ymm5,%ymm0 vmovdqu %ymm1,(%r12,%rax,4) vmovdqu %ymm0,32(%r12,%rax,4) add $16,%rax jmp .L80 .L81: cmp -80(%rbp),%rax jge .L141 vmovdqu (%rdi),%ymm7 add $32,%r10 add $32,%r9 add $32,%r8 add $32,%rdi add $32,%rsi add $32,%rcx add $32,%rdx vmovdqu (%r12,%rax,4),%ymm5 vmovdqu -32(%r9),%ymm6 vpminsd -32(%r10),%ymm5,%ymm3 vpmaxsd -32(%r10),%ymm5,%ymm1 vpminsd -32(%r8),%ymm6,%ymm2 vpmaxsd -32(%r8),%ymm6,%ymm0 vpminsd -32(%rsi),%ymm7,%ymm7 vmovdqu -32(%rcx),%ymm5 vmovdqu -32(%rdi),%ymm6 vpmaxsd -32(%rdx),%ymm5,%ymm4 vpminsd %ymm2,%ymm3,%ymm9 vpmaxsd -32(%rsi),%ymm6,%ymm8 vpminsd -32(%rdx),%ymm5,%ymm6 vpminsd %ymm0,%ymm1,%ymm13 vpmaxsd %ymm2,%ymm3,%ymm2 vpminsd %ymm6,%ymm7,%ymm5 vpminsd %ymm4,%ymm8,%ymm3 vpmaxsd %ymm6,%ymm7,%ymm6 vpmaxsd %ymm0,%ymm1,%ymm0 vpmaxsd %ymm4,%ymm8,%ymm4 vpminsd %ymm5,%ymm9,%ymm1 vpminsd %ymm6,%ymm2,%ymm8 vpminsd %ymm3,%ymm13,%ymm7 vmovdqu %ymm1,(%r12,%rax,4) add $8,%rax vpmaxsd %ymm6,%ymm2,%ymm2 vpmaxsd %ymm5,%ymm9,%ymm5 vmovdqu %ymm7,-32(%r10) vpminsd %ymm4,%ymm0,%ymm6 vpmaxsd %ymm3,%ymm13,%ymm3 vmovdqu %ymm8,-32(%r9) vpmaxsd %ymm4,%ymm0,%ymm0 vmovdqu %ymm6,-32(%r8) vmovdqu %ymm5,-32(%rdi) vmovdqu %ymm3,-32(%rsi) vmovdqu %ymm2,-32(%rcx) vmovdqu %ymm0,-32(%rdx) jmp .L81 .L141: sarq %r14 decl -272(%rbp) jne .L46 mov %r12,%rax xor %edx,%edx vpcmpeqd %ymm5,%ymm5,%ymm5 .L85: cmp %r15,%rdx jge .L89 vmovdqu (%rax),%ymm7 vpunpckldq 32(%rax),%ymm7,%ymm12 vpunpckhdq 32(%rax),%ymm7,%ymm6 vmovdqu 64(%rax),%ymm7 vpunpckldq 96(%rax),%ymm7,%ymm2 vpunpckhdq 96(%rax),%ymm7,%ymm4 vmovdqu 128(%rax),%ymm7 vpunpckldq 160(%rax),%ymm7,%ymm1 vpunpckhdq 160(%rax),%ymm7,%ymm0 vpunpcklqdq %ymm2,%ymm12,%ymm8 vpunpcklqdq %ymm4,%ymm6,%ymm9 cmpl $0,-116(%rbp) vmovdqu 192(%rax),%ymm7 vpunpckhqdq %ymm2,%ymm12,%ymm12 vpunpckhqdq %ymm4,%ymm6,%ymm4 vpunpckldq 224(%rax),%ymm7,%ymm10 vpunpckhdq 224(%rax),%ymm7,%ymm3 vpunpcklqdq %ymm10,%ymm1,%ymm11 vpunpckhqdq %ymm10,%ymm1,%ymm1 vpunpcklqdq %ymm3,%ymm0,%ymm7 vpunpckhqdq %ymm3,%ymm0,%ymm0 je .L86 vpxor %ymm5,%ymm12,%ymm12 vpxor %ymm5,%ymm4,%ymm4 vpxor 
%ymm5,%ymm1,%ymm1 vpxor %ymm5,%ymm0,%ymm0 jmp .L87 .L86: vpxor %ymm5,%ymm8,%ymm8 vpxor %ymm5,%ymm9,%ymm9 vpxor %ymm5,%ymm11,%ymm11 vpxor %ymm5,%ymm7,%ymm7 .L87: vperm2i128 $32,%ymm11,%ymm8,%ymm3 vperm2i128 $32,%ymm1,%ymm12,%ymm6 vperm2i128 $32,%ymm7,%ymm9,%ymm10 add $64,%rdx vperm2i128 $32,%ymm0,%ymm4,%ymm13 vperm2i128 $49,%ymm11,%ymm8,%ymm11 vperm2i128 $49,%ymm7,%ymm9,%ymm9 add $256,%rax vperm2i128 $49,%ymm1,%ymm12,%ymm1 vperm2i128 $49,%ymm0,%ymm4,%ymm0 vpmaxsd %ymm6,%ymm3,%ymm2 vpminsd %ymm6,%ymm3,%ymm4 vpminsd %ymm1,%ymm11,%ymm7 vpmaxsd %ymm13,%ymm10,%ymm3 vpminsd %ymm13,%ymm10,%ymm8 vpmaxsd %ymm1,%ymm11,%ymm1 vpminsd %ymm0,%ymm9,%ymm10 vpmaxsd %ymm0,%ymm9,%ymm0 vpminsd %ymm8,%ymm4,%ymm11 vpminsd %ymm3,%ymm2,%ymm9 vpmaxsd %ymm8,%ymm4,%ymm8 vpminsd %ymm10,%ymm7,%ymm6 vpmaxsd %ymm10,%ymm7,%ymm4 vpmaxsd %ymm3,%ymm2,%ymm2 vpminsd %ymm0,%ymm1,%ymm3 vpmaxsd %ymm0,%ymm1,%ymm1 vpminsd %ymm6,%ymm11,%ymm10 vpmaxsd %ymm6,%ymm11,%ymm0 vpminsd %ymm3,%ymm9,%ymm7 vpmaxsd %ymm3,%ymm9,%ymm6 vpminsd %ymm4,%ymm8,%ymm3 vpminsd %ymm1,%ymm2,%ymm9 vpmaxsd %ymm4,%ymm8,%ymm4 vpunpckldq %ymm7,%ymm10,%ymm8 vpmaxsd %ymm1,%ymm2,%ymm2 vpunpckhdq %ymm7,%ymm10,%ymm7 vpunpckldq %ymm9,%ymm3,%ymm1 vpunpckhdq %ymm9,%ymm3,%ymm3 vpunpckldq %ymm6,%ymm0,%ymm9 vpunpckhdq %ymm6,%ymm0,%ymm6 vpunpckldq %ymm2,%ymm4,%ymm0 vpunpckhdq %ymm2,%ymm4,%ymm2 vpunpcklqdq %ymm3,%ymm7,%ymm10 vpunpcklqdq %ymm1,%ymm8,%ymm4 vpunpcklqdq %ymm0,%ymm9,%ymm13 vpunpckhqdq %ymm1,%ymm8,%ymm8 vpunpckhqdq %ymm3,%ymm7,%ymm3 vpunpckhqdq %ymm0,%ymm9,%ymm1 vpunpcklqdq %ymm2,%ymm6,%ymm7 vpunpckhqdq %ymm2,%ymm6,%ymm0 vperm2i128 $32,%ymm13,%ymm4,%ymm12 vperm2i128 $32,%ymm1,%ymm8,%ymm11 vperm2i128 $32,%ymm0,%ymm3,%ymm6 vperm2i128 $32,%ymm7,%ymm10,%ymm9 vperm2i128 $49,%ymm13,%ymm4,%ymm4 vmovdqu %ymm12,-256(%rax) vperm2i128 $49,%ymm1,%ymm8,%ymm1 vperm2i128 $49,%ymm7,%ymm10,%ymm2 vperm2i128 $49,%ymm0,%ymm3,%ymm0 vmovdqu %ymm11,-224(%rax) vmovdqu %ymm9,-192(%rax) vmovdqu %ymm6,-160(%rax) vmovdqu %ymm4,-128(%rax) vmovdqu %ymm1,-96(%rax) vmovdqu %ymm2,-64(%rax) vmovdqu %ymm0,-32(%rax) jmp .L85 .L142: cmpq $32,-112(%rbp) jne .L94 .L93: mov -112(%rbp),%rcx sar $2,%rcx lea 0(,%rcx,4),%rdx lea 0(,%rcx,8),%rax mov %rcx,-136(%rbp) lea (%r12,%rdx),%r9 mov %rax,-184(%rbp) imul $-24,%rcx,%rax lea (%r9,%rdx),%r14 lea (%r14,%rdx),%rsi lea (%rsi,%rdx),%rbx lea (%rbx,%rdx),%r10 lea (%r10,%rdx),%r8 lea (%r8,%rdx),%rdi add %rdi,%rax mov %rax,-176(%rbp) add %rdx,%rax mov %rax,-168(%rbp) add %rdx,%rax lea (%rax,%rdx),%r11 mov %rax,-160(%rbp) lea (%r11,%rdx),%rax mov %rax,-200(%rbp) add %rdx,%rax add %rax,%rdx mov %rax,-144(%rbp) mov %rdx,-192(%rbp) .L90: mov -136(%rbp),%rax sub %rcx,%rax cmp %rax,%r15 jg .L92 sarq $3,-112(%rbp) .L89: cmpq $127,-112(%rbp) jle .L142 jmp .L93 .L92: cmp -136(%rbp),%rax jge .L143 vmovdqu (%r12,%rax,4),%ymm6 vpminsd (%rbx,%rax,4),%ymm6,%ymm7 vpmaxsd (%rbx,%rax,4),%ymm6,%ymm4 vmovdqu (%r9,%rax,4),%ymm6 vpminsd (%r10,%rax,4),%ymm6,%ymm1 vpmaxsd (%r10,%rax,4),%ymm6,%ymm0 vmovdqu (%r14,%rax,4),%ymm6 vpminsd (%r8,%rax,4),%ymm6,%ymm5 vpmaxsd (%r8,%rax,4),%ymm6,%ymm3 vmovdqu (%rsi,%rax,4),%ymm6 vpminsd (%rdi,%rax,4),%ymm6,%ymm6 vpminsd %ymm5,%ymm7,%ymm9 vmovdqu (%rsi,%rax,4),%ymm2 vpmaxsd %ymm5,%ymm7,%ymm5 mov -176(%rbp),%rdx vpminsd %ymm3,%ymm4,%ymm8 vpminsd %ymm6,%ymm1,%ymm7 vpmaxsd %ymm3,%ymm4,%ymm3 vpminsd %ymm7,%ymm9,%ymm10 vpmaxsd %ymm7,%ymm9,%ymm4 vpmaxsd (%rdi,%rax,4),%ymm2,%ymm2 vpmaxsd %ymm6,%ymm1,%ymm1 vmovdqu %ymm10,(%r12,%rax,4) vmovdqu %ymm4,(%rdx,%rax,4) mov -168(%rbp),%rdx vpminsd %ymm1,%ymm5,%ymm9 vpmaxsd %ymm1,%ymm5,%ymm1 vpminsd %ymm2,%ymm0,%ymm6 
vpmaxsd %ymm2,%ymm0,%ymm0 vmovdqu %ymm9,(%rdx,%rax,4) vpminsd %ymm6,%ymm8,%ymm7 vpmaxsd %ymm6,%ymm8,%ymm2 mov -160(%rbp),%rdx vpminsd %ymm0,%ymm3,%ymm5 vpmaxsd %ymm0,%ymm3,%ymm3 vmovdqu %ymm1,(%rdx,%rax,4) mov -200(%rbp),%rdx vmovdqu %ymm7,(%r11,%rax,4) vmovdqu %ymm2,(%rdx,%rax,4) mov -144(%rbp),%rdx vmovdqu %ymm5,(%rdx,%rax,4) mov -192(%rbp),%rdx vmovdqu %ymm3,(%rdx,%rax,4) add $8,%rax jmp .L92 .L143: mov -184(%rbp),%rdx add %rdx,-136(%rbp) jmp .L90 .L145: sarq $2,-112(%rbp) .L94: cmpq $15,-112(%rbp) jle .L144 mov -112(%rbp),%rcx xor %esi,%esi sarq %rcx imul $-8,%rcx,%rdi lea 0(,%rcx,4),%rdx lea (%r12,%rdx),%r11 lea (%r11,%rdx),%r10 lea (%r10,%rdx),%r8 add %r8,%rdi lea (%rdi,%rdx),%r9 lea (%r9,%rdx),%rbx .L95: cmp %r15,%rsi jge .L145 mov %rsi,%rax .L97: cmp %rcx,%rax jge .L146 vmovdqu (%r12,%rax,4),%ymm5 vpminsd (%r10,%rax,4),%ymm5,%ymm2 vpmaxsd (%r10,%rax,4),%ymm5,%ymm0 vmovdqu (%r11,%rax,4),%ymm5 vpminsd (%r8,%rax,4),%ymm5,%ymm3 vpmaxsd (%r8,%rax,4),%ymm5,%ymm1 vpminsd %ymm3,%ymm2,%ymm4 vpmaxsd %ymm3,%ymm2,%ymm2 vpminsd %ymm1,%ymm0,%ymm3 vpmaxsd %ymm1,%ymm0,%ymm0 vmovdqu %ymm4,(%r12,%rax,4) vmovdqu %ymm2,(%rdi,%rax,4) vmovdqu %ymm3,(%r9,%rax,4) vmovdqu %ymm0,(%rbx,%rax,4) add $8,%rax jmp .L97 .L146: add %rdx,%rsi add %rdx,%rcx jmp .L95 .L144: cmpq $8,-112(%rbp) je .L111 .L102: mov -152(%rbp),%rdx add -128(%rbp),%rdx xor %ecx,%ecx vpcmpeqd %ymm6,%ymm6,%ymm6 lea (%rdx,%r13),%r10 lea (%r10,%r13),%r9 lea (%r9,%r13),%r8 lea (%r8,%r13),%rdi lea (%rdi,%r13),%rsi lea (%rsi,%r13),%rax jmp .L100 .L111: xor %eax,%eax .L99: cmp %r15,%rax jge .L102 vmovdqu (%r12,%rax,4),%ymm5 vpminsd 32(%r12,%rax,4),%ymm5,%ymm1 vpmaxsd 32(%r12,%rax,4),%ymm5,%ymm0 vmovdqu %ymm1,(%r12,%rax,4) vmovdqu %ymm0,32(%r12,%rax,4) add $16,%rax jmp .L99 .L104: vmovdqu (%r10),%ymm7 vmovdqu (%r12,%rcx,4),%ymm4 vpminsd (%r9),%ymm7,%ymm3 vpminsd (%rdx),%ymm4,%ymm5 vpmaxsd (%r9),%ymm7,%ymm2 vpmaxsd (%rdx),%ymm4,%ymm4 vmovdqu (%r8),%ymm7 vmovdqu (%rsi),%ymm14 vpminsd %ymm3,%ymm5,%ymm11 vpmaxsd %ymm3,%ymm5,%ymm3 vpminsd (%rdi),%ymm7,%ymm1 vpminsd %ymm2,%ymm4,%ymm10 cmpl $0,-116(%rbp) vpmaxsd (%rdi),%ymm7,%ymm0 vmovdqu (%rsi),%ymm7 vpmaxsd %ymm2,%ymm4,%ymm2 vpminsd (%rax),%ymm7,%ymm7 vpmaxsd (%rax),%ymm14,%ymm9 vpminsd %ymm7,%ymm1,%ymm8 vpmaxsd %ymm7,%ymm1,%ymm1 vpminsd %ymm9,%ymm0,%ymm7 vpmaxsd %ymm9,%ymm0,%ymm0 vpminsd %ymm8,%ymm11,%ymm5 vpminsd %ymm1,%ymm3,%ymm9 vpminsd %ymm7,%ymm10,%ymm12 vpmaxsd %ymm1,%ymm3,%ymm3 vpminsd %ymm0,%ymm2,%ymm4 vpmaxsd %ymm8,%ymm11,%ymm8 vpmaxsd %ymm0,%ymm2,%ymm2 vpmaxsd %ymm7,%ymm10,%ymm7 vpunpckldq %ymm8,%ymm5,%ymm11 vpunpckldq %ymm7,%ymm12,%ymm10 vpunpckhdq %ymm8,%ymm5,%ymm8 vpunpckhdq %ymm7,%ymm12,%ymm7 vpunpckhdq %ymm3,%ymm9,%ymm5 vpunpckldq %ymm2,%ymm4,%ymm1 vpunpckldq %ymm3,%ymm9,%ymm0 vpunpckhdq %ymm2,%ymm4,%ymm4 vpunpcklqdq %ymm0,%ymm11,%ymm3 vpunpckhqdq %ymm0,%ymm11,%ymm9 vpunpcklqdq %ymm5,%ymm8,%ymm2 vpunpcklqdq %ymm4,%ymm7,%ymm11 vpunpckhqdq %ymm5,%ymm8,%ymm5 vpunpcklqdq %ymm1,%ymm10,%ymm12 vpunpckhqdq %ymm4,%ymm7,%ymm0 vpunpckhqdq %ymm1,%ymm10,%ymm1 vperm2i128 $32,%ymm11,%ymm2,%ymm8 vperm2i128 $32,%ymm12,%ymm3,%ymm10 vperm2i128 $32,%ymm1,%ymm9,%ymm7 vperm2i128 $32,%ymm0,%ymm5,%ymm4 vperm2i128 $49,%ymm12,%ymm3,%ymm3 vperm2i128 $49,%ymm11,%ymm2,%ymm2 vperm2i128 $49,%ymm1,%ymm9,%ymm1 vperm2i128 $49,%ymm0,%ymm5,%ymm0 je .L103 vpxor %ymm6,%ymm10,%ymm10 vpxor %ymm6,%ymm8,%ymm8 vpxor %ymm6,%ymm7,%ymm7 vpxor %ymm6,%ymm4,%ymm4 vpxor %ymm6,%ymm3,%ymm3 vpxor %ymm6,%ymm2,%ymm2 vpxor %ymm6,%ymm1,%ymm1 vpxor %ymm6,%ymm0,%ymm0 .L103: add $32,%rdx add $32,%r10 add $32,%r9 add $32,%r8 vmovdqu 
%ymm10,(%r12,%rcx,4) add $32,%rdi add $8,%rcx add $32,%rsi vmovdqu %ymm3,-32(%rdx) add $32,%rax vmovdqu %ymm8,-32(%r10) vmovdqu %ymm2,-32(%r9) vmovdqu %ymm7,-32(%r8) vmovdqu %ymm1,-32(%rdi) vmovdqu %ymm4,-32(%rsi) vmovdqu %ymm0,-32(%rax) .L100: cmp -80(%rbp),%rcx jl .L104 .L35: add $264,%rsp pop %rbx pop %r12 pop %r13 pop %r14 pop %r15 pop %rbp lea -16(%r13),%rsp pop %r13 ret .endfn int32_sort_2power .rodata.cst32 .LC0: .quad -1,0,-1,0 .LC1: .quad 0,-1,-1,0 .LC2: .quad -1,-1,0,0 .LC3: .quad -4294967296,4294967295,-4294967296,4294967295 .LC4: .quad 0x7fffffff7fffffff .quad 0x7fffffff7fffffff .quad 0x7fffffff7fffffff .quad 0x7fffffff7fffffff
45162
2089
jart/cosmopolitan
false
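
The AVX2 kernel above (djbsort_avx2, with its minmax_vector and int32_sort_2power helpers) is built almost entirely from one primitive: a vectorized compare-exchange, where vpminsd/vpmaxsd leave the smaller of each lane pair in one vector and the larger in the other. A scalar sketch of that primitive, for orientation (minmax_scalar is an illustrative name, not a symbol from this file):

#include <stddef.h>
#include <stdint.h>

/* After this runs, a[i] <= b[i] for every i, which is the invariant each
   round of the sorting network establishes with vpminsd/vpmaxsd. */
static void minmax_scalar(int32_t *a, int32_t *b, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    int32_t lo = a[i] < b[i] ? a[i] : b[i];
    int32_t hi = a[i] < b[i] ? b[i] : a[i];
    a[i] = lo;
    b[i] = hi;
  }
}
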
cosmopolitan/libc/nexgen32e/mul6x6adx.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2021 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" // Computes 768-bit product of 384-bit and 384-bit numbers. // // Instructions: 152 // Total Cycles: 65 // Total uOps: 260 // uOps Per Cycle: 4.00 // IPC: 2.34 // Block RThroughput: 43.3 // // @param rdi receives 8 quadword result // @param rsi is left hand side which must have 4 quadwords // @param rdx is right hand side which must have 4 quadwords // @note words are host endian while array is little endian // @mayalias Mul6x6Adx: push %rbp mov %rsp,%rbp .profilable sub $64,%rsp mov %r15,-8(%rbp) mov %r14,-16(%rbp) mov %r13,-24(%rbp) mov %r12,-32(%rbp) mov %rbx,-40(%rbp) mov %rdx,%rbx mov (%rdx),%rdx mulx (%rsi),%rcx,%rax mulx 8(%rsi),%rdx,%r12 mov %rcx,-48(%rbp) add %rdx,%rax mov (%rbx),%rdx mulx 16(%rsi),%rdx,%r15 adc %rdx,%r12 mov (%rbx),%rdx mulx 24(%rsi),%rdx,%r10 adc %rdx,%r15 mov (%rbx),%rdx mulx 32(%rsi),%rdx,%r9 adc %rdx,%r10 mov (%rbx),%rdx mulx 40(%rsi),%rdx,%rcx adc %rdx,%r9 mov 8(%rbx),%rdx adc $0,%rcx mulx (%rsi),%r13,%r11 xor %r8d,%r8d adox %r13,%rax adcx %r11,%r12 mov %rax,-56(%rbp) mulx 8(%rsi),%r11,%rax adox %r11,%r12 adcx %rax,%r15 mov %r12,%r14 mulx 16(%rsi),%r11,%rax adox %r11,%r15 adcx %rax,%r10 mulx 24(%rsi),%r11,%rax adox %r11,%r10 adcx %rax,%r9 mulx 32(%rsi),%r11,%rax adox %r11,%r9 adcx %rax,%rcx mulx 40(%rsi),%rdx,%rax adox %rdx,%rcx adcx %r8,%rax mov 16(%rbx),%rdx adox %r8,%rax mulx (%rsi),%r13,%r8 xor %r11d,%r11d adox %r13,%r14 mov %r14,-64(%rbp) adcx %r8,%r15 mulx 8(%rsi),%r12,%r8 adox %r12,%r15 adcx %r8,%r10 mulx 16(%rsi),%r12,%r8 adox %r12,%r10 adcx %r8,%r9 mulx 24(%rsi),%r12,%r8 adox %r12,%r9 adcx %r8,%rcx mulx 32(%rsi),%r12,%r8 adox %r12,%rcx adcx %r8,%rax mulx 40(%rsi),%rdx,%r8 adox %rdx,%rax adcx %r11,%r8 mov 24(%rbx),%rdx adox %r11,%r8 mulx (%rsi),%r13,%r11 xor %r12d,%r12d adox %r13,%r15 adcx %r11,%r10 mulx 8(%rsi),%r13,%r11 adox %r13,%r10 adcx %r11,%r9 mulx 16(%rsi),%r13,%r11 adox %r13,%r9 adcx %r11,%rcx mulx 24(%rsi),%r13,%r11 adox %r13,%rcx adcx %r11,%rax mulx 32(%rsi),%r13,%r11 adox %r13,%rax adcx %r11,%r8 mulx 40(%rsi),%rdx,%r11 adox %rdx,%r8 mov 32(%rbx),%rdx adcx %r12,%r11 mulx (%rsi),%r14,%r13 adox %r12,%r11 xor %r12d,%r12d adox %r14,%r10 adcx %r13,%r9 mulx 8(%rsi),%r14,%r13 adox %r14,%r9 adcx %r13,%rcx mulx 16(%rsi),%r14,%r13 adox %r14,%rcx adcx %r13,%rax mulx 24(%rsi),%r14,%r13 adox %r14,%rax adcx %r13,%r8 mulx 32(%rsi),%r14,%r13 adox %r14,%r8 adcx %r13,%r11 mulx 40(%rsi),%rdx,%r13 adox %rdx,%r11 adcx %r12,%r13 mov 40(%rbx),%rdx adox %r12,%r13 
mulx (%rsi),%r14,%rbx xor %r12d,%r12d adox %r14,%r9 adcx %rbx,%rcx mulx 8(%rsi),%r14,%rbx adox %r14,%rcx adcx %rbx,%rax mulx 16(%rsi),%r14,%rbx adox %r14,%rax adcx %rbx,%r8 mulx 24(%rsi),%r14,%rbx adox %r14,%r8 adcx %rbx,%r11 mulx 32(%rsi),%r14,%rbx mulx 40(%rsi),%rsi,%rdx adox %r14,%r11 adcx %rbx,%r13 adox %rsi,%r13 adcx %r12,%rdx adox %r12,%rdx mov -48(%rbp),%rsi mov -56(%rbp),%rbx mov -64(%rbp),%r14 mov %rsi,(%rdi) mov %rbx,8(%rdi) mov %r14,16(%rdi) mov %r15,24(%rdi) mov %r10,32(%rdi) mov %r9,40(%rdi) mov %rcx,48(%rdi) mov %rax,56(%rdi) mov %r8,64(%rdi) mov %r11,72(%rdi) mov %r13,80(%rdi) mov %rdx,88(%rdi) mov -8(%rbp),%r15 mov -16(%rbp),%r14 mov -24(%rbp),%r13 mov -32(%rbp),%r12 mov -40(%rbp),%rbx leave ret .endfn Mul6x6Adx,globl .end SIMULATION 0123456789 0123456789 0123456789 Index 0123456789 0123456789 0123456789 01234 [0,0] DeER . . . . . . . . . . . . . movq %r15, -8(%rbp) [0,1] D=eER. . . . . . . . . . . . . movq %r14, -16(%rbp) [0,2] D==eER . . . . . . . . . . . . movq %r13, -24(%rbp) [0,3] D===eER . . . . . . . . . . . . movq %r12, -32(%rbp) [0,4] D====eER . . . . . . . . . . . . movq %rbx, -40(%rbp) [0,5] DeE----R . . . . . . . . . . . . movq %rdx, %rbx [0,6] .DeeeeeER . . . . . . . . . . . . movq (%rdx), %rdx [0,7] .D=====eeeeeeeeeER . . . . . . . . . . mulxq (%rsi), %rcx, %rax [0,8] . D=====eeeeeeeeeER . . . . . . . . . . mulxq 8(%rsi), %rdx, %r12 [0,9] . D=======eE------R . . . . . . . . . . movq %rcx, -48(%rbp) [0,10] . D=============eER . . . . . . . . . . addq %rdx, %rax [0,11] . DeeeeeE--------R . . . . . . . . . . movq (%rbx), %rdx [0,12] . D=====eeeeeeeeeER. . . . . . . . . . mulxq 16(%rsi), %rdx, %r15 [0,13] . D=============eER. . . . . . . . . . adcq %rdx, %r12 [0,14] . DeeeeeE--------R. . . . . . . . . . movq (%rbx), %rdx [0,15] . D=====eeeeeeeeeER . . . . . . . . . mulxq 24(%rsi), %rdx, %r10 [0,16] . D=============eER . . . . . . . . . adcq %rdx, %r15 [0,17] . DeeeeeE--------R . . . . . . . . . movq (%rbx), %rdx [0,18] . D=====eeeeeeeeeER . . . . . . . . . mulxq 32(%rsi), %rdx, %r9 [0,19] . D=============eER . . . . . . . . . adcq %rdx, %r10 [0,20] . .DeeeeeE--------R . . . . . . . . . movq (%rbx), %rdx [0,21] . .D=====eeeeeeeeeER . . . . . . . . . mulxq 40(%rsi), %rdx, %rcx [0,22] . .D=============eER . . . . . . . . . adcq %rdx, %r9 [0,23] . . DeeeeeE--------R . . . . . . . . . movq 8(%rbx), %rdx [0,24] . . D=============eER . . . . . . . . . adcq $0, %rcx [0,25] . . D=====eeeeeeeeeER . . . . . . . . . mulxq (%rsi), %r13, %r11 [0,26] . . D--------------R . . . . . . . . . xorl %r8d, %r8d [0,27] . . D========eE----R . . . . . . . . . adoxq %r13, %rax [0,28] . . D=============eER. . . . . . . . . adcxq %r11, %r12 [0,29] . . D=========eE----R. . . . . . . . . movq %rax, -56(%rbp) [0,30] . . D====eeeeeeeeeER. . . . . . . . . mulxq 8(%rsi), %r11, %rax [0,31] . . D=============eER . . . . . . . . adoxq %r11, %r12 [0,32] . . D==============eER . . . . . . . . adcxq %rax, %r15 [0,33] . . D=============eER . . . . . . . . movq %r12, %r14 [0,34] . . D====eeeeeeeeeE-R . . . . . . . . mulxq 16(%rsi), %r11, %rax [0,35] . . D==============eER . . . . . . . . adoxq %r11, %r15 [0,36] . . .D==============eER . . . . . . . . adcxq %rax, %r10 [0,37] . . .D====eeeeeeeeeE--R . . . . . . . . mulxq 24(%rsi), %r11, %rax [0,38] . . .D===============eER. . . . . . . . adoxq %r11, %r10 [0,39] . . . D===============eER . . . . . . . adcxq %rax, %r9 [0,40] . . . D====eeeeeeeeeE---R . . . . . . . mulxq 32(%rsi), %r11, %rax [0,41] . . . D================eER . . . . . . . 
adoxq %r11, %r9 [0,42] . . . D================eER . . . . . . . adcxq %rax, %rcx [0,43] . . . D====eeeeeeeeeE----R . . . . . . . mulxq 40(%rsi), %rdx, %rax [0,44] . . . D=================eER . . . . . . . adoxq %rdx, %rcx [0,45] . . . D=================eER. . . . . . . adcxq %r8, %rax [0,46] . . . DeeeeeE-------------R. . . . . . . movq 16(%rbx), %rdx [0,47] . . . D==================eER . . . . . . adoxq %r8, %rax [0,48] . . . D====eeeeeeeeeE-----R . . . . . . mulxq (%rsi), %r13, %r8 [0,49] . . . D====E--------------R . . . . . . xorl %r11d, %r11d [0,50] . . . D=========eE--------R . . . . . . adoxq %r13, %r14 [0,51] . . . .D=========eE-------R . . . . . . movq %r14, -64(%rbp) [0,52] . . . .D============eE----R . . . . . . adcxq %r8, %r15 [0,53] . . . .D====eeeeeeeeeE----R . . . . . . mulxq 8(%rsi), %r12, %r8 [0,54] . . . . D============eE---R . . . . . . adoxq %r12, %r15 [0,55] . . . . D=============eE--R . . . . . . adcxq %r8, %r10 [0,56] . . . . D====eeeeeeeeeE---R . . . . . . mulxq 16(%rsi), %r12, %r8 [0,57] . . . . D=============eE-R . . . . . . adoxq %r12, %r10 [0,58] . . . . D==============eER . . . . . . adcxq %r8, %r9 [0,59] . . . . D====eeeeeeeeeE--R . . . . . . mulxq 24(%rsi), %r12, %r8 [0,60] . . . . D==============eER . . . . . . adoxq %r12, %r9 [0,61] . . . . D===============eER . . . . . . adcxq %r8, %rcx [0,62] . . . . D====eeeeeeeeeE---R . . . . . . mulxq 32(%rsi), %r12, %r8 [0,63] . . . . D===============eER . . . . . . adoxq %r12, %rcx [0,64] . . . . D================eER. . . . . . adcxq %r8, %rax [0,65] . . . . D====eeeeeeeeeE----R. . . . . . mulxq 40(%rsi), %rdx, %r8 [0,66] . . . . .D================eER . . . . . adoxq %rdx, %rax [0,67] . . . . .D=================eER . . . . . adcxq %r11, %r8 [0,68] . . . . .DeeeeeE-------------R . . . . . movq 24(%rbx), %rdx [0,69] . . . . .D==================eER . . . . . adoxq %r11, %r8 [0,70] . . . . . D====eeeeeeeeeE-----R . . . . . mulxq (%rsi), %r13, %r11 [0,71] . . . . . D====E--------------R . . . . . xorl %r12d, %r12d [0,72] . . . . . D===========eE------R . . . . . adoxq %r13, %r15 [0,73] . . . . . D============eE----R . . . . . adcxq %r11, %r10 [0,74] . . . . . D====eeeeeeeeeE----R . . . . . mulxq 8(%rsi), %r13, %r11 [0,75] . . . . . D=============eE---R . . . . . adoxq %r13, %r10 [0,76] . . . . . D=============eE--R . . . . . adcxq %r11, %r9 [0,77] . . . . . D====eeeeeeeeeE---R . . . . . mulxq 16(%rsi), %r13, %r11 [0,78] . . . . . D==============eE-R . . . . . adoxq %r13, %r9 [0,79] . . . . . D==============eER . . . . . adcxq %r11, %rcx [0,80] . . . . . D====eeeeeeeeeE--R . . . . . mulxq 24(%rsi), %r13, %r11 [0,81] . . . . . D===============eER . . . . . adoxq %r13, %rcx [0,82] . . . . . .D===============eER. . . . . adcxq %r11, %rax [0,83] . . . . . .D====eeeeeeeeeE---R. . . . . mulxq 32(%rsi), %r13, %r11 [0,84] . . . . . .D================eER . . . . adoxq %r13, %rax [0,85] . . . . . . D================eER . . . . adcxq %r11, %r8 [0,86] . . . . . . D====eeeeeeeeeE----R . . . . mulxq 40(%rsi), %rdx, %r11 [0,87] . . . . . . D=================eER . . . . adoxq %rdx, %r8 [0,88] . . . . . . DeeeeeE------------R . . . . movq 32(%rbx), %rdx [0,89] . . . . . . D=================eER . . . . adcxq %r12, %r11 [0,90] . . . . . . D=====eeeeeeeeeE----R . . . . mulxq (%rsi), %r14, %r13 [0,91] . . . . . . D=================eER. . . . adoxq %r12, %r11 [0,92] . . . . . . D-------------------R. . . . xorl %r12d, %r12d [0,93] . . . . . . D===========eE------R. . . . adoxq %r14, %r10 [0,94] . . . . . . D=============eE----R. . . . 
adcxq %r13, %r9 [0,95] . . . . . . D====eeeeeeeeeE----R. . . . mulxq 8(%rsi), %r14, %r13 [0,96] . . . . . . D=============eE---R. . . . adoxq %r14, %r9 [0,97] . . . . . . D==============eE--R. . . . adcxq %r13, %rcx [0,98] . . . . . . .D====eeeeeeeeeE---R. . . . mulxq 16(%rsi), %r14, %r13 [0,99] . . . . . . .D==============eE-R. . . . adoxq %r14, %rcx [0,100] . . . . . . .D===============eER. . . . adcxq %r13, %rax [0,101] . . . . . . . D====eeeeeeeeeE--R. . . . mulxq 24(%rsi), %r14, %r13 [0,102] . . . . . . . D===============eER . . . adoxq %r14, %rax [0,103] . . . . . . . D================eER . . . adcxq %r13, %r8 [0,104] . . . . . . . D====eeeeeeeeeE---R . . . mulxq 32(%rsi), %r14, %r13 [0,105] . . . . . . . D================eER . . . adoxq %r14, %r8 [0,106] . . . . . . . D=================eER . . . adcxq %r13, %r11 [0,107] . . . . . . . D====eeeeeeeeeE----R . . . mulxq 40(%rsi), %rdx, %r13 [0,108] . . . . . . . D=================eER. . . adoxq %rdx, %r11 [0,109] . . . . . . . D==================eER . . adcxq %r12, %r13 [0,110] . . . . . . . DeeeeeE-------------R . . movq 40(%rbx), %rdx [0,111] . . . . . . . D==================eER . . adoxq %r12, %r13 [0,112] . . . . . . . D=====eeeeeeeeeE-----R . . mulxq (%rsi), %r14, %rbx [0,113] . . . . . . . .D-------------------R . . xorl %r12d, %r12d [0,114] . . . . . . . .D===========eE------R . . adoxq %r14, %r9 [0,115] . . . . . . . .D=============eE----R . . adcxq %rbx, %rcx [0,116] . . . . . . . . D====eeeeeeeeeE----R . . mulxq 8(%rsi), %r14, %rbx [0,117] . . . . . . . . D=============eE---R . . adoxq %r14, %rcx [0,118] . . . . . . . . D==============eE--R . . adcxq %rbx, %rax [0,119] . . . . . . . . D====eeeeeeeeeE---R . . mulxq 16(%rsi), %r14, %rbx [0,120] . . . . . . . . D==============eE-R . . adoxq %r14, %rax [0,121] . . . . . . . . D===============eER . . adcxq %rbx, %r8 [0,122] . . . . . . . . D====eeeeeeeeeE--R . . mulxq 24(%rsi), %r14, %rbx [0,123] . . . . . . . . D===============eER . . adoxq %r14, %r8 [0,124] . . . . . . . . D================eER . . adcxq %rbx, %r11 [0,125] . . . . . . . . D====eeeeeeeeeE---R . . mulxq 32(%rsi), %r14, %rbx [0,126] . . . . . . . . .D====eeeeeeeeeE--R . . mulxq 40(%rsi), %rsi, %rdx [0,127] . . . . . . . . .D===============eER. . adoxq %r14, %r11 [0,128] . . . . . . . . .D================eER . adcxq %rbx, %r13 [0,129] . . . . . . . . . D================eER . adoxq %rsi, %r13 [0,130] . . . . . . . . . D=================eER . adcxq %r12, %rdx [0,131] . . . . . . . . . D==================eER. adoxq %r12, %rdx [0,132] . . . . . . . . . DeeeeeE--------------R. movq -48(%rbp), %rsi [0,133] . . . . . . . . . D=eeeeeE-------------R. movq -56(%rbp), %rbx [0,134] . . . . . . . . . D==eeeeeE------------R. movq -64(%rbp), %r14 [0,135] . . . . . . . . . D====eE-------------R. movq %rsi, (%rdi) [0,136] . . . . . . . . . D=====eE------------R. movq %rbx, 8(%rdi) [0,137] . . . . . . . . . D======eE-----------R. movq %r14, 16(%rdi) [0,138] . . . . . . . . . D=======eE----------R. movq %r15, 24(%rdi) [0,139] . . . . . . . . . D========eE---------R. movq %r10, 32(%rdi) [0,140] . . . . . . . . . D=========eE--------R. movq %r9, 40(%rdi) [0,141] . . . . . . . . . D=========eE-------R. movq %rcx, 48(%rdi) [0,142] . . . . . . . . . D==========eE------R. movq %rax, 56(%rdi) [0,143] . . . . . . . . . D===========eE-----R. movq %r8, 64(%rdi) [0,144] . . . . . . . . . D=============eE---R. movq %r11, 72(%rdi) [0,145] . . . . . . . . . D===============eE-R. movq %r13, 80(%rdi) [0,146] . . . . . . . . . 
D=================eER movq %rdx, 88(%rdi) [0,147] . . . . . . . . . DeeeeeE------------R movq -8(%rbp), %r15 [0,148] . . . . . . . . . D=eeeeeE-----------R movq -16(%rbp), %r14 [0,149] . . . . . . . . . D=eeeeeE-----------R movq -24(%rbp), %r13 [0,150] . . . . . . . . . D==eeeeeE----------R movq -32(%rbp), %r12 [0,151] . . . . . . . . . D==eeeeeE----------R movq -40(%rbp), %rbx
20260
351
jart/cosmopolitan
false
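
Mul6x6Adx is a straight-line schoolbook multiply: each of the six rows issues six mulx partial products, and the adcx/adox pair keeps two independent carry chains (CF and OF) live so consecutive rows never stall on flags; note the stores run out to 88(%rdi), i.e. the result occupies twelve quadwords (768 bits). A portable reference of the same 6x6 limb product, handy for differential testing against the assembly (mul6x6_ref is an illustrative helper that assumes a compiler with unsigned __int128):

#include <stdint.h>

/* o[0..11] = a[0..5] * b[0..5], little-endian 64-bit limbs. */
static void mul6x6_ref(uint64_t o[12], const uint64_t a[6],
                       const uint64_t b[6]) {
  uint64_t t[12] = {0};
  for (int i = 0; i < 6; ++i) {
    unsigned __int128 c = 0;
    for (int j = 0; j < 6; ++j) {
      c += (unsigned __int128)a[i] * b[j] + t[i + j];  /* cannot overflow */
      t[i + j] = (uint64_t)c;
      c >>= 64;                                        /* carry to next limb */
    }
    t[i + 6] = (uint64_t)c;
  }
  for (int i = 0; i < 12; ++i) o[i] = t[i];
}
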
cosmopolitan/libc/nexgen32e/savexmm.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
│vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for         │
│ any purpose with or without fee is hereby granted, provided that the         │
│ above copyright notice and this permission notice appear in all copies.      │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/macros.internal.h"
.privileged

//	Stores XMM registers to buffer.
//
//	@param	%rdi points to &(forcealign(16) uint8_t[256])[128]
//	@note	modern cpus have out-of-order execution engines
_savexmm:
	.leafprologue
	movaps	%xmm0,-0x80(%rdi)
	movaps	%xmm1,-0x70(%rdi)
	movaps	%xmm2,-0x60(%rdi)
	movaps	%xmm3,-0x50(%rdi)
	movaps	%xmm4,-0x40(%rdi)
	movaps	%xmm5,-0x30(%rdi)
	movaps	%xmm6,-0x20(%rdi)
	movaps	%xmm7,-0x10(%rdi)
	movaps	%xmm8,0x00(%rdi)
	movaps	%xmm9,0x10(%rdi)
	movaps	%xmm10,0x20(%rdi)
	movaps	%xmm11,0x30(%rdi)
	movaps	%xmm12,0x40(%rdi)
	movaps	%xmm13,0x50(%rdi)
	movaps	%xmm14,0x60(%rdi)
	movaps	%xmm15,0x70(%rdi)
	.leafepilogue
	.endfn	_savexmm,globl,hidden
2523
46
jart/cosmopolitan
false
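
_savexmm takes a pointer into the middle of its save area so all sixteen stores reach their slots with signed 8-bit displacements (-0x80 through 0x70), which keeps every movaps encoding short. A caller-side sketch of the buffer contract documented in the @param line (illustrative; the symbol is library-internal):

#include <stdint.h>

void _savexmm(void *); /* hidden symbol; callable within the library */

void demo(void) {
  _Alignas(16) uint8_t buf[256]; /* movaps traps on unaligned addresses */
  _savexmm(buf + 128);           /* xmm0 lands at buf+0, xmm15 at buf+240 */
}
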
cosmopolitan/libc/nexgen32e/memjmpinit.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
│vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for         │
│ any purpose with or without fee is hereby granted, provided that the         │
│ above copyright notice and this permission notice appear in all copies.      │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/nexgen32e/x86feature.h"
#include "libc/macros.internal.h"
.text.startup

//	Initializes jump table for memset() and memcpy().
//
//	@param	!ZF if required cpu vector extensions are available
//	@param	rdi is address of 64-bit jump table
//	@param	rsi is address of 8-bit jump initializers
//	@param	rdx is address of indirect branch
//	@param	ecx is size of jump table
memjmpinit:
	.leafprologue
	setnz	%r8b
	shl	%r8b
0:	xor	%eax,%eax
	lodsb
	add	%rdx,%rax
	stosq
	.loop	0b
	xor	%eax,%eax
	testb	X86_HAVE(ERMS)+kCpuids(%rip)
	setnz	%al
	or	%r8b,%al
	mov	(%rsi,%rax),%al
	add	%rdx,%rax
	stosq
	lodsq
	.leafepilogue
	.endfn	memjmpinit,globl,hidden
2497
49
jart/cosmopolitan
false
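
The lodsb/add/stosq loop widens one 8-bit initializer per iteration: each byte is a forward offset from the indirect-branch address in rdx, and the 64-bit sum becomes a jump-table entry; the tail then writes one extra dispatch slot chosen by OR-ing the ERMS cpuid bit with the vector-extension flag that arrived via ZF. A conceptual C rendering of the main loop (jmpinit_ref is an illustrative name):

#include <stdint.h>

static void jmpinit_ref(uint64_t *table, const uint8_t *init,
                        uintptr_t branch, unsigned n) {
  for (unsigned i = 0; i < n; ++i) {
    table[i] = branch + init[i]; /* widen 8-bit offset to absolute pointer */
  }
}
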
cosmopolitan/libc/nexgen32e/argc2.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2023 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for         │
│ any purpose with or without fee is hereby granted, provided that the         │
│ above copyright notice and this permission notice appear in all copies.      │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/runtime/runtime.h"

#ifndef __x86_64__
int __argc;
#endif /* __x86_64__ */
1927
25
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/gc.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
│vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for         │
│ any purpose with or without fee is hereby granted, provided that the         │
│ above copyright notice and this permission notice appear in all copies.      │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/macros.internal.h"
#include "libc/dce.h"
#include "libc/notice.inc"
	nop

//	Invokes deferred function calls.
//
//	This offers behavior similar to std::unique_ptr. Functions
//	overwrite their return addresses jumping here, and pushing
//	exactly one entry on the shadow stack below. Functions may
//	repeat that process multiple times, in which case the body
//	of this gadget loops and unwinds as a natural consequence.
//
//	@param	rax,rdx,xmm0,xmm1,st0,st1 is return value
//	@see	test/libc/runtime/gc_test.c
//	@threadsafe
__gc:
#ifdef __x86_64__
	mov	%fs:0,%rcx		// __get_tls()
	mov	0x18(%rcx),%rcx		// tls::garbages
	decl	(%rcx)			// --g->i
	mov	(%rcx),%r8d		// r8 = g->i
	mov	8(%rcx),%r9		// r9 = g->p
	js	9f
	shl	$5,%r8
	lea	(%r9,%r8),%r8
	mov	8(%r8),%r9
	mov	16(%r8),%rdi
	push	24(%r8)
	push	%rbp
	mov	%rsp,%rbp
	sub	$32,%rsp
	mov	%rax,-8(%rbp)
	mov	%rdx,-16(%rbp)
	movdqa	%xmm0,-32(%rbp)
	call	*%r9
	movdqa	-32(%rbp),%xmm0
	mov	-16(%rbp),%rdx
	mov	-8(%rbp),%rax
	leave
	ret
9:	ud2
	nop
#elif defined(__aarch64__)
	stp	x29,x30,[sp,-80]!
	mov	x29,sp
	stp	x0,x1,[sp,16]
	stp	x2,x3,[sp,32]
	stp	x4,x5,[sp,48]
	stp	x6,x7,[sp,64]

	// todo jart

	ldp	x0,x1,[sp,16]
	ldp	x2,x3,[sp,32]
	ldp	x4,x5,[sp,48]
	ldp	x6,x7,[sp,64]
	ldp	x29,x30,[sp],80
#endif /* __x86_64__ */
	.endfn	__gc,globl,hidden
3159
84
jart/cosmopolitan
false
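
Each shadow-stack record the gadget pops is 32 bytes (note the shl $5): the deferred callback sits at offset 8, its argument at offset 16, and the displaced return address at offset 24, which gets pushed back so ret resumes the real caller. In C terms the x86-64 path indexes something like the following (field names are inferred from the offsets above, not taken from a header):

#include <stdint.h>

struct Garbage {  /* one 32-byte record, indexed by the decremented count */
  uint64_t frame; /* offset 0: not read by __gc itself                    */
  uint64_t fn;    /* offset 8: deferred function, called indirectly       */
  uint64_t arg;   /* offset 16: single argument, passed in %rdi           */
  uint64_t ret;   /* offset 24: original return address, pushed for ret   */
};
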
cosmopolitan/libc/nexgen32e/longerjmp.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
│vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for         │
│ any purpose with or without fee is hereby granted, provided that the         │
│ above copyright notice and this permission notice appear in all copies.      │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/macros.internal.h"
.privileged

//	Loads previously saved processor state.
//
//	@param	rdi points to the jmp_buf
//	@param	rsi is returned by setlongerjmp() invocation
//	@noreturn
longerjmp:
	mov	$1,%eax
	mov	%rsi,%rdx
	mov	(%rdi),%rsp
	mov	8(%rdi),%rbx
	mov	16(%rdi),%rbp
	mov	24(%rdi),%r12
	mov	32(%rdi),%r13
	mov	40(%rdi),%r14
	mov	48(%rdi),%r15
	jmp	*56(%rdi)
	.endfn	longerjmp,globl
2241
39
jart/cosmopolitan
false
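
longerjmp() restores the seven callee-saved slots plus the stack pointer from the jmp_buf and jumps to the saved rip with rax=1 and the caller-chosen payload in rdx. A hedged usage sketch; it assumes the companion setlongerjmp() (not reproduced here) returns the rax:rdx pair as an axdx_t with a zero ax on first return:

#include <setjmp.h>
#include "libc/nexgen32e/nexgen32e.h" /* assumed home of the prototypes */

static jmp_buf jb;

void demo(void) {
  axdx_t r = setlongerjmp(jb); /* first return: r.ax == 0           */
  if (!r.ax) {
    longerjmp(jb, 42);         /* resumes above with ax=1, dx=42    */
  }
  /* here r.ax == 1 and r.dx == 42 */
}
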
cosmopolitan/libc/nexgen32e/ffs.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_FFS_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_FFS_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

int ffs(int) pureconst;
int ffsl(long) pureconst;
int ffsll(long long) pureconst;

#ifdef __GNUC__
#define ffs(u)   __builtin_ffs(u)
#define ffsl(u)  __builtin_ffsl(u)
#define ffsll(u) __builtin_ffsll(u)
#endif

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_FFS_H_ */
474
19
jart/cosmopolitan
false
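
ffs() is one-based: zero yields 0, anything else yields the index of its least significant set bit plus one, which is exactly the contract of the __builtin_ffs family the macros forward to. A worked example:

#include <strings.h> /* traditional home of ffs() */

int demo(void) {
  /* 0x28 is binary 101000: lowest set bit is bit 3, so ffs gives 4 */
  return ffs(0x28) + ffs(0); /* 4 + 0 == 4 */
}
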
cosmopolitan/libc/nexgen32e/uart.internal.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_UART_H_
#define COSMOPOLITAN_LIBC_NEXGEN32E_UART_H_

#define COM1 0x0
#define COM2 0x2
#define COM3 0x4
#define COM4 0x6

#define IRQ3 0x0b /* com2 interrupt number (irq3) */
#define IRQ4 0x0c /* com1 interrupt number (irq4) */

#define UART_DLAB (1 << 7) /* serial line conf mode bit */
#define UART_DLL  0        /* divisor latch register */
#define UART_DLM  1        /* divisor latch register */
#define UART_IIR  2        /* interrupt identification register */
#define UART_LCR  3        /* line control register */
#define UART_MCR  4        /* modem control register */
#define UART_LSR  5        /* line status register */

#define UART_TTYDA  (1 << 0) /* data available (rx ready) */
#define UART_TTYOE  (1 << 1) /* overrun error */
#define UART_TTYPE  (1 << 2) /* parity error */
#define UART_TTYFE  (1 << 3) /* framing error */
#define UART_TTYBSR (1 << 4) /* break signal received */
#define UART_TTYTXR (1 << 5) /* serial thr empty (tx ready) */
#define UART_TTYIDL (1 << 6) /* serial thr empty and line idle */
#define UART_TTYEDF (1 << 7) /* erroneous data in fifo */

#endif /* COSMOPOLITAN_LIBC_NEXGEN32E_UART_H_ */
1,231
27
jart/cosmopolitan
false
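A hedged sketch of how the divisor latch constants fit together on a 16550: raising UART_DLAB in the line control register remaps offsets 0 and 1 to UART_DLL/UART_DLM so the baud divisor can be written. The outb() wrapper is illustrative, and base must be the UART's I/O port base (e.g. 0x3f8 on a PC); note the COM1..COM4 constants above look like table indices rather than I/O ports.

static inline void outb(unsigned short port, unsigned char value) {
  asm volatile("outb\t%0,%1" : : "a"(value), "Nd"(port));
}

static void uart_set_baud(unsigned short base, unsigned short divisor) {
  outb(base + UART_LCR, UART_DLAB);       // expose the divisor latch
  outb(base + UART_DLL, divisor & 0xff);  // divisor low byte
  outb(base + UART_DLM, divisor >> 8);    // divisor high byte
  outb(base + UART_LCR, 0x03);            // 8n1, DLAB cleared
}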
cosmopolitan/libc/nexgen32e/ktens.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2023 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/nexgen32e/nexgen32e.h" const uint64_t kTens[] = { 1ull, 10ull, 100ull, 1000ull, 10000ull, 100000ull, 1000000ull, 10000000ull, 100000000ull, 1000000000ull, 10000000000ull, 100000000000ull, 1000000000000ull, 10000000000000ull, 100000000000000ull, 1000000000000000ull, 10000000000000000ull, 100000000000000000ull, 1000000000000000000ull, 10000000000000000000ull, };
2,295
43
jart/cosmopolitan
false
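One illustrative use of the table: counting the decimal digits of a uint64_t by comparison rather than division. The helper below is a sketch, not an API exported by the file above.

#include <stdint.h>
#include "libc/nexgen32e/nexgen32e.h"

static int count_digits(uint64_t x) {
  int n = 1;
  while (n < 20 && x >= kTens[n]) ++n;  // kTens[n] holds 10**n
  return n;  // count_digits(0) == 1, count_digits(1000000) == 7
}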
cosmopolitan/libc/nexgen32e/setjmp.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" // Saves cpu state. // // @param rdi points to jmp_buf // @return rax 0 when set and !0 when longjmp'd // @returnstwice // @assume system five nexgen32e abi conformant // @note code built w/ microsoft abi compiler can't call this // @see longjmp(), _gclongjmp() setjmp: #ifdef __x86_64__ lea 8(%rsp),%rax mov %rax,(%rdi) mov %rbx,8(%rdi) mov %rbp,16(%rdi) mov %r12,24(%rdi) mov %r13,32(%rdi) mov %r14,40(%rdi) mov %r15,48(%rdi) mov (%rsp),%rax mov %rax,56(%rdi) xor %eax,%eax #elif defined(__aarch64__) stp x19,x20,[x0,#0] stp x21,x22,[x0,#16] stp x23,x24,[x0,#32] stp x25,x26,[x0,#48] stp x27,x28,[x0,#64] stp x29,x30,[x0,#80] mov x2,sp str x2,[x0,#104] stp d8,d9,[x0,#112] stp d10,d11,[x0,#128] stp d12,d13,[x0,#144] stp d14,d15,[x0,#160] mov x0,#0 #endif ret .endfn setjmp,globl .alias setjmp,_setjmp
2,702
60
jart/cosmopolitan
false
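The canonical usage pattern these entry points implement: setjmp() yields zero when it records the context, and the longjmp() value when control is thrown back.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;

static void fail(void) {
  longjmp(env, 42);  // unwind; setjmp now "returns" 42
}

int main(void) {
  int rc;
  if ((rc = setjmp(env))) {  // 0 on the initial save
    printf("recovered: %d\n", rc);
    return 0;
  }
  fail();
  return 1;  // not reached
}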
cosmopolitan/libc/nexgen32e/x86compiler.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_X86COMPILER_H_ #define COSMOPOLITAN_LIBC_NEXGEN32E_X86COMPILER_H_ /** * @fileoverview x86 cpu feature compile-time requirement detection. * @see -march=native, -mavx2, etc. */ #ifdef __AES__ #define _X86_CC_AES 1 #else #define _X86_CC_AES 0 #endif #ifdef __AVX__ #define _X86_CC_AVX 1 #else #define _X86_CC_AVX 0 #endif #ifdef __AVX2__ #define _X86_CC_AVX2 1 #else #define _X86_CC_AVX2 0 #endif #ifdef __ABM__ #define _X86_CC_ABM 1 #else #define _X86_CC_ABM 0 #endif #ifdef __BMI__ #define _X86_CC_BMI 1 #else #define _X86_CC_BMI 0 #endif #ifdef __BMI2__ #define _X86_CC_BMI2 1 #else #define _X86_CC_BMI2 0 #endif #ifdef __FMA__ #define _X86_CC_FMA 1 #else #define _X86_CC_FMA 0 #endif #ifdef __ADX__ #define _X86_CC_ADX 1 #else #define _X86_CC_ADX 0 #endif #ifdef __PCLMUL__ #define _X86_CC_PCLMUL 1 #else #define _X86_CC_PCLMUL 0 #endif #ifdef __POPCNT__ #define _X86_CC_POPCNT 1 #else #define _X86_CC_POPCNT 0 #endif #ifdef __RDRND__ #define _X86_CC_RDRND 1 #else #define _X86_CC_RDRND 0 #endif #ifdef __RDSEED__ #define _X86_CC_RDSEED 1 #else #define _X86_CC_RDSEED 0 #endif #ifdef __SHA__ #define _X86_CC_SHA 1 #else #define _X86_CC_SHA 0 #endif #ifdef __SSSE3__ #define _X86_CC_SSSE3 1 #else #define _X86_CC_SSSE3 0 #endif #ifdef __SSE__ #define _X86_CC_SSE 1 #else #define _X86_CC_SSE 0 #endif #ifdef __SSE2__ #define _X86_CC_SSE2 1 #else #define _X86_CC_SSE2 0 #endif #ifdef __SSE3__ #define _X86_CC_SSE3 1 #else #define _X86_CC_SSE3 0 #endif #ifdef __SSE4_1__ #define _X86_CC_SSE4_1 1 #else #define _X86_CC_SSE4_1 0 #endif #ifdef __SSE4_2__ #define _X86_CC_SSE4_2 1 #else #define _X86_CC_SSE4_2 0 #endif #ifdef __XSAVE__ #define _X86_CC_XSAVE 1 #else #define _X86_CC_XSAVE 0 #endif #ifdef __CLFLUSHOPT__ #define _X86_CC_CLFLUSHOPT 1 #else #define _X86_CC_CLFLUSHOPT 0 #endif #ifdef __RDPID__ #define _X86_CC_RDPID 1 #else #define _X86_CC_RDPID 0 #endif #endif /* COSMOPOLITAN_LIBC_NEXGEN32E_X86COMPILER_H_ */
1,981
142
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/pcmpstr.inc
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 sw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ // Documentation for Intel(R)'s “Most Complicated Instruction”(TM) // // mnemonic op1 op2 op3 op4 modif f description, notes // ═══════════ ════ ════ ════ ═══ ════════ ═════════════════════════════ // PCMPESTRM XMM0 Vdq Wdq ... o..szapc Explicit Length, Return Mask // PCMPESTRI rCX Vdq Wdq ... o..szapc Explicit Length, Return Index // PCMPISTRM XMM0 Vdq Wdq Ib o..szapc Implicit Length, Return Mask // PCMPISTRI rCX Vdq Wdq Ib o..szapc Implicit Length, Return Index // // CF ← Reset if IntRes2 is equal to zero, set otherwise // ZF ← Set if any byte/word of xmm2/mem128 is null, reset otherwise // SF ← Set if any byte/word of xmm1 is null, reset otherwise // OF ← IntRes2[0] // AF ← Reset // PF ← Reset // // PCMP{E,I}STR{I,M} Control Byte // @see Intel Manual V.2B §4.1.7 // // ┌─0:index of the LEAST significant, set, bit is used // │ regardless of corresponding input element validity // │ intres2 is returned in least significant bits of xmm0 // ├─1:index of the MOST significant, set, bit is used // │ regardless of corresponding input element validity // │ each bit of intres2 is expanded to byte/word // │┌─0:negation of intres1 is for all 16 (8) bits // │├─1:negation of intres1 is masked by reg/mem validity // ││┌─intres1 is negated (1’s complement) // │││┌─mode{equalany,ranges,equaleach,equalordered} // ││││ ┌─issigned // ││││ │┌─is16bit // u│││├┐││ .Lequalordered = 0b00001100 .Lequalorder16 = 0b00001101 .Lequalranges8 = 0b00000100
3,708
55
jart/cosmopolitan
false
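For reference, the .Lequalordered constant 0b00001100 is exactly the encoding the C intrinsics spell as _SIDD_UBYTE_OPS|_SIDD_CMP_EQUAL_ORDERED|_SIDD_LEAST_SIGNIFICANT. A sketch of PCMPISTRI in that substring-search mode; it assumes SSE4.2 and that both pointers have at least 16 readable bytes.

#include <nmmintrin.h>

// Returns the index within hay's first 16 bytes where needle begins
// matching in "equal ordered" mode, or 16 if there is no match.
int find16(const char *hay, const char *needle) {
  __m128i h = _mm_loadu_si128((const __m128i *)hay);
  __m128i n = _mm_loadu_si128((const __m128i *)needle);
  return _mm_cmpistri(n, h,
                      _SIDD_UBYTE_OPS |          // unsigned bytes
                      _SIDD_CMP_EQUAL_ORDERED |  // substring mode
                      _SIDD_LEAST_SIGNIFICANT);  // lowest index
}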
cosmopolitan/libc/nexgen32e/argv2.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2023 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/runtime/runtime.h" #ifndef __x86_64__ char **__argv; #endif /* __x86_64__ */
1,930
25
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/macros.internal.inc
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 sw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" #include "libc/nexgen32e/x86feature.h" // Broadcast byte literal to vector, e.g. // // # xmm0=0x12121212121212121212121212121212 // .bcblit $0x12,%al,%eax,%xmm0 // // @param reg and regSI need to be the same register .macro .bcblit lit:req reg:req regSI:req xmm:req mov \lit,\reg movd \regSI,\xmm pbroadcastb \xmm .endm // Broadcast word literal to vector, e.g. // // # xmm0=0x01230123012301230123012301230123 // .bcwlit $0x123,%ax,%eax,%xmm0 // // @param reg and regSI need to be the same register .macro .bcwlit lit:req reg:req regSI:req xmm:req mov \lit,\reg movd \regSI,\xmm pbroadcastw \xmm .endm // Broadcast int16 from register to vector. .macro .bcwreg regSI:req xmm:req movd \regSI,\xmm pbroadcastw \xmm .endm // Sets all bytes in XMM register to first byte, e.g. // // mov $0x11,%eax // movd %eax,%xmm0 // pbroadcastb %xmm0 // // 11000000000000000000000000000000 // → 11111111111111111111111111111111 // // @param xmm can be %xmm0,%xmm1,etc. .macro pbroadcastb xmm:req #if X86_NEED(AVX2) vpbroadcastb \xmm,\xmm #else punpcklbw \xmm,\xmm punpcklwd \xmm,\xmm pshufd $0,\xmm,\xmm #endif .endm // Sets all words in XMM register to first word, e.g. // // mov $0x1234,%eax // movd %eax,%xmm0 // pbroadcastw %xmm0 // // 12340000000000000000000000000000 // → 12341234123412341234123412341234 // // @param xmm can be %xmm0,%xmm1,etc. .macro pbroadcastw xmm:req #if X86_NEED(AVX2) vpbroadcastw \xmm,\xmm #else punpcklwd \xmm,\xmm pshufd $0,\xmm,\xmm #endif .endm
3,385
90
jart/cosmopolitan
false
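The SSE2 fallback path of pbroadcastb has a direct intrinsic rendition, shown below for reference; _mm_set1_epi8() typically compiles to this same unpack/shuffle sequence when AVX2's vpbroadcastb is unavailable.

#include <emmintrin.h>

static __m128i broadcast_byte(unsigned char b) {
  __m128i x = _mm_cvtsi32_si128(b);  // movd
  x = _mm_unpacklo_epi8(x, x);       // punpcklbw %xmm,%xmm
  x = _mm_unpacklo_epi16(x, x);      // punpcklwd %xmm,%xmm
  return _mm_shuffle_epi32(x, 0);    // pshufd $0,%xmm,%xmm
}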
cosmopolitan/libc/nexgen32e/cescapec.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2023 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/str/str.h" /** * Converts byte to word-encoded C string literal representation. */ int _cescapec(int c) { switch ((c &= 255)) { case '\a': return '\\' | 'a' << 8; case '\b': return '\\' | 'b' << 8; case '\t': return '\\' | 't' << 8; case '\n': return '\\' | 'n' << 8; case '\v': return '\\' | 'v' << 8; case '\f': return '\\' | 'f' << 8; case '\r': return '\\' | 'r' << 8; case '"': return '\\' | '"' << 8; case '\'': return '\\' | '\'' << 8; case '\\': return '\\' | '\\' << 8; default: if (' ' <= c && c <= '~') { return c; } else { return '\\' | // ('0' + (c >> 6)) << 8 | // ('0' + ((c >> 3) & 7)) << 16 | // ('0' + (c & 7)) << 24; } } }
2,717
57
jart/cosmopolitan
false
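Since the return value packs up to four ASCII bytes in little-endian order with unused high bytes zero, emitting it takes one shift loop; a sketch:

#include <stdio.h>
#include "libc/str/str.h"

static void emit_escaped(int c) {
  int w = _cescapec(c);
  do putchar(w & 255); while ((w >>= 8));
}  // emit_escaped('\n') writes the two characters \ and n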
cosmopolitan/libc/nexgen32e/loadxmm.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" .privileged // Loads XMM registers from buffer. // // @param %rdi points to &(forcealign(16) uint8_t[256])[128] // @note modern cpus have out-of-order execution engines _loadxmm: .leafprologue movaps -0x80(%rdi),%xmm0 movaps -0x70(%rdi),%xmm1 movaps -0x60(%rdi),%xmm2 movaps -0x50(%rdi),%xmm3 movaps -0x40(%rdi),%xmm4 movaps -0x30(%rdi),%xmm5 movaps -0x20(%rdi),%xmm6 movaps -0x10(%rdi),%xmm7 movaps 0x00(%rdi),%xmm8 movaps 0x10(%rdi),%xmm9 movaps 0x20(%rdi),%xmm10 movaps 0x30(%rdi),%xmm11 movaps 0x40(%rdi),%xmm12 movaps 0x50(%rdi),%xmm13 movaps 0x60(%rdi),%xmm14 movaps 0x70(%rdi),%xmm15 .leafepilogue .endfn _loadxmm,globl,hidden
2,524
46
jart/cosmopolitan
false
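The @param contract is unusual: the pointer aims 128 bytes into a 16-byte-aligned 256-byte buffer, so the -0x80..0x70 displacements above all land inside it. A sketch, with the declaration site assumed:

#include "libc/nexgen32e/nexgen32e.h"  // assumed home of _loadxmm()

void restore_xmm_registers(void) {
  _Alignas(16) static unsigned char buf[256];  // xmm0..xmm15 images
  _loadxmm(buf + 128);  // &buf[128] per the @param comment
}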
cosmopolitan/libc/nexgen32e/crc32.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_CRC32_H_ #define COSMOPOLITAN_LIBC_NEXGEN32E_CRC32_H_ #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ extern const uint32_t kCrc32cTab[256]; void crc32init(uint32_t[hasatleast 256], uint32_t); uint32_t crc32a(uint32_t, const void *, size_t); uint32_t crc32c(uint32_t, const void *, size_t); COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_LIBC_NEXGEN32E_CRC32_H_ */
459
15
jart/cosmopolitan
false
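The first parameter makes the checksums chainable: feeding each call the previous return value produces the same digest as one pass over the concatenated input. A sketch:

#include <string.h>
#include "libc/nexgen32e/crc32.h"

uint32_t checksum_two(const char *a, const char *b) {
  uint32_t h = 0;               // conventional starting value
  h = crc32c(h, a, strlen(a));
  h = crc32c(h, b, strlen(b));  // chained continuation
  return h;
}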
cosmopolitan/libc/nexgen32e/sha1.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ │ │ Copyright 2014 Intel Corporation │ │ │ │ Redistribution and use in source and binary forms, with or without │ │ modification, are permitted provided that the following conditions │ │ are met: │ │ │ │ * Redistributions of source code must retain the above copyright │ │ notice, this list of conditions and the following disclaimer. │ │ * Redistributions in binary form must reproduce the above copyright │ │ notice, this list of conditions and the following disclaimer in │ │ the documentation and/or other materials provided with the │ │ distribution. │ │ * Neither the name of Intel Corporation nor the names of its │ │ contributors may be used to endorse or promote products derived │ │ from this software without specific prior written permission. │ │ │ │ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS │ │ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT │ │ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR │ │ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT │ │ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, │ │ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT │ │ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, │ │ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY │ │ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT │ │ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE │ │ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. │ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" .ident "\n\ AVX2 SHA-1 (BSD-3 License)\n\ Copyright 2014 Intel Corporation\n" .include "libc/disclaimer.inc" #define CTX %rdi /* arg1 */ #define BUF %rsi /* arg2 */ #define CNT %rdx /* arg3 */ #define REG_A %ecx #define REG_B %esi #define REG_C %edi #define REG_D %eax #define REG_E %edx #define REG_TB %ebx #define REG_TA %r12d #define REG_RA %rcx #define REG_RB %rsi #define REG_RC %rdi #define REG_RD %rax #define REG_RE %rdx #define REG_RTA %r12 #define REG_RTB %rbx #define REG_T1 %r11d #define xmm_mov vmovups #define RND_F1 1 #define RND_F2 2 #define RND_F3 3 .macro REGALLOC .set A, REG_A .set B, REG_B .set C, REG_C .set D, REG_D .set E, REG_E .set TB, REG_TB .set TA, REG_TA .set RA, REG_RA .set RB, REG_RB .set RC, REG_RC .set RD, REG_RD .set RE, REG_RE .set RTA, REG_RTA .set RTB, REG_RTB .set T1, REG_T1 .endm #define HASH_PTR %r9 #define BLOCKS_CTR %r8 #define BUFFER_PTR %r10 #define BUFFER_PTR2 %r13 #define PRECALC_BUF %r14 #define WK_BUF %r15 #define W_TMP %xmm0 #define WY_TMP %ymm0 #define WY_TMP2 %ymm9 # AVX2 variables #define WY0 %ymm3 #define WY4 %ymm5 #define WY08 %ymm7 #define WY12 %ymm8 #define WY16 %ymm12 #define WY20 %ymm13 #define WY24 %ymm14 #define WY28 %ymm15 #define YMM_SHUFB_BSWAP %ymm10 /* * Keep 2 iterations precalculated at a time: * - 80 DWORDs per iteration * 2 */ #define W_SIZE (80*2*2 +16) #define WK(t) ((((t) % 80) / 4)*32 + ( (t) % 4)*4 + ((t)/80)*16 )(WK_BUF) #define PRECALC_WK(t) ((t)*2*2)(PRECALC_BUF) .macro UPDATE_HASH hash, val add \hash, \val mov \val, \hash .endm .macro PRECALC_RESET_WY .set WY_00, WY0 .set WY_04, WY4 .set WY_08, WY08 .set WY_12, WY12 .set WY_16, WY16 .set WY_20, WY20 .set WY_24, WY24 .set 
WY_28, WY28 .set WY_32, WY_00 .endm .macro PRECALC_ROTATE_WY /* Rotate macros */ .set WY_32, WY_28 .set WY_28, WY_24 .set WY_24, WY_20 .set WY_20, WY_16 .set WY_16, WY_12 .set WY_12, WY_08 .set WY_08, WY_04 .set WY_04, WY_00 .set WY_00, WY_32 /* Define register aliases */ .set WY, WY_00 .set WY_minus_04, WY_04 .set WY_minus_08, WY_08 .set WY_minus_12, WY_12 .set WY_minus_16, WY_16 .set WY_minus_20, WY_20 .set WY_minus_24, WY_24 .set WY_minus_28, WY_28 .set WY_minus_32, WY .endm .macro PRECALC_00_15 .if (i == 0) # Initialize and rotate registers PRECALC_RESET_WY PRECALC_ROTATE_WY .endif /* message scheduling pre-compute for rounds 0-15 */ .if ((i & 7) == 0) /* * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ vmovdqu (i * 2)(BUFFER_PTR), W_TMP .elseif ((i & 7) == 1) vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\ WY_TMP, WY_TMP .elseif ((i & 7) == 2) vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY .elseif ((i & 7) == 4) vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP .elseif ((i & 7) == 7) vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY .endif .endm .macro PRECALC_16_31 /* * message scheduling pre-compute for rounds 16-31 * calculating last 32 w[i] values in 8 XMM registers * pre-calculate K+w[i] values and store to mem * for later load by ALU add instruction * * "brute force" vectorization for rounds 16-31 only * due to w[i]->w[i-3] dependency */ .if ((i & 7) == 0) /* * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ /* w[i-14] */ vpalignr $8, WY_minus_16, WY_minus_12, WY vpsrldq $4, WY_minus_04, WY_TMP /* w[i-3] */ .elseif ((i & 7) == 1) vpxor WY_minus_08, WY, WY vpxor WY_minus_16, WY_TMP, WY_TMP .elseif ((i & 7) == 2) vpxor WY_TMP, WY, WY vpslldq $12, WY, WY_TMP2 .elseif ((i & 7) == 3) vpslld $1, WY, WY_TMP vpsrld $31, WY, WY .elseif ((i & 7) == 4) vpor WY, WY_TMP, WY_TMP vpslld $2, WY_TMP2, WY .elseif ((i & 7) == 5) vpsrld $30, WY_TMP2, WY_TMP2 vpxor WY, WY_TMP, WY_TMP .elseif ((i & 7) == 7) vpxor WY_TMP2, WY_TMP, WY vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY .endif .endm .macro PRECALC_32_79 /* * in SHA-1 specification: * w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1 * instead we do equal: * w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2 * allows more efficient vectorization * since w[i]=>w[i-3] dependency is broken */ .if ((i & 7) == 0) /* * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ vpalignr $8, WY_minus_08, WY_minus_04, WY_TMP .elseif ((i & 7) == 1) /* W is W_minus_32 before xor */ vpxor WY_minus_28, WY, WY .elseif ((i & 7) == 2) vpxor WY_minus_16, WY_TMP, WY_TMP .elseif ((i & 7) == 3) vpxor WY_TMP, WY, WY .elseif ((i & 7) == 4) vpslld $2, WY, WY_TMP .elseif ((i & 7) == 5) vpsrld $30, WY, WY vpor WY, WY_TMP, WY .elseif ((i & 7) == 7) vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY .endif .endm .macro PRECALC r, s .set i, \r .if (i < 40) .set K_XMM, 32*0 .elseif (i < 80) .set K_XMM, 32*1 .elseif (i < 120) .set K_XMM, 32*2 .else .set K_XMM, 32*3 .endif .if (i<32) PRECALC_00_15 \s .elseif (i<64) PRECALC_16_31 \s .elseif (i < 160) PRECALC_32_79 \s .endif .endm .macro ROTATE_STATE .set T_REG, E .set E, D .set D, C .set C, B .set B, TB .set TB, A .set A, T_REG .set T_REG, RE .set RE, RD .set RD, RC .set RC, RB .set RB, RTB .set RTB, RA .set RA, T_REG .endm // Macro relies on saved ROUND_Fx .macro RND_FUN f, r .if (\f == RND_F1) ROUND_F1 \r .elseif (\f == RND_F2) ROUND_F2 \r .elseif (\f == RND_F3) 
ROUND_F3 \r .endif .endm .macro RR r .set round_id, (\r % 80) .if (round_id == 0) # Precalculate F for first round .set ROUND_FUNC, RND_F1 mov B, TB rorx $(32-30), B, B # b>>>2 andn D, TB, T1 and C, TB xor T1, TB .endif RND_FUN ROUND_FUNC, \r ROTATE_STATE .if (round_id == 18) .set ROUND_FUNC, RND_F2 .elseif (round_id == 38) .set ROUND_FUNC, RND_F3 .elseif (round_id == 58) .set ROUND_FUNC, RND_F2 .endif .set round_id, ( (\r+1) % 80) RND_FUN ROUND_FUNC, (\r+1) ROTATE_STATE .endm .macro ROUND_F1 r add WK(\r), E andn C, A, T1 # ~b&d lea (RE,RTB), E # Add F from the previous round rorx $(32-5), A, TA # T2 = A >>> 5 rorx $(32-30),A, TB # b>>>2 for next round PRECALC (\r) # msg scheduling for next 2 blocks // Calculate F for the next round // (b & c) ^ andn[b, d] and B, A # b&c xor T1, A # F1 = (b&c) ^ (~b&d) lea (RE,RTA), E # E += A >>> 5 .endm .macro ROUND_F2 r add WK(\r), E lea (RE,RTB), E # Add F from the previous round /* Calculate F for the next round */ rorx $(32-5), A, TA # T2 = A >>> 5 .if ((round_id) < 79) rorx $(32-30), A, TB # b>>>2 for next round .endif PRECALC (\r) # msg scheduling for next 2 blocks .if ((round_id) < 79) xor B, A .endif add TA, E # E += A >>> 5 .if ((round_id) < 79) xor C, A .endif .endm .macro ROUND_F3 r add WK(\r), E PRECALC (\r) # msg scheduling for next 2 blocks lea (RE,RTB), E # Add F from the previous round mov B, T1 or A, T1 rorx $(32-5), A, TA # T2 = A >>> 5 rorx $(32-30), A, TB # b>>>2 for next round // Calculate F for the next round // (b and c) or (d and (b or c)) and C, T1 and B, A or T1, A add TA, E # E += A >>> 5 .endm // Add constant only if (%2 > %3) condition met (uses RTA as temp) // %1 + %2 >= %3 ? %4 : 0 .macro ADD_IF_GE a, b, c, d mov \a, RTA add $\d, RTA cmp $\c, \b cmovge RTA, \a .endm // Performs 80 rounds of SHA-1 for multiple blocks with s/w pipelining .macro SHA1_PIPELINED_MAIN_BODY REGALLOC mov (HASH_PTR), A mov 4(HASH_PTR), B mov 8(HASH_PTR), C mov 12(HASH_PTR), D mov 16(HASH_PTR), E mov %rsp, PRECALC_BUF lea (2*4*80+32)(%rsp), WK_BUF // Precalc WK for first 2 blocks ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64 .set i, 0 .rept 160 PRECALC i .set i, i + 1 .endr // Go to next block if needed ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128 ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 xchg WK_BUF, PRECALC_BUF .balign 32 .L_loop: // code loops through more than one block // we use K_BASE value as a signal of a last block, // it is set below by: cmovae BUFFER_PTR, K_BASE test BLOCKS_CTR, BLOCKS_CTR jnz .L_begin .balign 32 jmp .L_end .balign 32 .L_begin: // process first block // rounds: 0,2,4,6,8 .set j, 0 .rept 5 RR j .set j, j+2 .endr jmp .L_loop0 .L_loop0: // rounds // 10,12,14,16,18 // 20,22,24,26,28 // 30,32,34,36,38 // 40,42,44,46,48 // 50,52,54,56,58 .rept 25 RR j .set j, j+2 .endr // Update Counter */ sub $1, BLOCKS_CTR // Move to the next block only if needed*/ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128 // rounds // 60,62,64,66,68 // 70,72,74,76,78 .rept 10 RR j .set j, j+2 .endr UPDATE_HASH (HASH_PTR), A UPDATE_HASH 4(HASH_PTR), TB UPDATE_HASH 8(HASH_PTR), C UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E test BLOCKS_CTR, BLOCKS_CTR jz .L_loop mov TB, B // process second block // 0+80, 2+80, 4+80, 6+80, 8+80 // 10+80,12+80,14+80,16+80,18+80 .set j, 0 .rept 10 RR j+80 .set j, j+2 .endr jmp .L_loop1 .L_loop1: // rounds // 20+80,22+80,24+80,26+80,28+80 // 30+80,32+80,34+80,36+80,38+80 .rept 10 RR j+80 .set j, j+2 .endr jmp .L_loop2 .L_loop2: // rounds // 40+80,42+80,44+80,46+80,48+80 // 50+80,52+80,54+80,56+80,58+80 .rept 10 RR j+80 .set j, j+2 
.endr // update counter sub $1, BLOCKS_CTR // Move to the next block only if needed ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 jmp .L_loop3 .L_loop3: // rounds // 60+80,62+80,64+80,66+80,68+80 // 70+80,72+80,74+80,76+80,78+80 .rept 10 RR j+80 .set j, j+2 .endr UPDATE_HASH (HASH_PTR), A UPDATE_HASH 4(HASH_PTR), TB UPDATE_HASH 8(HASH_PTR), C UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E /* Reset state for AVX2 reg permutation */ mov A, TA mov TB, A mov C, TB mov E, C mov D, B mov TA, D REGALLOC xchg WK_BUF, PRECALC_BUF jmp .L_loop .balign 32 .L_end: .endm .section .rodata #define K1 0x5a827999 #define K2 0x6ed9eba1 #define K3 0x8f1bbcdc #define K4 0xca62c1d6 .balign 128 K_XMM_AR: .long K1,K1,K1,K1 .long K1,K1,K1,K1 .long K2,K2,K2,K2 .long K2,K2,K2,K2 .long K3,K3,K3,K3 .long K3,K3,K3,K3 .long K4,K4,K4,K4 .long K4,K4,K4,K4 BSWAP_SHUFB_CTL: .long 0x00010203 .long 0x04050607 .long 0x08090a0b .long 0x0c0d0e0f .long 0x00010203 .long 0x04050607 .long 0x08090a0b .long 0x0c0d0e0f .text // Performs Intel® AVX2™ optimized SHA-1 update. // // This implementation is based on the previous SSSE3 release: // Visit http://software.intel.com/en-us/articles/ and refer // to improving-the-performance-of-the-secure-hash-algorithm-1/ // // Updates 20-byte SHA-1 record at start of 'state', from 'input', // for even number of 'blocks' consecutive 64-byte blocks. // // void sha1_transform_avx2(struct sha1_state *state, // const uint8_t *input, // int blocks); // // @param %rdi points to output digest // @param %rsi points to input data // @param %rdx is number of 64-byte blocks to process // @see X86_HAVE(SHA) sha1_transform_avx2: push %rbp mov %rsp,%rbp .profilable push %rbx push %r12 push %r13 push %r14 push %r15 RESERVE_STACK = (W_SIZE*4 + 8+24) /* Align stack */ mov %rsp,%rbx and $~(0x20-1),%rsp push %rbx sub $RESERVE_STACK,%rsp vzeroupper /* Setup initial values */ mov CTX,HASH_PTR mov BUF,BUFFER_PTR mov BUF,BUFFER_PTR2 mov CNT,BLOCKS_CTR xmm_mov BSWAP_SHUFB_CTL(%rip),YMM_SHUFB_BSWAP SHA1_PIPELINED_MAIN_BODY vzeroupper add $RESERVE_STACK,%rsp pop %rsp pop %r15 pop %r14 pop %r13 pop %r12 pop %rbx pop %rbp ret .endfn sha1_transform_avx2,globl
14,809
650
jart/cosmopolitan
false
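Per the calling convention documented in the listing, the digest pointer holds the five SHA-1 chaining words and the input must be whole 64-byte blocks in an even count; padding and finalization happen elsewhere. A hedged driver sketch:

#include "libc/nexgen32e/sha.h"

void sha1_blocks(const void *data, unsigned blocks, uint32_t digest[5]) {
  digest[0] = 0x67452301;  // standard SHA-1 initialization vector
  digest[1] = 0xefcdab89;
  digest[2] = 0x98badcfe;
  digest[3] = 0x10325476;
  digest[4] = 0xc3d2e1f0;
  sha1_transform_avx2(digest, data, blocks);  // blocks assumed even
}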
cosmopolitan/libc/nexgen32e/bench.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_BENCH_H_ #define COSMOPOLITAN_LIBC_NEXGEN32E_BENCH_H_ #include "libc/nexgen32e/rdtsc.h" #if !(__ASSEMBLER__ + __LINKER__ + 0) /** * @fileoverview NexGen32e Microbenchmarking. * * @see X86_HAVE(INVTSC) * @see libc/testlib/bench.h */ #ifdef __x86__ #define __startbench() \ ({ \ uint64_t Ticks; \ asm volatile("lfence\n\t" \ "push\t%%rbx\n\t" \ "cpuid\n\t" \ "pop\t%%rbx\n\t" \ "rdtsc\n\t" \ "shl\t%2,%%rdx\n\t" \ "or\t%%rdx,%0" \ : "=a"(Ticks) \ : "0"(0), "J"(32) \ : "rcx", "rdx", "memory", "cc"); \ Ticks; \ }) #define __endbench() \ ({ \ uint64_t Ticks; \ asm volatile("rdtscp\n\t" \ "shl\t%1,%%rdx\n\t" \ "or\t%%rdx,%%rax\n\t" \ "mov\t%%rax,%0\n\t" \ "xor\t%%eax,%%eax\n\t" \ "push\t%%rbx\n\t" \ "cpuid\n\t" \ "pop\t%%rbx" \ : "=r"(Ticks) \ : "J"(32) \ : "rax", "rcx", "rdx", "memory", "cc"); \ Ticks; \ }) #else #define __startbench() \ ({ \ uint64_t _ts; \ asm volatile("isb" ::: "memory"); \ _ts = rdtsc(); \ asm volatile("isb" ::: "memory"); \ _ts; \ }) #define __endbench() \ ({ \ uint64_t _ts; \ asm volatile("isb" ::: "memory"); \ _ts = rdtsc(); \ asm volatile("isb" ::: "memory"); \ _ts; \ }) #endif #define __startbench_m() mfence_lfence_rdtsc_lfence() #define __endbench_m() __startbench_m() #define __marker() asm("nop") #define __ordered() asm volatile("" ::: "memory") #define __fakeread(X) asm volatile("" : /* no outputs */ : "g"(X)) #define __fakereadwrite(X) \ ({ \ autotype(X) Res = (X); \ asm volatile("" : "=g"(Res) : "0"(X)); \ Res; \ }) #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_LIBC_NEXGEN32E_BENCH_H_ */
3,082
78
jart/cosmopolitan
false
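Putting the macros together: the serialized timestamps bracket the measured region and __fakeread() stops the compiler from deleting the work. The result is timestamp ticks, not wall time.

#include "libc/nexgen32e/bench.h"

uint64_t bench_clear(char *p, unsigned long n) {
  uint64_t t1, t2;
  t1 = __startbench();
  for (unsigned long i = 0; i < n; ++i) p[i] = 0;  // region under test
  __fakeread(p);  // keep the stores observable
  t2 = __endbench();
  return t2 - t1;  // elapsed tsc ticks
}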
cosmopolitan/libc/nexgen32e/x86feature.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_X86FEATURE_H_ #define COSMOPOLITAN_LIBC_NEXGEN32E_X86FEATURE_H_ #ifdef __x86_64__ #include "libc/nexgen32e/kcpuids.h" #include "libc/nexgen32e/x86compiler.h" /** * @fileoverview x86 cpu feature detection. */ #define X86_HAVE(FEATURE) _X86_HAVE(X86_##FEATURE) /* clang-format off */ /* --- FEATURE LEAF REG BIT COMPILE-TIME-DEFINE HOOK */ #define X86_ACC 1H, EDX, 29, 0, _ #define X86_ACPI 1H, EDX, 22, 0, _ #define X86_ADX 7H, EBX, 19, _X86_CC_ADX, _ /* broadwell c. 2014 */ #define X86_AES 1H, ECX, 25, _X86_CC_AES, _ /* westmere c. 2010 */ #define X86_APIC 1H, EDX, 9, 0, _ #define X86_ARCH_CAPABILITIES 7H, EDX, 29, 0, _ #define X86_AVX 1H, ECX, 28, _X86_CC_AVX, _ /* sandybridge c. 2012 */ #define X86_AVX2 7H, EBX, 5, _X86_CC_AVX2, _ /* haswell c. 2013 */ #define X86_AVX512BW 7H, EBX, 30, 0, _ #define X86_AVX512CD 7H, EBX, 28, 0, _ #define X86_AVX512DQ 7H, EBX, 17, 0, _ #define X86_AVX512ER 7H, EBX, 27, 0, _ #define X86_AVX512F 7H, EBX, 16, 0, _ #define X86_AVX512IFMA 7H, EBX, 21, 0, _ #define X86_AVX512PF 7H, EBX, 26, 0, _ #define X86_AVX512VBMI 7H, ECX, 1, 0, _ #define X86_AVX512VL 7H, EBX, 31, 0, _ #define X86_AVX512_4FMAPS 7H, EDX, 3, 0, _ #define X86_AVX512_4VNNIW 7H, EDX, 2, 0, _ #define X86_AVX512_BF16 7H, EAX, 5, 0, _ #define X86_AVX512_BITALG 7H, ECX, 12, 0, _ #define X86_AVX512_VBMI2 7H, ECX, 6, 0, _ #define X86_AVX512_VNNI 7H, ECX, 11, 0, _ #define X86_AVX512_VP2INTERSECT 7H, EDX, 8, 0, _ #define X86_AVX512_VPOPCNTDQ 7H, ECX, 14, 0, _ #define X86_BMI 7H, EBX, 3, _X86_CC_BMI, _ /* haswell c. 2013 */ #define X86_BMI2 7H, EBX, 8, _X86_CC_BMI2, _ /* haswell c. 2013 */ #define X86_CID 1H, ECX, 10, 0, _ #define X86_CLDEMOTE 7H, ECX, 25, 0, _ #define X86_CLFLUSH 1H, EDX, 19, _X86_CC_SSE2, _ #define X86_CLFLUSHOPT 7H, EBX, 23, _X86_CC_CLFLUSHOPT, _ /* skylake/zen */ #define X86_CLWB 7H, EBX, 24, 0, _ /* skylake/zen2 */ #define X86_CMOV 1H, EDX, 15, 0, _ #define X86_CQM 7H, EBX, 12, 0, _ #define X86_CX16 1H, ECX, 13, 0, _ #define X86_CX8 1H, EDX, 8, 0, _ #define X86_DCA 1H, ECX, 18, 0, _ #define X86_DE 1H, EDX, 2, 0, _ #define X86_DS 1H, EDX, 21, 0, _ #define X86_DSCPL 1H, ECX, 4, 0, _ #define X86_DTES64 1H, ECX, 2, 0, _ #define X86_ERMS 7H, EBX, 9, 0, _ /* broaadwell c. 2014 */ #define X86_EST 1H, ECX, 7, 0, _ #define X86_F16C 1H, ECX, 29, 0, _ #define X86_FDP_EXCPTN_ONLY 7H, EBX, 6, 0, _ #define X86_FLUSH_L1D 7H, EDX, 28, 0, _ #define X86_FMA 1H, ECX, 12, _X86_CC_FMA, _ /* haswell c. 2013 */ #define X86_FPU 1H, EDX, 0, 0, _ #define X86_FSGSBASE 7H, EBX, 0, 0, _ #define X86_FXSR 1H, EDX, 24, 0, _ #define X86_GBPAGES 80000001H, EDX, 26, 0, _ #define X86_GFNI 7H, ECX, 8, 0, _ #define X86_HLE 7H, EBX, 4, 0, _ #define X86_HT 1H, EDX, 28, 0, _ #define X86_HYPERVISOR 1H, ECX, 31, 0, _ #define X86_IA64 1H, EDX, 30, 0, _ #define X86_INTEL_PT 7H, EBX, 25, 0, _ #define X86_INTEL_STIBP 7H, EDX, 27, 0, _ #define X86_INVPCID 1H, EBX, 10, 0, _ #define X86_INVTSC 80000007H, EDX, 8, _X86_CC_POPCNT, _ /* i.e. 
not a K8 */ #define X86_LA57 7H, ECX, 16, 0, _ #define X86_LAHF_LM 80000001H, ECX, 0, 0, _ #define X86_LM 80000001H, EDX, 29, 0, _ #define X86_MCA 1H, EDX, 14, 0, _ #define X86_MCE 1H, EDX, 7, 0, _ #define X86_MD_CLEAR 7H, EDX, 10, 0, _ #define X86_MMX 1H, EDX, 23, 0, _ #define X86_MOVBE 1H, ECX, 22, 0, _ #define X86_MOVDIR64B 7H, ECX, 28, 0, _ #define X86_MOVDIRI 7H, ECX, 27, 0, _ #define X86_MP 80000001H, EDX, 19, 0, _ #define X86_MPX 7H, EBX, 14, 0, _ #define X86_MSR 1H, EDX, 5, 0, _ #define X86_MTRR 1H, EDX, 12, 0, _ #define X86_MWAIT 1H, ECX, 3, 0, _ #define X86_NX 80000001H, EDX, 20, 0, _ #define X86_OSPKE 7H, ECX, 4, 0, _ #define X86_OSXSAVE 1H, ECX, 27, 0, _ #define X86_PAE 1H, EDX, 6, 0, _ #define X86_PAT 1H, EDX, 16, 0, _ #define X86_PBE 1H, EDX, 31, 0, _ #define X86_PCID 1H, ECX, 17, 0, _ #define X86_PCLMUL 1H, ECX, 1, _X86_CC_PCLMUL, _ /* westmere c. 2010 */ #define X86_PCONFIG 7H, EDX, 18, 0, _ #define X86_PDCM 1H, ECX, 15, 0, _ #define X86_PGE 1H, EDX, 13, 0, _ #define X86_PKU 7H, ECX, 3, 0, _ #define X86_PN 1H, EDX, 18, 0, _ #define X86_POPCNT 1H, ECX, 23, _X86_CC_POPCNT, _ /* nehalem c. 2008 */ #define X86_PSE 1H, EDX, 3, 0, _ #define X86_PSE36 1H, EDX, 17, 0, _ #define X86_RDPID 7H, ECX, 22, _X86_CC_RDPID, _ /* cannonlake c. 2018 */ #define X86_RDRND 1H, ECX, 30, _X86_CC_RDRND, _ /* ivybridge c. 2012 */ #define X86_RDSEED 7H, EBX, 18, _X86_CC_RDSEED, _ /* broadwell c. 2014 */ #define X86_RDTSCP 80000001H, EDX, 27, 0, _ #define X86_RDT_A 7H, EBX, 15, 0, _ #define X86_RTM 7H, EBX, 11, 0, _ #define X86_SDBG 1H, ECX, 11, 0, _ #define X86_SELFSNOOP 1H, EDX, 27, 0, _ #define X86_SEP 1H, EDX, 11, 0, _ #define X86_SHA 7H, EBX, 29, _X86_CC_SHA, _ /* goldmont (2016) */ #define X86_SMAP 7H, EBX, 20, 0, _ #define X86_SMEP 7H, EBX, 7, 0, _ #define X86_SMX 1H, ECX, 6, 0, _ #define X86_SPEC_CTRL 7H, EDX, 26, 0, _ #define X86_SPEC_CTRL_SSBD 7H, EDX, 31, 0, _ #define X86_SSE 1H, EDX, 25, _X86_CC_SSE, _ /* pentium c. 1999 */ #define X86_SSE2 1H, EDX, 26, _X86_CC_SSE2, _ /* pentium c. 2001 */ #define X86_SSE3 1H, ECX, 0, _X86_CC_SSE3, _ /* k8 c. 2005 */ #define X86_SSE4_1 1H, ECX, 19, _X86_CC_SSE4_1, _ /* core c. 2006 */ #define X86_SSE4_2 1H, ECX, 20, _X86_CC_SSE4_2, _ /* nehalem c. 2008 */ #define X86_SSSE3 1H, ECX, 9, _X86_CC_SSSE3, _ /* westmere c. 2010 */ #define X86_SYSCALL 80000001H, EDX, 11, 0, _ #define X86_TM2 1H, ECX, 8, 0, _ #define X86_TME 7H, ECX, 13, 0, _ #define X86_TSC 1H, EDX, 4, 0, _ #define X86_TSC_ADJUST 7H, EBX, 1, 0, _ #define X86_TSC_DEADLINE_TIMER 1H, ECX, 24, 0, _ #define X86_TSX_FORCE_ABORT 7H, EDX, 13, 0, _ #define X86_UMIP 7H, ECX, 2, 0, _ #define X86_VAES 7H, ECX, 9, 0, _ #define X86_VME 1H, EDX, 1, 0, _ #define X86_VMX 1H, ECX, 5, 0, _ #define X86_VPCLMULQDQ 7H, ECX, 10, 0, _ #define X86_WAITPKG 7H, ECX, 5, 0, _ #define X86_X2APIC 1H, ECX, 21, 0, _ #define X86_XSAVE 1H, ECX, 26, _X86_CC_XSAVE, _ /* sandybridge c. 
2012 */ #define X86_XTPR 1H, ECX, 14, 0, _ #define X86_ZERO_FCS_FDS 7H, EBX, 13, 0, _ #define X86_JIT 80000001H, ECX, 31, 0, _ /* IsGenuineBlink() */ /* clang-format on */ /* AMD specific features */ #define X86_ABM 80000001H, ECX, 5, _X86_CC_ABM, _ #define X86_3DNOW 80000001H, EDX, 31, 0, _ #define X86_3DNOWEXT 80000001H, EDX, 30, 0, _ #define X86_3DNOWPREFETCH 80000001H, ECX, 8, 0, _ #define X86_BPEXT 80000001H, ECX, 26, 0, _ #define X86_CMP_LEGACY 80000001H, ECX, 1, 0, _ #define X86_CR8_LEGACY 80000001H, ECX, 4, 0, _ #define X86_EXTAPIC 80000001H, ECX, 3, 0, _ #define X86_FMA4 80000001H, ECX, 16, 0, _ #define X86_FXSR_OPT 80000001H, EDX, 25, 0, _ #define X86_IBS 80000001H, ECX, 10, 0, _ #define X86_LWP 80000001H, ECX, 15, 0, _ #define X86_MISALIGNSSE 80000001H, ECX, 7, 0, _ #define X86_MMXEXT 80000001H, EDX, 22, 0, _ #define X86_MWAITX 80000001H, ECX, 29, 0, _ #define X86_NODEID_MSR 80000001H, ECX, 19, 0, _ #define X86_OSVW 80000001H, ECX, 9, 0, _ #define X86_OVERFLOW_RECOV 80000007H, EBX, 0, 0, _ #define X86_PERFCTR_CORE 80000001H, ECX, 23, 0, _ #define X86_PERFCTR_LLC 80000001H, ECX, 28, 0, _ #define X86_PERFCTR_NB 80000001H, ECX, 24, 0, _ #define X86_PTSC 80000001H, ECX, 27, 0, _ #define X86_SKINIT 80000001H, ECX, 12, 0, _ #define X86_SMCA 80000007H, EBX, 3, 0, _ #define X86_SSE4A 80000001H, ECX, 6, 0, _ #define X86_SUCCOR 80000007H, EBX, 1, 0, _ #define X86_SVM 80000001H, ECX, 2, 0, _ #define X86_TBM 80000001H, ECX, 21, 0, _ #define X86_TCE 80000001H, ECX, 17, 0, _ #define X86_TOPOEXT 80000001H, ECX, 22, 0, _ #define X86_WDT 80000001H, ECX, 13, 0, _ #define X86_XOP 80000001H, ECX, 11, 0, _ /* Defined but not loaded by kCpuids.S */ #define X86_ARAT 6H, EAX, 2, 0, _ #define X86_AVIC 8000000AH, EDX, 13, 0, _ #define X86_CLZERO 80000008H, EBX, 0, 0, _ #define X86_DECODEASSISTS 8000000AH, EDX, 7, 0, _ #define X86_DTHERM 6H, EAX, 0, 0, _ #define X86_FLUSHBYASID 8000000AH, EDX, 6, 0, _ #define X86_HWP 6H, EAX, 7, 0, _ #define X86_HWP_ACT_WINDOW 6H, EAX, 9, 0, _ #define X86_HWP_EPP 6H, EAX, 10, 0, _ #define X86_HWP_NOTIFY 6H, EAX, 8, 0, _ #define X86_HWP_PKG_REQ 6H, EAX, 11, 0, _ #define X86_IBPB 80000008H, EBX, 12, 0, _ #define X86_IBRS 80000008H, EBX, 14, 0, _ #define X86_IDA 6H, EAX, 1, 0, _ #define X86_IRPERF 80000008H, EBX, 1, 0, _ #define X86_LBRV 8000000AH, EDX, 1, 0, _ #define X86_NPT 8000000AH, EDX, 0, 0, _ #define X86_NRIPS 8000000AH, EDX, 3, 0, _ #define X86_PAUSEFILTER 8000000AH, EDX, 10, 0, _ #define X86_PFTHRESHOLD 8000000AH, EDX, 12, 0, _ #define X86_PLN 6H, EAX, 4, 0, _ #define X86_PTS 6H, EAX, 6, 0, _ #define X86_SSBD 80000008H, EBX, 24, 0, _ #define X86_SSB_NO 80000008H, EBX, 26, 0, _ #define X86_STIBP 80000008H, EBX, 15, 0, _ #define X86_STIBP_ALWAYS_ON 80000008H, EBX, 17, 0, _ #define X86_SVML 8000000AH, EDX, 2, 0, _ #define X86_TSCRATEMSR 8000000AH, EDX, 4, 0, _ #define X86_VGIF 8000000AH, EDX, 16, 0, _ #define X86_VIRT_SSBD 80000008H, EBX, 25, 0, _ #define X86_VMCBCLEAN 8000000AH, EDX, 5, 0, _ #define X86_V_VMSAVE_VMLOAD 8000000AH, EDX, 15, 0, _ #define X86_WBNOINVD 80000008H, EBX, 9, 0, _ #define X86_XGETBV1 DH, EAX, 2, 0, _ #define X86_XSAVEC DH, EAX, 1, 0, _ #define X86_XSAVEERPTR 80000008H, EBX, 2, 0, _ #define X86_XSAVEOPT DH, EAX, 0, 0, _ #define X86_XSAVES DH, EAX, 3, 0, _ #define X86_NEED(FEATURE) _X86_NEED(X86_##FEATURE) #define X86_WORD(FEATURE) _X86_WORD(X86_##FEATURE) #define X86_LEAF(FEATURE) _X86_LEAF(X86_##FEATURE) #define X86_REG(FEATURE) _X86_REG(X86_##FEATURE) #define X86_BIT(FEATURE) _X86_BIT(X86_##FEATURE) #define _X86_HAVE(FEATURE) 
__X86_HAVE(FEATURE) #define _X86_NEED(FEATURE) __X86_NEED(FEATURE) #define _X86_WORD(FEATURE) __X86_WORD(FEATURE) #define _X86_LEAF(FEATURE) __X86_LEAF(FEATURE) #define _X86_REG(FEATURE) __X86_REG(FEATURE) #define _X86_BIT(FEATURE) __X86_BIT(FEATURE) #define __X86_HAVE(LEAF, REG, BIT, MANDATORY, HOOK) \ ___X86_HAVE(LEAF, REG, BIT, MANDATORY, _X86_HOOK_##HOOK) #define __X86_NEED(LEAF, REG, BIT, MANDATORY, HOOK) MANDATORY #define __X86_WORD(LEAF, REG, BIT, MANDATORY, HOOK) KCPUIDS(LEAF, REG) #define __X86_LEAF(LEAF, REG, BIT, MANDATORY, HOOK) LEAF #define __X86_REG(LEAF, REG, BIT, MANDATORY, HOOK) REG #define __X86_BIT(LEAF, REG, BIT, MANDATORY, HOOK) BIT #ifndef __ASSEMBLER__ #define ___X86_HAVE(LEAF, REG, BIT, MANDATORY, HOOK) \ HOOK(!!(MANDATORY || KCPUIDS(LEAF, REG) & (1u << BIT))) #else #define ___X86_HAVE(LEAF, REG, BIT, MANDATORY, HOOK) \ $1 << (BIT % 8), BIT / 8 + KCPUIDS(LEAF, REG) #endif #define _X86_HOOK__(X) X #else #define X86_HAVE(FEATURE) 0 #define X86_NEED(FEATURE) 0 #endif /* __x86_64__ */ #endif /* COSMOPOLITAN_LIBC_NEXGEN32E_X86FEATURE_H_ */
15,962
257
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/envp2.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2023 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/runtime/runtime.h" #ifndef __x86_64__ char **__envp; #endif /* __x86_64__ */
1,930
25
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/vidya.internal.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_VIDYA_H_ #define COSMOPOLITAN_LIBC_NEXGEN32E_VIDYA_H_ #define VIDYA_ROWS 25 #define VIDYA_COLUMNS 80 #define VIDYA_SIZE (VIDYA_ROWS * VIDYA_COLUMNS * 2) #define VIDYA_MODE_MDA 7 #define VIDYA_MODE_CGA 3 #define VIDYA_ADDR_MDA 0xb0000 #define VIDYA_ADDR_CGA 0xb8000 #define VIDYA_ATTR_NORMAL 0x07 /* cozy default for both mda and cga */ #define VIDYA_REWIND ~0x7fff /* derived from mode addr min. lzcnt */ #define VIDYA_SERVICE 0x10 #define VIDYA_SET_MODE 0x0003 #define VIDYA_SET_CURSOR 0x0100 #define VIDYA_SET_CURSOR_NONE 0x2000 #define VIDYA_SET_BLINKING 0x1003 #define VIDYA_SET_BLINKING_NONE 0x0000 #if !(__ASSEMBLER__ + __LINKER__ + 0) enum VidyaMode { kVidyaModeMda = VIDYA_MODE_MDA, kVidyaModeCga = VIDYA_MODE_CGA }; enum VidyaColor { kVidyaColorBlack = 0x0, kVidyaColorBlue = 0x1, kVidyaColorGreen = 0x2, kVidyaColorCyan = 0x3, kVidyaColorRed = 0x4, kVidyaColorMagenta = 0x5, kVidyaColorBrown = 0x6, kVidyaColorLightGray = 0x7, kVidyaColorDarkGray = 0x8, kVidyaColorLightBlue = 0x9, kVidyaColorLightGreen = 0xa, kVidyaColorLightCyan = 0xb, kVidyaColorLightRed = 0xc, kVidyaColorLightMagenta = 0xd, kVidyaColorYellow = 0xe, kVidyaColorWhite = 0xf }; struct thatispacked VidyaCell { unsigned glyph : 8; /* IBM Code Page 437 */ union VidyaAttr { enum { kVidyaAttrBlank = 0x00, kVidyaAttrNormal = VIDYA_ATTR_NORMAL, kVidyaAttrMdaFlipped = 0x70, kVidyaAttrMdaFlippedFaded = 0x78, kVidyaAttrMdaFlippedIntense = 0xf0, kVidyaAttrMdaFlippedFadedIntense = 0xf8 } preset : 8; struct VidyaTextDecoration { /* MDA Only */ unsigned underline : 1; unsigned __ignore1 : 1; unsigned bold : 1; unsigned __ignore2 : 3; unsigned intense : 1; } decoration; struct { /* CGA Only */ enum VidyaColor fg : 4; enum VidyaColor bg : 4; } color; } attr; }; typedef union VidyaAttr VidyaAttr; typedef struct VidyaCell VidyaCell; typedef struct VidyaCell VidyaPage[VIDYA_ROWS][VIDYA_COLUMNS]; __far VidyaPage *vinit(enum VidyaMode mode); __far VidyaPage *vcls(__far VidyaCell *pos); __far VidyaCell *vputc(__far VidyaCell *pos, int c); __far VidyaCell *vputs(__far VidyaCell *pos, const char *str); __far VidyaCell *vtput(__far VidyaCell *pos, const void *data, size_t size); __far VidyaCell *vscroll(__far VidyaCell *pos, size_t bytes); #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_LIBC_NEXGEN32E_VIDYA_H_ */
2,626
84
jart/cosmopolitan
false
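The declarations imply this bare-metal flow: pick a text mode, clear the page, then write cells at a cursor position. A sketch of the API shape under those assumptions; only meaningful when booted on metal.

#include "libc/nexgen32e/vidya.internal.h"

void hello_vga(void) {
  __far VidyaPage *page = vinit(kVidyaModeCga);  // 80x25 color text
  __far VidyaCell *pos = &(*page)[0][0];
  vcls(pos);                  // clear; returns the page per the prototype
  vputs(pos, "hello world");  // writes glyph/attr cells from the string
}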
cosmopolitan/libc/nexgen32e/fentry.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" .real .code16 # ∩ .code32 ∩ .code64 // Function entry hook stub. // // @note cc -pg -mfentry adds this to the start of every function // @see libc/log/shadowargs.ncabi.c // @mode long,legacy,real __fentry__: ret .endfn __fentry__,weak
2,112
31
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/kcpuids.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_KCPUIDS_H_ #define COSMOPOLITAN_LIBC_NEXGEN32E_KCPUIDS_H_ #define KCPUIDS_0H 0 #define KCPUIDS_1H 1 #define KCPUIDS_2H 2 #define KCPUIDS_7H 3 #define KCPUIDS_80000001H 4 #define KCPUIDS_80000007H 5 #define KCPUIDS_16H 6 #define KCPUIDS_LEN 7 #define KCPUIDS_6H -1 /* TBD: Thermal and Power Management */ #define KCPUIDS_DH -1 /* TBD: Extended state features */ #define KCPUIDS_80000008H -1 /* TBD: AMD Miscellaneous */ #define KCPUIDS_8000000AH -1 /* TBD: AMD SVM */ #define KCPUIDS_EAX 0 #define KCPUIDS_EBX 1 #define KCPUIDS_ECX 2 #define KCPUIDS_EDX 3 #define KCPUIDS(LEAF, REG) _KCPUIDS(LEAF, REG) #ifdef __ASSEMBLER__ #define _KCPUIDS(LEAF, REG) KCPUIDS_##LEAF * 16 + KCPUIDS_##REG * 4 #else #define _KCPUIDS(LEAF, REG) kCpuids[KCPUIDS_##LEAF][KCPUIDS_##REG] #endif #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ extern const unsigned kCpuids[KCPUIDS_LEN][4]; COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_LIBC_NEXGEN32E_KCPUIDS_H_ */
1,093
37
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/sha.h
#ifndef COSMOPOLITAN_LIBC_NEXGEN32E_SHA_H_ #define COSMOPOLITAN_LIBC_NEXGEN32E_SHA_H_ #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ void sha1_transform_avx2(uint32_t[hasatleast 5], const void *, unsigned); void sha1_transform_ni(uint32_t[hasatleast 5], const void *, unsigned); void sha256_transform_rorx(uint32_t[hasatleast 8], const void *, unsigned); void sha256_transform_ni(uint32_t[hasatleast 8], const void *, unsigned); COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_LIBC_NEXGEN32E_SHA_H_ */
559
14
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/program_invocation_name.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 sw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" .initbss 300,_init_program_invocation_name // Supplies argv[0] the GNU way. // // If argv[0] isn't supplied, this value will be null. // // @see program_invocation_short_name // @see GetProgramExecutableName() program_invocation_name: .quad 0 .endobj program_invocation_name,globl .previous .init.start 300,_init_program_invocation_name mov (%r13),%rax stosq .init.end 300,_init_program_invocation_name
2,283
37
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/rldecode.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 sw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" .text.startup // Thirteen byte decompressor. // // @param di points to output buffer // @param si points to uint8_t {len₁,byte₁}, ..., {0,0} // @mode long,legacy,real rldecode: .leafprologue .profilable xor %ecx,%ecx 0: lodsb xchg %al,%cl lodsb jrcxz 2f rep stosb jmp 0b 2: .leafepilogue .endfn rldecode,globl
2,194
39
jart/cosmopolitan
false
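A C rendition of the format the thirteen-byte decompressor consumes: {length,byte} pairs terminated by a zero-length sentinel, mirroring the lodsb/jrcxz/rep-stosb loop.

#include <stddef.h>

size_t rldecode_c(unsigned char *dst, const unsigned char *src) {
  size_t n = 0;
  for (;;) {
    unsigned len = *src++;            // lodsb → run length (%cl)
    unsigned byte = *src++;           // lodsb → fill byte (%al)
    if (!len) return n;               // {0,...} sentinel (jrcxz)
    while (len--) dst[n++] = byte;    // rep stosb
  }
}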
cosmopolitan/libc/nexgen32e/ksha512.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2022 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" .rodata .balign 64 kSha512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .endobj kSha512,globl
3,724
65
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/sha256ni.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│ │vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ │ │ Copyright 2015 Intel Corporation │ │ │ │ Redistribution and use in source and binary forms, with or without │ │ modification, are permitted provided that the following conditions │ │ are met: │ │ │ │ * Redistributions of source code must retain the above copyright │ │ notice, this list of conditions and the following disclaimer. │ │ * Redistributions in binary form must reproduce the above copyright │ │ notice, this list of conditions and the following disclaimer in │ │ the documentation and/or other materials provided with the │ │ distribution. │ │ * Neither the name of Intel Corporation nor the names of its │ │ contributors may be used to endorse or promote products derived │ │ from this software without specific prior written permission. │ │ │ │ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS │ │ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT │ │ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR │ │ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT │ │ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, │ │ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT │ │ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, │ │ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY │ │ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT │ │ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE │ │ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. │ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/macros.internal.h" .text .balign 32 .ident "\n\ Intel SHA-NI (BSD-3 License)\n\ Copyright 2015 Intel Corporation\n\ Sean Gulley <[email protected]>\n\ Tim Chen <[email protected]>\n" .include "libc/disclaimer.inc" #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ #define NUM_BLKS %rdx /* 3rd arg */ #define SHA256CONSTANTS %rax #define MSG %xmm0 #define STATE0 %xmm1 #define STATE1 %xmm2 #define MSGTMP0 %xmm3 #define MSGTMP1 %xmm4 #define MSGTMP2 %xmm5 #define MSGTMP3 %xmm6 #define MSGTMP4 %xmm7 #define SHUF_MASK %xmm8 #define ABEF_SAVE %xmm9 #define CDGH_SAVE %xmm10 // Performs Intel® SHA-NI™ optimized SHA-256 update. // // The function takes a pointer to the current hash values, a // pointer to the input data, and a number of 64 byte blocks to // process. Once all blocks have been processed, the digest pointer // is updated with the resulting hash value. The function only // processes complete blocks, there is no functionality to store // partial blocks. All message padding and hash value // initialization must be done outside the update function. // // The indented lines in the loop are instructions related to // rounds processing. The non-indented lines are instructions // related to the message schedule. 
//
// void sha256_transform_ni(uint32_t digest[static 8],
//                          const void *data,
//                          int32_t numBlocks);
//
// @param %rdi points to output digest
// @param %rsi points to input data
// @param %rdx is number of blocks to process
// @see X86_HAVE(SHA)
sha256_transform_ni:
	.leafprologue
	.profilable
	shl	$6,NUM_BLKS		# convert to bytes
	jz	.Ldone_hash
	add	DATA_PTR,NUM_BLKS	# pointer to end of data

// Load initial hash values
// Need to reorder these appropriately
// DCBA, HGFE -> ABEF, CDGH
	movdqu	0*16(DIGEST_PTR),STATE0
	movdqu	1*16(DIGEST_PTR),STATE1

	pshufd	$0xB1,STATE0,STATE0	# CDAB
	pshufd	$0x1B,STATE1,STATE1	# EFGH
	movdqa	STATE0,MSGTMP4
	palignr	$8,STATE1,STATE0	# ABEF
	pblendw	$0xF0,MSGTMP4,STATE1	# CDGH

	movdqa	PSHUFFLE_BYTE_FLIP_MASK(%rip),SHUF_MASK
	lea	kSha256(%rip),SHA256CONSTANTS

.Lloop0:
// Save hash values for addition after rounds
	movdqa	STATE0,ABEF_SAVE
	movdqa	STATE1,CDGH_SAVE

// Rounds 0-3
	movdqu	0*16(DATA_PTR),MSG
	pshufb	SHUF_MASK,MSG
	movdqa	MSG,MSGTMP0
		paddd	0*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0

// Rounds 4-7
	movdqu	1*16(DATA_PTR),MSG
	pshufb	SHUF_MASK,MSG
	movdqa	MSG,MSGTMP1
		paddd	1*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP1,MSGTMP0

// Rounds 8-11
	movdqu	2*16(DATA_PTR),MSG
	pshufb	SHUF_MASK,MSG
	movdqa	MSG,MSGTMP2
		paddd	2*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP2,MSGTMP1

// Rounds 12-15
	movdqu	3*16(DATA_PTR),MSG
	pshufb	SHUF_MASK,MSG
	movdqa	MSG,MSGTMP3
		paddd	3*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP3,MSGTMP4
	palignr	$4,MSGTMP2,MSGTMP4
	paddd	MSGTMP4,MSGTMP0
	sha256msg2 MSGTMP3,MSGTMP0
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP3,MSGTMP2

// Rounds 16-19
	movdqa	MSGTMP0,MSG
		paddd	4*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP0,MSGTMP4
	palignr	$4,MSGTMP3,MSGTMP4
	paddd	MSGTMP4,MSGTMP1
	sha256msg2 MSGTMP0,MSGTMP1
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP0,MSGTMP3

// Rounds 20-23
	movdqa	MSGTMP1,MSG
		paddd	5*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP1,MSGTMP4
	palignr	$4,MSGTMP0,MSGTMP4
	paddd	MSGTMP4,MSGTMP2
	sha256msg2 MSGTMP1,MSGTMP2
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP1,MSGTMP0

// Rounds 24-27
	movdqa	MSGTMP2,MSG
		paddd	6*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP2,MSGTMP4
	palignr	$4,MSGTMP1,MSGTMP4
	paddd	MSGTMP4,MSGTMP3
	sha256msg2 MSGTMP2,MSGTMP3
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP2,MSGTMP1

// Rounds 28-31
	movdqa	MSGTMP3,MSG
		paddd	7*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP3,MSGTMP4
	palignr	$4,MSGTMP2,MSGTMP4
	paddd	MSGTMP4,MSGTMP0
	sha256msg2 MSGTMP3,MSGTMP0
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP3,MSGTMP2

// Rounds 32-35
	movdqa	MSGTMP0,MSG
		paddd	8*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP0,MSGTMP4
	palignr	$4,MSGTMP3,MSGTMP4
	paddd	MSGTMP4,MSGTMP1
	sha256msg2 MSGTMP0,MSGTMP1
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP0,MSGTMP3

// Rounds 36-39
	movdqa	MSGTMP1,MSG
		paddd	9*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP1,MSGTMP4
	palignr	$4,MSGTMP0,MSGTMP4
	paddd	MSGTMP4,MSGTMP2
	sha256msg2 MSGTMP1,MSGTMP2
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP1,MSGTMP0

// Rounds 40-43
	movdqa	MSGTMP2,MSG
		paddd	10*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP2,MSGTMP4
	palignr	$4,MSGTMP1,MSGTMP4
	paddd	MSGTMP4,MSGTMP3
	sha256msg2 MSGTMP2,MSGTMP3
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP2,MSGTMP1

// Rounds 44-47
	movdqa	MSGTMP3,MSG
		paddd	11*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP3,MSGTMP4
	palignr	$4,MSGTMP2,MSGTMP4
	paddd	MSGTMP4,MSGTMP0
	sha256msg2 MSGTMP3,MSGTMP0
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP3,MSGTMP2

// Rounds 48-51
	movdqa	MSGTMP0,MSG
		paddd	12*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP0,MSGTMP4
	palignr	$4,MSGTMP3,MSGTMP4
	paddd	MSGTMP4,MSGTMP1
	sha256msg2 MSGTMP0,MSGTMP1
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0
	sha256msg1 MSGTMP0,MSGTMP3

// Rounds 52-55
	movdqa	MSGTMP1,MSG
		paddd	13*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP1,MSGTMP4
	palignr	$4,MSGTMP0,MSGTMP4
	paddd	MSGTMP4,MSGTMP2
	sha256msg2 MSGTMP1,MSGTMP2
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0

// Rounds 56-59
	movdqa	MSGTMP2,MSG
		paddd	14*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
	movdqa	MSGTMP2,MSGTMP4
	palignr	$4,MSGTMP1,MSGTMP4
	paddd	MSGTMP4,MSGTMP3
	sha256msg2 MSGTMP2,MSGTMP3
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0

// Rounds 60-63
	movdqa	MSGTMP3,MSG
		paddd	15*16(SHA256CONSTANTS),MSG
		sha256rnds2 STATE0,STATE1
		pshufd	$0x0E,MSG,MSG
		sha256rnds2 STATE1,STATE0

// Add current hash values with previously saved
	paddd	ABEF_SAVE,STATE0
	paddd	CDGH_SAVE,STATE1

// Increment data pointer and loop if more to process
	add	$64,DATA_PTR
	cmp	NUM_BLKS,DATA_PTR
	jne	.Lloop0

// Write hash values back in the correct order
	pshufd	$0x1B,STATE0,STATE0	# FEBA
	pshufd	$0xB1,STATE1,STATE1	# DCHG
	movdqa	STATE0,MSGTMP4
	pblendw	$0xF0,STATE1,STATE0	# DCBA
	palignr	$8,MSGTMP4,STATE1	# HGFE

	movdqu	STATE0,0*16(DIGEST_PTR)
	movdqu	STATE1,1*16(DIGEST_PTR)

.Ldone_hash:
	.leafepilogue
	.endfn	sha256_transform_ni,globl

	.section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK,"aM",@progbits,16
	.balign	16
PSHUFFLE_BYTE_FLIP_MASK:
	.octa	0x0c0d0e0f08090a0b0405060700010203
	.endobj	PSHUFFLE_BYTE_FLIP_MASK
10,341
319
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/argv.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
│vi: set et ft=asm ts=8 sw=8 fenc=utf-8                                     :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for         │
│ any purpose with or without fee is hereby granted, provided that the         │
│ above copyright notice and this permission notice appear in all copies.      │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/macros.internal.h"
#include "libc/notice.inc"

	.initbss 300,_init_argv
// Global variable holding _start(argv) parameter.
__argv:	.quad	0
	.endobj	__argv,globl
	.previous

	.init.start 300,_init_argv
	mov	%r13,%rax		# entrypoint leaves argv in %r13
	stosq				# store it into __argv
	.init.end 300,_init_argv
2,100
32
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/pid.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8                                :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2023 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for         │
│ any purpose with or without fee is hereby granted, provided that the         │
│ above copyright notice and this permission notice appear in all copies.      │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/runtime/internal.h"

int __pid;
1,883
22
jart/cosmopolitan
false
cosmopolitan/libc/nexgen32e/environ.S
/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
│vi: set et ft=asm ts=8 sw=8 fenc=utf-8                                     :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for         │
│ any purpose with or without fee is hereby granted, provided that the         │
│ above copyright notice and this permission notice appear in all copies.      │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/macros.internal.h"
#include "libc/notice.inc"

// Environment variable pointer list.
	.bss
	.balign	8
environ:
	.quad	0
	.endobj	environ,globl
	.previous

	.init.start 300,_init_environ
	mov	%r14,environ(%rip)	# entrypoint leaves envp in %r14
	.init.end 300,_init_environ
2,090
33
jart/cosmopolitan
false
cosmopolitan/libc/linux/exit.h
#ifndef COSMOPOLITAN_LIBC_LINUX_EXIT_H_
#define COSMOPOLITAN_LIBC_LINUX_EXIT_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

forceinline wontreturn long LinuxExit(long rc) {
  asm volatile("syscall"
               : /* no outputs */
               : "a"(0xE7), "D"(rc)
               : "memory");
  unreachable;
}

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_LINUX_EXIT_H_ */
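/* A minimal usage sketch (an illustration, not part of the original
   header): on Linux x86-64, 0xE7 is the exit_group system call, so
   this terminates every thread in the process immediately, without
   running atexit() handlers or flushing stdio the way exit() would:

     if (something_is_hopelessly_wrong) LinuxExit(1);

   Here something_is_hopelessly_wrong is a hypothetical condition. */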
443
17
jart/cosmopolitan
false