From bac773b4914dfc374671ba4be144424fa598e0da Mon Sep 17 00:00:00 2001 From: liuqiang Date: Fri, 27 May 2022 15:49:45 +0800 Subject: [PATCH] opt: remove branch in copied escaped chars --- internal/native/avx/native_amd64.s | 4198 ++++++++-------- internal/native/avx/native_subr_amd64.go | 26 +- internal/native/avx2/native_amd64.s | 5315 +++++++++++---------- internal/native/avx2/native_subr_amd64.go | 26 +- native/parsing.c | 20 +- 5 files changed, 5114 insertions(+), 4471 deletions(-) diff --git a/internal/native/avx/native_amd64.s b/internal/native/avx/native_amd64.s index 5acbc2c9a..4f9b9fba9 100644 --- a/internal/native/avx/native_amd64.s +++ b/internal/native/avx/native_amd64.s @@ -274,7 +274,7 @@ LBB2_8: LONG $0x4ff56941; WORD $0x1293; BYTE $0x00 // imull $1217359, %r13d, %esi MOVQ R13, AX SHLQ $4, AX - LONG $0x6f0d8d48; WORD $0x0080; BYTE $0x00 // leaq $32879(%rip), %rcx /* _DOUBLE_POW5_INV_SPLIT(%rip) */ + LONG $0x560d8d48; WORD $0x0084; BYTE $0x00 // leaq $33878(%rip), %rcx /* _DOUBLE_POW5_INV_SPLIT(%rip) */ MOVQ R10, R12 ORQ $2, R12 MOVQ 0(AX)(CX*1), R11 @@ -360,7 +360,7 @@ LBB2_22: SHRL $19, SI MOVLQSX AX, DI SHLQ $4, DI - LONG $0xa21d8d4c; WORD $0x0094; BYTE $0x00 // leaq $38050(%rip), %r11 /* _DOUBLE_POW5_SPLIT(%rip) */ + LONG $0x891d8d4c; WORD $0x0098; BYTE $0x00 // leaq $39049(%rip), %r11 /* _DOUBLE_POW5_SPLIT(%rip) */ MOVQ R10, CX ORQ $2, CX MOVQ 0(DI)(R11*1), R9 @@ -759,7 +759,7 @@ LBB2_67: LEAQ 1(R12), BX MOVQ BX, SI MOVL R15, DX - LONG $0x0049f5e8; BYTE $0x00 // callq _print_mantissa + LONG $0x004dc5e8; BYTE $0x00 // callq _print_mantissa MOVB 1(R12), AX MOVB AX, 0(R12) MOVL $1, AX @@ -788,7 +788,7 @@ LBB2_74: LEAL 0(CX)(CX*1), AX LEAL 0(AX)(AX*4), AX SUBL AX, R14 - LONG $0xb2058d48; WORD $0x00a3; BYTE $0x00 // leaq $41906(%rip), %rax /* _Digits(%rip) */ + LONG $0x99058d48; WORD $0x00a7; BYTE $0x00 // leaq $42905(%rip), %rax /* _Digits(%rip) */ MOVWLZX 0(AX)(CX*2), AX MOVL BX, CX MOVW AX, 0(R12)(CX*1) @@ -823,7 +823,7 @@ LBB2_75: CMPL R14, $10 JL LBB2_77 MOVLQSX R14, AX - LONG $0x490d8d48; WORD $0x00a3; BYTE $0x00 // leaq $41801(%rip), %rcx /* _Digits(%rip) */ + LONG $0x300d8d48; WORD $0x00a7; BYTE $0x00 // leaq $42800(%rip), %rcx /* _Digits(%rip) */ MOVWLZX 0(CX)(AX*2), AX MOVL BX, CX MOVW AX, 0(R12)(CX*1) @@ -842,7 +842,7 @@ LBB2_80: MOVL BX, SI ADDQ -56(BP), SI MOVL R15, DX - LONG $0x0048f4e8; BYTE $0x00 // callq _print_mantissa + LONG $0x004cc4e8; BYTE $0x00 // callq _print_mantissa TESTL R12, R12 JE LBB2_81 LEAL 0(R12)(BX*1), AX @@ -923,7 +923,7 @@ LBB2_85: ADDQ BX, R12 MOVQ R12, SI MOVL R15, DX - LONG $0x004818e8; BYTE $0x00 // callq _print_mantissa + LONG $0x004be8e8; BYTE $0x00 // callq _print_mantissa ADDL BX, R15 MOVL R15, BX MOVL -44(BP), R9 @@ -1077,7 +1077,7 @@ _u64toa: ADDQ AX, AX CMPL SI, $1000 JB LBB4_3 - LONG $0x340d8d48; WORD $0x00a0; BYTE $0x00 // leaq $41012(%rip), %rcx /* _Digits(%rip) */ + LONG $0x1b0d8d48; WORD $0x00a4; BYTE $0x00 // leaq $42011(%rip), %rcx /* _Digits(%rip) */ MOVB 0(DX)(CX*1), CX MOVB CX, 0(DI) MOVL $1, CX @@ -1091,14 +1091,14 @@ LBB4_3: LBB4_4: MOVWLZX DX, DX ORQ $1, DX - LONG $0x13358d48; WORD $0x00a0; BYTE $0x00 // leaq $40979(%rip), %rsi /* _Digits(%rip) */ + LONG $0xfa358d48; WORD $0x00a3; BYTE $0x00 // leaq $41978(%rip), %rsi /* _Digits(%rip) */ MOVB 0(DX)(SI*1), DX MOVL CX, SI ADDL $1, CX MOVB DX, 0(DI)(SI*1) LBB4_6: - LONG $0x01158d48; WORD $0x00a0; BYTE $0x00 // leaq $40961(%rip), %rdx /* _Digits(%rip) */ + LONG $0xe8158d48; WORD $0x00a3; BYTE $0x00 // leaq $41960(%rip), %rdx /* _Digits(%rip) */ MOVB 0(AX)(DX*1), DX MOVL 
CX, SI ADDL $1, CX @@ -1107,7 +1107,7 @@ LBB4_6: LBB4_7: MOVWLZX AX, AX ORQ $1, AX - LONG $0xe8158d48; WORD $0x009f; BYTE $0x00 // leaq $40936(%rip), %rdx /* _Digits(%rip) */ + LONG $0xcf158d48; WORD $0x00a3; BYTE $0x00 // leaq $41935(%rip), %rdx /* _Digits(%rip) */ MOVB 0(AX)(DX*1), AX MOVL CX, DX ADDL $1, CX @@ -1154,7 +1154,7 @@ LBB4_8: ADDQ R11, R11 CMPL SI, $10000000 JB LBB4_11 - LONG $0x50058d48; WORD $0x009f; BYTE $0x00 // leaq $40784(%rip), %rax /* _Digits(%rip) */ + LONG $0x37058d48; WORD $0x00a3; BYTE $0x00 // leaq $41783(%rip), %rax /* _Digits(%rip) */ MOVB 0(R10)(AX*1), AX MOVB AX, 0(DI) MOVL $1, CX @@ -1168,14 +1168,14 @@ LBB4_11: LBB4_12: MOVL R10, AX ORQ $1, AX - LONG $0x2b358d48; WORD $0x009f; BYTE $0x00 // leaq $40747(%rip), %rsi /* _Digits(%rip) */ + LONG $0x12358d48; WORD $0x00a3; BYTE $0x00 // leaq $41746(%rip), %rsi /* _Digits(%rip) */ MOVB 0(AX)(SI*1), AX MOVL CX, SI ADDL $1, CX MOVB AX, 0(DI)(SI*1) LBB4_14: - LONG $0x19058d48; WORD $0x009f; BYTE $0x00 // leaq $40729(%rip), %rax /* _Digits(%rip) */ + LONG $0x00058d48; WORD $0x00a3; BYTE $0x00 // leaq $41728(%rip), %rax /* _Digits(%rip) */ MOVB 0(R9)(AX*1), AX MOVL CX, SI ADDL $1, CX @@ -1184,7 +1184,7 @@ LBB4_14: LBB4_15: MOVWLZX R9, AX ORQ $1, AX - LONG $0xfe358d48; WORD $0x009e; BYTE $0x00 // leaq $40702(%rip), %rsi /* _Digits(%rip) */ + LONG $0xe5358d48; WORD $0x00a2; BYTE $0x00 // leaq $41701(%rip), %rsi /* _Digits(%rip) */ MOVB 0(AX)(SI*1), AX MOVL CX, DX MOVB AX, 0(DI)(DX*1) @@ -1266,7 +1266,7 @@ LBB4_16: MOVL $16, CX SUBL AX, CX SHLQ $4, AX - LONG $0x76158d48; WORD $0x009e; BYTE $0x00 // leaq $40566(%rip), %rdx /* _VecShiftShuffles(%rip) */ + LONG $0x5d158d48; WORD $0x00a2; BYTE $0x00 // leaq $41565(%rip), %rdx /* _VecShiftShuffles(%rip) */ LONG $0x0071e2c4; WORD $0x1004 // vpshufb (%rax,%rdx), %xmm1, %xmm0 LONG $0x077ffac5 // vmovdqu %xmm0, (%rdi) MOVL CX, AX @@ -1292,7 +1292,7 @@ LBB4_20: CMPL DX, $99 JA LBB4_22 MOVL DX, AX - LONG $0x590d8d48; WORD $0x009d; BYTE $0x00 // leaq $40281(%rip), %rcx /* _Digits(%rip) */ + LONG $0x400d8d48; WORD $0x00a1; BYTE $0x00 // leaq $41280(%rip), %rcx /* _Digits(%rip) */ MOVWLZX 0(CX)(AX*2), AX MOVW AX, 0(DI) MOVL $2, CX @@ -1315,7 +1315,7 @@ LBB4_22: WORD $0xc96b; BYTE $0x64 // imull $100, %ecx, %ecx SUBL CX, AX MOVWLZX AX, AX - LONG $0x100d8d48; WORD $0x009d; BYTE $0x00 // leaq $40208(%rip), %rcx /* _Digits(%rip) */ + LONG $0xf70d8d48; WORD $0x00a0; BYTE $0x00 // leaq $41207(%rip), %rcx /* _Digits(%rip) */ MOVWLZX 0(CX)(AX*2), AX MOVW AX, 1(DI) MOVL $3, CX @@ -1325,7 +1325,7 @@ LBB4_24: WORD $0xc86b; BYTE $0x64 // imull $100, %eax, %ecx SUBL CX, DX MOVWLZX AX, AX - LONG $0xf20d8d48; WORD $0x009c; BYTE $0x00 // leaq $40178(%rip), %rcx /* _Digits(%rip) */ + LONG $0xd90d8d48; WORD $0x00a0; BYTE $0x00 // leaq $41177(%rip), %rcx /* _Digits(%rip) */ MOVWLZX 0(CX)(AX*2), AX MOVW AX, 0(DI) MOVWLZX DX, AX @@ -1388,12 +1388,313 @@ LBB4_25: RET LCPI5_0: + QUAD $0x0000000000000000; QUAD $0x0000000000000000 // .space 16, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + QUAD $0x0000000000000000; QUAD $0x0000000000000000 // .space 16, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + +_xprintf: + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + WORD $0x5741 // pushq %r15 + WORD $0x5641 // pushq %r14 + WORD $0x5541 // pushq %r13 + WORD $0x5441 // pushq %r12 + BYTE $0x53 // pushq %rbx + ANDQ $-32, SP + SUBQ $576, SP + MOVQ DI, R15 + MOVQ SI, 376(SP) + MOVQ DX, 384(SP) + MOVQ CX, 392(SP) + MOVQ R8, 400(SP) + MOVQ R9, 408(SP) + 
TESTB AX, AX + JE LBB5_52 + QUAD $0x0001a0248429f8c5; BYTE $0x00 // vmovaps %xmm0, $416(%rsp) + QUAD $0x0001b0248c29f8c5; BYTE $0x00 // vmovaps %xmm1, $432(%rsp) + QUAD $0x0001c0249429f8c5; BYTE $0x00 // vmovaps %xmm2, $448(%rsp) + QUAD $0x0001d0249c29f8c5; BYTE $0x00 // vmovaps %xmm3, $464(%rsp) + QUAD $0x0001e024a429f8c5; BYTE $0x00 // vmovaps %xmm4, $480(%rsp) + QUAD $0x0001f024ac29f8c5; BYTE $0x00 // vmovaps %xmm5, $496(%rsp) + QUAD $0x00020024b429f8c5; BYTE $0x00 // vmovaps %xmm6, $512(%rsp) + QUAD $0x00021024bc29f8c5; BYTE $0x00 // vmovaps %xmm7, $528(%rsp) + +LBB5_52: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + QUAD $0x000140248429fcc5; BYTE $0x00 // vmovaps %ymm0, $320(%rsp) + QUAD $0x000120248429fcc5; BYTE $0x00 // vmovaps %ymm0, $288(%rsp) + QUAD $0x000100248429fcc5; BYTE $0x00 // vmovaps %ymm0, $256(%rsp) + QUAD $0x0000e0248429fcc5; BYTE $0x00 // vmovaps %ymm0, $224(%rsp) + QUAD $0x0000c0248429fcc5; BYTE $0x00 // vmovaps %ymm0, $192(%rsp) + QUAD $0x0000a0248429fcc5; BYTE $0x00 // vmovaps %ymm0, $160(%rsp) + QUAD $0x000080248429fcc5; BYTE $0x00 // vmovaps %ymm0, $128(%rsp) + LONG $0x4429fcc5; WORD $0x6024 // vmovaps %ymm0, $96(%rsp) + MOVQ $206158430216, AX + MOVQ AX, 32(SP) + LEAQ 16(BP), AX + MOVQ AX, 40(SP) + LEAQ 368(SP), AX + MOVQ AX, 48(SP) + XORL AX, AX + LEAQ 96(SP), BX + LEAQ 64(SP), R14 + MOVQ $7378697629483820647, R13 + LONG $0xfb258d4c; WORD $0x009f; BYTE $0x00 // leaq $40955(%rip), %r12 /* _printhex.tab(%rip) */ + JMP LBB5_1 + +LBB5_17: + MOVB $37, 64(SP) + MOVL $1, SI + MOVQ R14, DI + +LBB5_25: + WORD $0xf8c5; BYTE $0x77 // vzeroupper + LONG $0x0045ece8; BYTE $0x00 // callq _write_syscall + XORL AX, AX + +LBB5_1: + MOVB 0(R15), CX + CMPB CX, $37 + JE LBB5_4 + TESTB CX, CX + JE LBB5_44 + ADDQ $1, R15 + MOVB CX, 96(SP)(AX*1) + ADDQ $1, AX + JMP LBB5_1 + +LBB5_4: + MOVB $0, 96(SP)(AX*1) + CMPB 96(SP), $0 + JE LBB5_5 + XORL AX, AX + +LBB5_7: + LEAQ 1(AX), SI + CMPB 97(SP)(AX*1), $0 + MOVQ SI, AX + JNE LBB5_7 + JMP LBB5_8 + +LBB5_5: + XORL SI, SI + +LBB5_8: + MOVQ BX, DI + WORD $0xf8c5; BYTE $0x77 // vzeroupper + LONG $0x0045a1e8; BYTE $0x00 // callq _write_syscall + MOVBLSX 1(R15), CX + ADDQ $2, R15 + CMPL CX, $114 + JG LBB5_13 + CMPL CX, $37 + JE LBB5_17 + MOVL $0, AX + CMPL CX, $100 + JNE LBB5_1 + MOVL 32(SP), CX + CMPQ CX, $40 + JA LBB5_26 + MOVQ CX, AX + ADDQ 48(SP), AX + ADDL $8, CX + MOVL CX, 32(SP) + JMP LBB5_27 + +LBB5_13: + CMPL CX, $115 + JE LBB5_18 + MOVL $0, AX + CMPL CX, $120 + JNE LBB5_1 + MOVL 32(SP), CX + CMPQ CX, $40 + JA LBB5_36 + MOVQ CX, AX + ADDQ 48(SP), AX + ADDL $8, CX + MOVL CX, 32(SP) + MOVQ 0(AX), CX + TESTQ CX, CX + JE LBB5_38 + +LBB5_39: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x4429fcc5; WORD $0x4024 // vmovaps %ymm0, $64(%rsp) + LEAQ 95(SP), DI + MOVQ CX, DX + +LBB5_40: + MOVQ DI, AX + MOVL CX, SI + ANDL $15, SI + MOVBLZX 0(SI)(R12*1), BX + ADDQ $-1, DI + MOVB BX, -1(AX) + SHRQ $4, DX + CMPQ CX, $15 + MOVQ DX, CX + JA LBB5_40 + XORL SI, SI + +LBB5_42: + ADDQ $1, SI + CMPB 0(AX), $0 + LEAQ 1(AX), AX + JNE LBB5_42 + JMP LBB5_43 + +LBB5_18: + MOVL 32(SP), CX + CMPQ CX, $40 + JA LBB5_20 + MOVQ CX, AX + ADDQ 48(SP), AX + ADDL $8, CX + MOVL CX, 32(SP) + MOVQ 0(AX), DI + CMPB 0(DI), $0 + JE LBB5_22 + +LBB5_23: + XORL AX, AX + +LBB5_24: + LEAQ 1(AX), SI + CMPB 1(DI)(AX*1), $0 + MOVQ SI, AX + JNE LBB5_24 + JMP LBB5_25 + +LBB5_26: + MOVQ 40(SP), AX + LEAQ 8(AX), CX + MOVQ CX, 40(SP) + +LBB5_27: + MOVQ 0(AX), R8 + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x4429fcc5; WORD $0x4024 // vmovaps %ymm0, $64(%rsp) + TESTQ 
R8, R8 + JE LBB5_28 + MOVQ R8, CX + NEGQ CX + LONG $0xc84c0f49 // cmovlq %r8, %rcx + LEAQ 94(SP), DI + +LBB5_30: + MOVQ CX, AX + IMULQ R13 + MOVQ DX, AX + SHRQ $63, AX + SARQ $2, DX + ADDQ AX, DX + LEAL 0(DX)(DX*1), AX + LEAL 0(AX)(AX*4), AX + MOVL CX, SI + SUBL AX, SI + ADDB $48, SI + MOVB SI, 0(DI) + ADDQ $9, CX + ADDQ $-1, DI + CMPQ CX, $18 + MOVQ DX, CX + JA LBB5_30 + TESTQ R8, R8 + JS LBB5_33 + ADDQ $1, DI + JMP LBB5_34 + +LBB5_36: + MOVQ 40(SP), AX + LEAQ 8(AX), CX + MOVQ CX, 40(SP) + MOVQ 0(AX), CX + TESTQ CX, CX + JNE LBB5_39 + +LBB5_38: + MOVB $48, 64(SP) + MOVL $1, SI + MOVQ R14, DI + +LBB5_43: + WORD $0xf8c5; BYTE $0x77 // vzeroupper + LONG $0x0043fee8; BYTE $0x00 // callq _write_syscall + XORL AX, AX + LEAQ 96(SP), BX + JMP LBB5_1 + +LBB5_20: + MOVQ 40(SP), AX + LEAQ 8(AX), CX + MOVQ CX, 40(SP) + MOVQ 0(AX), DI + CMPB 0(DI), $0 + JNE LBB5_23 + +LBB5_22: + XORL SI, SI + JMP LBB5_25 + +LBB5_28: + MOVB $48, 31(SP) + MOVL $1, SI + LEAQ 31(SP), DI + JMP LBB5_25 + +LBB5_33: + MOVB $45, 0(DI) + +LBB5_34: + XORL SI, SI + +LBB5_35: + CMPB 1(DI)(SI*1), $0 + LEAQ 1(SI), SI + JNE LBB5_35 + JMP LBB5_25 + +LBB5_44: + TESTQ AX, AX + JE LBB5_50 + MOVB $0, 96(SP)(AX*1) + CMPB 96(SP), $0 + JE LBB5_46 + XORL AX, AX + +LBB5_48: + LEAQ 1(AX), SI + CMPB 97(SP)(AX*1), $0 + MOVQ SI, AX + JNE LBB5_48 + JMP LBB5_49 + +LBB5_46: + XORL SI, SI + +LBB5_49: + LEAQ 96(SP), DI + WORD $0xf8c5; BYTE $0x77 // vzeroupper + LONG $0x004376e8; BYTE $0x00 // callq _write_syscall + +LBB5_50: + LEAQ -40(BP), SP + BYTE $0x5b // popq %rbx + WORD $0x5c41 // popq %r12 + WORD $0x5d41 // popq %r13 + WORD $0x5e41 // popq %r14 + WORD $0x5f41 // popq %r15 + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + RET + +LCPI6_0: QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 16, ' ' -LCPI5_1: +LCPI6_1: QUAD $0x2222222222222222; QUAD $0x2222222222222222 // .space 16, '""""""""""""""""' -LCPI5_2: +LCPI6_2: QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' _quote: @@ -1405,49 +1706,45 @@ _quote: WORD $0x5441 // pushq %r12 BYTE $0x53 // pushq %rbx SUBQ $32, SP - ANDL $1, R8 - TESTQ R8, R8 - LONG $0xff1d8d4c; WORD $0x009c; BYTE $0x00 // leaq $40191(%rip), %r11 /* __SingleQuoteTab(%rip) */ - LONG $0xf8158d4c; WORD $0x00ac; BYTE $0x00 // leaq $44280(%rip), %r10 /* __DoubleQuoteTab(%rip) */ - LONG $0xd3440f4d // cmoveq %r11, %r10 - MOVQ CX, R9 - MOVQ SI, R13 - ORQ $6, R8 - IMULQ SI, R8 - CMPQ R8, CX - JBE LBB5_56 + MOVQ CX, R10 + MOVQ 0(CX), R11 + TESTB $1, R8 + MOVQ SI, AX + LONG $0x18358d4c; WORD $0x009d; BYTE $0x00 // leaq $40216(%rip), %r14 /* __SingleQuoteTab(%rip) */ + LONG $0x110d8d4c; WORD $0x00ad; BYTE $0x00 // leaq $44305(%rip), %r9 /* __DoubleQuoteTab(%rip) */ + LONG $0xce440f4d // cmoveq %r14, %r9 + LEAQ 0(SI*8), CX + CMPQ R11, CX + JGE LBB6_56 MOVQ DX, R14 - MOVQ DI, AX - TESTQ R13, R13 - JE LBB5_71 - MOVQ R13, R8 - MOVQ 0(R9), R11 - QUAD $0xffffff72056ffac5 // vmovdqu $-142(%rip), %xmm0 /* LCPI5_0(%rip) */ - QUAD $0xffffff7a0d6ffac5 // vmovdqu $-134(%rip), %xmm1 /* LCPI5_1(%rip) */ - QUAD $0xffffff82156ffac5 // vmovdqu $-126(%rip), %xmm2 /* LCPI5_2(%rip) */ + MOVQ DI, R8 + TESTQ AX, AX + JE LBB6_71 + QUAD $0xffffff78056ffac5 // vmovdqu $-136(%rip), %xmm0 /* LCPI6_0(%rip) */ + QUAD $0xffffff800d6ffac5 // vmovdqu $-128(%rip), %xmm1 /* LCPI6_1(%rip) */ + QUAD $0xffffff88156ffac5 // vmovdqu $-120(%rip), %xmm2 /* LCPI6_2(%rip) */ LONG $0xdb76e1c5 // vpcmpeqd %xmm3, %xmm3, %xmm3 MOVQ DI, CX MOVQ DX, -48(BP) MOVQ DX, R14 -LBB5_3: - MOVQ CX, AX - CMPQ R8, 
$16 +LBB6_3: + MOVQ CX, R8 + CMPQ AX, $16 SETGE CX - MOVQ AX, -56(BP) MOVQ R11, R12 MOVQ R14, R15 - MOVQ R8, SI - MOVQ AX, R13 - JL LBB5_10 + MOVQ AX, SI + MOVQ R8, R13 + JL LBB6_10 CMPQ R11, $16 - JL LBB5_10 + JL LBB6_10 XORL R15, R15 - MOVQ R8, BX + MOVQ AX, BX MOVQ R11, DX -LBB5_6: - LONG $0x6f7aa1c4; WORD $0x3824 // vmovdqu (%rax,%r15), %xmm4 +LBB6_6: + LONG $0x6f7a81c4; WORD $0x3824 // vmovdqu (%r8,%r15), %xmm4 LONG $0xec64f9c5 // vpcmpgtb %xmm4, %xmm0, %xmm5 LONG $0xf174d9c5 // vpcmpeqb %xmm1, %xmm4, %xmm6 LONG $0xfa74d9c5 // vpcmpeqb %xmm2, %xmm4, %xmm7 @@ -1458,25 +1755,25 @@ LBB5_6: LONG $0xe4ebc9c5 // vpor %xmm4, %xmm6, %xmm4 LONG $0xccd7f9c5 // vpmovmskb %xmm4, %ecx TESTL CX, CX - JNE LBB5_23 + JNE LBB6_23 LEAQ -16(BX), SI LEAQ -16(DX), R12 ADDQ $16, R15 CMPQ BX, $32 SETGE CX - JL LBB5_9 + JL LBB6_9 MOVQ SI, BX CMPQ DX, $31 MOVQ R12, DX - JG LBB5_6 + JG LBB6_6 -LBB5_9: - LEAQ 0(AX)(R15*1), R13 +LBB6_9: + LEAQ 0(R8)(R15*1), R13 ADDQ R14, R15 -LBB5_10: +LBB6_10: TESTB CX, CX - JE LBB5_14 + JE LBB6_14 LONG $0x6f7ac1c4; WORD $0x0065 // vmovdqu (%r13), %xmm4 LONG $0xec64f9c5 // vpcmpgtb %xmm4, %xmm0, %xmm5 LONG $0xf174d9c5 // vpcmpeqb %xmm1, %xmm4, %xmm6 @@ -1490,263 +1787,265 @@ LBB5_10: BSFL CX, CX LONG $0x7ef9e1c4; BYTE $0xe2 // vmovq %xmm4, %rdx CMPQ R12, CX - JGE LBB5_24 + JGE LBB6_24 CMPQ R12, $8 - JB LBB5_28 + JB LBB6_28 MOVQ DX, 0(R15) LEAQ 8(R13), CX ADDQ $8, R15 LEAQ -8(R12), BX CMPQ BX, $4 - JAE LBB5_29 - JMP LBB5_30 + JAE LBB6_29 + JMP LBB6_30 -LBB5_14: +LBB6_14: TESTQ SI, SI - JLE LBB5_21 + JLE LBB6_21 TESTQ R12, R12 - JLE LBB5_21 - MOVQ R8, -72(BP) - MOVQ R9, -64(BP) + JLE LBB6_21 + MOVQ R8, -56(BP) + MOVQ AX, -72(BP) + MOVQ R10, -64(BP) XORL DX, DX XORL CX, CX -LBB5_17: - MOVBLZX 0(R13)(DX*1), R9 - MOVQ R9, BX +LBB6_17: + MOVBLZX 0(R13)(DX*1), R8 + MOVQ R8, BX SHLQ $4, BX - LONG $0x87058d48; WORD $0x009b; BYTE $0x00 // leaq $39815(%rip), %rax /* __SingleQuoteTab(%rip) */ + LONG $0xac058d48; WORD $0x009b; BYTE $0x00 // leaq $39852(%rip), %rax /* __SingleQuoteTab(%rip) */ CMPQ 0(BX)(AX*1), $0 - JNE LBB5_27 - LEAQ 0(SI)(CX*1), R8 - MOVB R9, 0(R15)(DX*1) + JNE LBB6_27 + LEAQ 0(SI)(CX*1), R10 + MOVB R8, 0(R15)(DX*1) LEAQ -1(CX), BX - CMPQ R8, $2 - JL LBB5_20 + CMPQ R10, $2 + JL LBB6_20 ADDQ R12, CX ADDQ $1, DX CMPQ CX, $1 MOVQ BX, CX - JG LBB5_17 + JG LBB6_17 -LBB5_20: +LBB6_20: SUBQ BX, R13 ADDQ BX, SI - MOVQ -64(BP), R9 - MOVQ -72(BP), R8 - MOVQ -56(BP), AX + MOVQ -64(BP), R10 + MOVQ -72(BP), AX + MOVQ -56(BP), R8 -LBB5_21: - MOVQ $12884901889, R15 +LBB6_21: + MOVQ $12884901889, R12 TESTQ SI, SI - JE LBB5_26 + JE LBB6_26 NOTQ R13 - ADDQ AX, R13 - JMP LBB5_41 + ADDQ R8, R13 + JMP LBB6_41 -LBB5_23: +LBB6_23: BSFW CX, CX MOVWLZX CX, R13 ADDQ R15, R13 - JMP LBB5_40 + JMP LBB6_40 -LBB5_24: +LBB6_24: CMPL CX, $8 - JB LBB5_34 + JB LBB6_34 MOVQ DX, 0(R15) LEAQ 8(R13), R12 ADDQ $8, R15 LEAQ -8(CX), BX CMPQ BX, $4 - JAE LBB5_35 - JMP LBB5_36 + JAE LBB6_35 + JMP LBB6_36 -LBB5_26: - SUBQ AX, R13 - JMP LBB5_41 +LBB6_26: + SUBQ R8, R13 + JMP LBB6_41 -LBB5_27: - MOVQ -56(BP), AX - SUBQ AX, R13 +LBB6_27: + MOVQ -56(BP), R8 + SUBQ R8, R13 SUBQ CX, R13 - MOVQ -64(BP), R9 - MOVQ -72(BP), R8 - JMP LBB5_40 + MOVQ -64(BP), R10 + MOVQ -72(BP), AX + JMP LBB6_40 -LBB5_28: +LBB6_28: MOVQ R13, CX MOVQ R12, BX CMPQ BX, $4 - JB LBB5_30 + JB LBB6_30 -LBB5_29: +LBB6_29: MOVL 0(CX), DX MOVL DX, 0(R15) ADDQ $4, CX ADDQ $4, R15 ADDQ $-4, BX -LBB5_30: +LBB6_30: CMPQ BX, $2 - JB LBB5_31 + JB LBB6_31 MOVWLZX 0(CX), DX MOVW DX, 0(R15) ADDQ $2, CX ADDQ $2, R15 ADDQ $-2, BX TESTQ BX, BX - JNE LBB5_32 - JMP 
LBB5_33 + JNE LBB6_32 + JMP LBB6_33 -LBB5_31: +LBB6_31: TESTQ BX, BX - JE LBB5_33 + JE LBB6_33 -LBB5_32: +LBB6_32: MOVB 0(CX), CX MOVB CX, 0(R15) -LBB5_33: +LBB6_33: ADDQ R13, R12 NOTQ R12 - ADDQ AX, R12 + ADDQ R8, R12 MOVQ R12, R13 - JMP LBB5_40 + JMP LBB6_40 -LBB5_34: +LBB6_34: MOVQ R13, R12 MOVQ CX, BX CMPQ BX, $4 - JB LBB5_36 + JB LBB6_36 -LBB5_35: +LBB6_35: MOVL 0(R12), DX MOVL DX, 0(R15) ADDQ $4, R12 ADDQ $4, R15 ADDQ $-4, BX -LBB5_36: +LBB6_36: CMPQ BX, $2 - JB LBB5_37 + JB LBB6_37 MOVWLZX 0(R12), DX MOVW DX, 0(R15) ADDQ $2, R12 ADDQ $2, R15 ADDQ $-2, BX TESTQ BX, BX - JNE LBB5_38 - JMP LBB5_39 + JNE LBB6_38 + JMP LBB6_39 -LBB5_37: +LBB6_37: TESTQ BX, BX - JE LBB5_39 + JE LBB6_39 -LBB5_38: +LBB6_38: MOVB 0(R12), DX MOVB DX, 0(R15) -LBB5_39: - SUBQ AX, R13 +LBB6_39: + SUBQ R8, R13 ADDQ CX, R13 -LBB5_40: - MOVQ $12884901889, R15 +LBB6_40: + MOVQ $12884901889, R12 -LBB5_41: +LBB6_41: TESTQ R13, R13 - JS LBB5_74 + JS LBB6_74 ADDQ R13, R14 - CMPQ R8, R13 - JE LBB5_70 + CMPQ AX, R13 + JE LBB6_70 SUBQ R13, R11 - JMP LBB5_45 + JMP LBB6_45 -LBB5_44: +LBB6_44: ADDQ SI, R14 ADDQ $1, R13 - CMPQ R8, R13 - MOVQ -56(BP), AX - JE LBB5_70 + CMPQ AX, R13 + MOVQ R15, R8 + JE LBB6_70 -LBB5_45: - MOVBLZX 0(AX)(R13*1), CX +LBB6_45: + MOVBLZX 0(R8)(R13*1), CX SHLQ $4, CX - MOVQ 0(R10)(CX*1), DX + MOVQ 0(R9)(CX*1), DX TESTL DX, DX - JE LBB5_53 + JE LBB6_53 MOVLQSX DX, SI SUBQ SI, R11 - JL LBB5_72 + JL LBB6_72 + MOVQ R8, R15 SHLQ $32, DX - LEAQ 0(R10)(CX*1), BX + LEAQ 0(R9)(CX*1), BX ADDQ $8, BX - CMPQ DX, R15 - JL LBB5_49 + CMPQ DX, R12 + JL LBB6_49 MOVL 0(BX), DX MOVL DX, 0(R14) - LEAQ 0(R10)(CX*1), BX + LEAQ 0(R9)(CX*1), BX ADDQ $12, BX - LEAQ 4(R14), CX + LEAQ 4(R14), R8 LEAQ -4(SI), DX CMPQ DX, $2 - JGE LBB5_50 - JMP LBB5_51 + JGE LBB6_50 + JMP LBB6_51 -LBB5_49: - MOVQ R14, CX +LBB6_49: + MOVQ R14, R8 MOVQ SI, DX CMPQ DX, $2 - JL LBB5_51 + JL LBB6_51 -LBB5_50: - MOVWLZX 0(BX), AX - MOVW AX, 0(CX) +LBB6_50: + MOVWLZX 0(BX), CX + MOVW CX, 0(R8) ADDQ $2, BX - ADDQ $2, CX + ADDQ $2, R8 ADDQ $-2, DX -LBB5_51: +LBB6_51: TESTQ DX, DX - JLE LBB5_44 - MOVBLZX 0(BX), AX - MOVB AX, 0(CX) - JMP LBB5_44 + JLE LBB6_44 + MOVBLZX 0(BX), CX + MOVB CX, 0(R8) + JMP LBB6_44 -LBB5_53: - LEAQ 0(AX)(R13*1), CX - SUBQ R13, R8 - JNE LBB5_3 +LBB6_53: + LEAQ 0(R8)(R13*1), CX + SUBQ R13, AX + JNE LBB6_3 -LBB5_70: - ADDQ R13, AX +LBB6_70: + ADDQ R13, R8 MOVQ -48(BP), DX -LBB5_71: +LBB6_71: SUBQ DX, R14 - MOVQ R14, 0(R9) - SUBQ DI, AX - MOVQ AX, R13 - JMP LBB5_73 + MOVQ R14, 0(R10) + SUBQ DI, R8 + MOVQ R8, AX + JMP LBB6_73 -LBB5_56: - QUAD $0xfffffc01056ffac5 // vmovdqu $-1023(%rip), %xmm0 /* LCPI5_0(%rip) */ - QUAD $0xfffffc090d6ffac5 // vmovdqu $-1015(%rip), %xmm1 /* LCPI5_1(%rip) */ - QUAD $0xfffffc11156ffac5 // vmovdqu $-1007(%rip), %xmm2 /* LCPI5_2(%rip) */ +LBB6_56: + QUAD $0xfffffc03056ffac5 // vmovdqu $-1021(%rip), %xmm0 /* LCPI6_0(%rip) */ + QUAD $0xfffffc0b0d6ffac5 // vmovdqu $-1013(%rip), %xmm1 /* LCPI6_1(%rip) */ + QUAD $0xfffffc13156ffac5 // vmovdqu $-1005(%rip), %xmm2 /* LCPI6_2(%rip) */ LONG $0xdb76e1c5 // vpcmpeqd %xmm3, %xmm3, %xmm3 MOVQ DX, SI - MOVQ R13, BX + MOVQ AX, R11 -LBB5_57: - CMPQ BX, $16 - JL LBB5_62 - MOVQ BX, AX - NEGQ AX - ADDQ $16, BX - MOVQ BX, CX - MOVQ AX, BX +LBB6_57: + CMPQ R11, $16 + JL LBB6_62 + MOVQ R11, R8 + NEGQ R8 + ADDQ $16, R11 + MOVQ R11, CX + MOVQ R8, R11 -LBB5_59: +LBB6_59: LONG $0x276ffac5 // vmovdqu (%rdi), %xmm4 LONG $0xec64f9c5 // vpcmpgtb %xmm4, %xmm0, %xmm5 LONG $0xf174d9c5 // vpcmpeqb %xmm1, %xmm4, %xmm6 @@ -1756,76 +2055,76 @@ LBB5_59: LONG $0xe364d9c5 // vpcmpgtb %xmm3, 
%xmm4, %xmm4 LONG $0xe4dbd1c5 // vpand %xmm4, %xmm5, %xmm4 LONG $0xe4ebc9c5 // vpor %xmm4, %xmm6, %xmm4 - LONG $0xc4d7f9c5 // vpmovmskb %xmm4, %eax - TESTL AX, AX - JNE LBB5_65 + LONG $0xdcd7f9c5 // vpmovmskb %xmm4, %ebx + TESTL BX, BX + JNE LBB6_65 ADDQ $16, DI ADDQ $16, SI - ADDQ $16, BX + ADDQ $16, R11 ADDQ $-16, CX CMPQ CX, $31 - JG LBB5_59 - NEGQ BX + JG LBB6_59 + NEGQ R11 -LBB5_62: - TESTQ BX, BX - JLE LBB5_69 +LBB6_62: + TESTQ R11, R11 + JLE LBB6_69 -LBB5_63: - MOVBLZX 0(DI), CX - MOVQ CX, AX - SHLQ $4, AX - CMPQ 0(AX)(R11*1), $0 - JNE LBB5_67 - LEAQ -1(BX), AX +LBB6_63: + MOVBLZX 0(DI), BX + MOVQ BX, CX + SHLQ $4, CX + CMPQ 0(CX)(R14*1), $0 + JNE LBB6_67 + LEAQ -1(R11), CX ADDQ $1, DI - MOVB CX, 0(SI) + MOVB BX, 0(SI) ADDQ $1, SI - CMPQ BX, $1 - MOVQ AX, BX - JG LBB5_63 - JMP LBB5_69 + CMPQ R11, $1 + MOVQ CX, R11 + JG LBB6_63 + JMP LBB6_69 -LBB5_65: - BSFW AX, CX +LBB6_65: + BSFW BX, CX MOVWLZX CX, CX - ADDQ CX, BX - NEGQ BX - TESTQ BX, BX - JLE LBB5_75 + ADDQ CX, R11 + NEGQ R11 + TESTQ R11, R11 + JLE LBB6_75 ADDQ CX, DI ADDQ CX, SI -LBB5_67: - MOVBLZX 0(DI), AX - SHLQ $4, AX - MOVQ 0(R10)(AX*1), CX +LBB6_67: + MOVBLZX 0(DI), BX + SHLQ $4, BX + MOVQ 0(R9)(BX*1), CX TESTL CX, CX - JE LBB5_57 - MOVQ 8(R10)(AX*1), AX - MOVQ AX, 0(SI) + JE LBB6_57 + MOVQ 8(R9)(BX*1), BX + MOVQ BX, 0(SI) ADDQ $1, DI - LEAQ -1(BX), AX + LEAQ -1(R11), BX MOVLQSX CX, CX ADDQ CX, SI - CMPQ BX, $1 - MOVQ AX, BX - JG LBB5_67 + CMPQ R11, $1 + MOVQ BX, R11 + JG LBB6_67 -LBB5_69: +LBB6_69: SUBQ DX, SI - MOVQ SI, 0(R9) - JMP LBB5_73 + MOVQ SI, 0(R10) + JMP LBB6_73 -LBB5_72: +LBB6_72: SUBQ -48(BP), R14 - MOVQ R14, 0(R9) - SUBQ AX, DI + MOVQ R14, 0(R10) + SUBQ R8, DI NOTQ R13 ADDQ DI, R13 - -LBB5_73: MOVQ R13, AX + +LBB6_73: ADDQ $32, SP BYTE $0x5b // popq %rbx WORD $0x5c41 // popq %r12 @@ -1835,23 +2134,22 @@ LBB5_73: BYTE $0x5d // popq %rbp RET -LBB5_74: - MOVQ AX, CX +LBB6_74: MOVQ -48(BP), AX ADDQ R13, AX NOTQ AX ADDQ R14, AX - MOVQ AX, 0(R9) - SUBQ CX, DI + MOVQ AX, 0(R10) + SUBQ R8, DI ADDQ R13, DI - MOVQ DI, R13 - JMP LBB5_73 + MOVQ DI, AX + JMP LBB6_73 -LBB5_75: +LBB6_75: ADDQ CX, SI - JMP LBB5_69 + JMP LBB6_69 -LCPI6_0: +LCPI7_0: QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' _unquote: @@ -1864,109 +2162,109 @@ _unquote: BYTE $0x53 // pushq %rbx SUBQ $24, SP TESTQ SI, SI - JE LBB6_118 + JE LBB7_118 MOVL R8, R9 ANDL $1, R9 - QUAD $0xffffffc7056ffac5 // vmovdqu $-57(%rip), %xmm0 /* LCPI6_0(%rip) */ + QUAD $0xffffffc7056ffac5 // vmovdqu $-57(%rip), %xmm0 /* LCPI7_0(%rip) */ MOVQ DI, R11 MOVQ SI, R15 MOVQ DX, AX -LBB6_2: +LBB7_2: CMPB 0(R11), $92 - JNE LBB6_4 + JNE LBB7_4 XORL R14, R14 - JMP LBB6_15 + JMP LBB7_15 -LBB6_4: +LBB7_4: MOVQ R15, R12 MOVQ AX, R13 MOVQ R11, R14 CMPQ R15, $16 - JL LBB6_9 + JL LBB7_9 XORL R13, R13 MOVQ R15, R14 -LBB6_6: +LBB7_6: LONG $0x6f7a81c4; WORD $0x2b0c // vmovdqu (%r11,%r13), %xmm1 LONG $0x7f7aa1c4; WORD $0x280c // vmovdqu %xmm1, (%rax,%r13) LONG $0xc874f1c5 // vpcmpeqb %xmm0, %xmm1, %xmm1 LONG $0xd9d7f9c5 // vpmovmskb %xmm1, %ebx TESTL BX, BX - JNE LBB6_14 + JNE LBB7_14 LEAQ -16(R14), R12 ADDQ $16, R13 CMPQ R14, $31 MOVQ R12, R14 - JG LBB6_6 + JG LBB7_6 LEAQ 0(R11)(R13*1), R14 ADDQ AX, R13 -LBB6_9: +LBB7_9: TESTQ R12, R12 - JE LBB6_119 + JE LBB7_119 XORL BX, BX -LBB6_11: +LBB7_11: MOVBLZX 0(R14)(BX*1), R10 CMPB R10, $92 - JE LBB6_13 + JE LBB7_13 MOVB R10, 0(R13)(BX*1) ADDQ $1, BX CMPQ R12, BX - JNE LBB6_11 - JMP LBB6_119 + JNE LBB7_11 + JMP LBB7_119 -LBB6_13: +LBB7_13: SUBQ R11, R14 ADDQ BX, R14 CMPQ R14, $-1 - JNE LBB6_15 - 
JMP LBB6_119 + JNE LBB7_15 + JMP LBB7_119 -LBB6_14: +LBB7_14: BSFW BX, BX MOVWLZX BX, R14 ADDQ R13, R14 CMPQ R14, $-1 - JE LBB6_119 + JE LBB7_119 -LBB6_15: +LBB7_15: LEAQ 2(R14), BX SUBQ BX, R15 - JS LBB6_155 + JS LBB7_155 ADDQ R14, R11 ADDQ $2, R11 TESTQ R9, R9 - JNE LBB6_42 + JNE LBB7_42 -LBB6_17: +LBB7_17: ADDQ R14, AX MOVBLZX -1(R11), BX - LONG $0x00158d4c; WORD $0x00b7; BYTE $0x00 // leaq $46848(%rip), %r10 /* __UnquoteTab(%rip) */ + LONG $0x28158d4c; WORD $0x00b7; BYTE $0x00 // leaq $46888(%rip), %r10 /* __UnquoteTab(%rip) */ MOVB 0(BX)(R10*1), BX CMPB BX, $-1 - JE LBB6_20 + JE LBB7_20 TESTB BX, BX - JE LBB6_134 + JE LBB7_134 MOVB BX, 0(AX) ADDQ $1, AX - JMP LBB6_72 + JMP LBB7_72 -LBB6_20: +LBB7_20: CMPQ R15, $3 - JLE LBB6_155 + JLE LBB7_155 MOVL 0(R11), R14 MOVL R14, R12 NOTL R12 LEAL -808464432(R14), BX ANDL $-2139062144, R12 TESTL BX, R12 - JNE LBB6_121 + JNE LBB7_121 LEAL 421075225(R14), BX ORL R14, BX TESTL $-2139062144, BX - JNE LBB6_121 + JNE LBB7_121 MOVL R14, BX ANDL $2139062143, BX MOVL $-1061109568, R10 @@ -1974,13 +2272,13 @@ LBB6_20: LEAL 1179010630(BX), R13 ANDL R12, R10 TESTL R13, R10 - JNE LBB6_121 + JNE LBB7_121 MOVL $-522133280, R10 SUBL BX, R10 ADDL $960051513, BX ANDL R10, R12 TESTL BX, R12 - JNE LBB6_121 + JNE LBB7_121 BSWAPL R14 MOVL R14, BX SHRL $4, BX @@ -2000,28 +2298,28 @@ LBB6_20: LEAQ 4(R11), R12 LEAQ -4(R15), R10 CMPL R14, $128 - JB LBB6_50 + JB LBB7_50 TESTQ R9, R9 - JNE LBB6_52 + JNE LBB7_52 TESTB $2, R8 - JE LBB6_75 + JE LBB7_75 XORL R12, R12 -LBB6_29: +LBB7_29: CMPL R14, $2048 - JB LBB6_73 + JB LBB7_73 MOVL R14, BX ANDL $-2048, BX CMPL BX, $55296 - JNE LBB6_69 + JNE LBB7_69 CMPQ R10, $6 - JL LBB6_87 + JL LBB7_87 CMPL R14, $56319 - JA LBB6_87 + JA LBB7_87 CMPB 4(R11)(R12*1), $92 - JNE LBB6_87 + JNE LBB7_87 CMPB 5(R11)(R12*1), $117 - JNE LBB6_87 + JNE LBB7_87 MOVL 6(R11)(R12*1), R13 MOVL R13, BX NOTL BX @@ -2030,12 +2328,12 @@ LBB6_29: ANDL $-2139062144, BX MOVL BX, -60(BP) TESTL R13, BX - JNE LBB6_135 + JNE LBB7_135 MOVQ -56(BP), R13 LEAL 421075225(R13), BX ORL R13, BX TESTL $-2139062144, BX - JNE LBB6_135 + JNE LBB7_135 MOVQ -56(BP), R13 ANDL $2139062143, R13 MOVL $-1061109568, BX @@ -2046,7 +2344,7 @@ LBB6_29: MOVL -64(BP), BX ANDL -60(BP), BX TESTL BX, -44(BP) - JNE LBB6_135 + JNE LBB7_135 MOVL $-522133280, BX SUBL R13, BX MOVL BX, -44(BP) @@ -2054,7 +2352,7 @@ LBB6_29: MOVL -60(BP), BX ANDL -44(BP), BX TESTL R13, BX - JNE LBB6_135 + JNE LBB7_135 MOVQ -56(BP), R13 BSWAPL R13 MOVL R13, BX @@ -2076,7 +2374,7 @@ LBB6_29: ADDL -56(BP), R13 ANDL $16515072, BX CMPL BX, $14417920 - JE LBB6_90 + JE LBB7_90 MOVW $-16401, 0(AX) MOVB $-67, 2(AX) ADDQ $3, AX @@ -2084,67 +2382,67 @@ LBB6_29: ADDQ $-6, R10 MOVL R13, R14 CMPL R13, $127 - JA LBB6_29 + JA LBB7_29 ADDQ R11, R12 ADDQ $4, R12 - JMP LBB6_51 + JMP LBB7_51 -LBB6_42: +LBB7_42: TESTL R15, R15 - JE LBB6_155 + JE LBB7_155 CMPB -1(R11), $92 - JNE LBB6_146 + JNE LBB7_146 CMPB 0(R11), $92 - JNE LBB6_49 + JNE LBB7_49 CMPL R15, $1 - JLE LBB6_155 + JLE LBB7_155 MOVB 1(R11), BX CMPB BX, $34 - JE LBB6_48 + JE LBB7_48 CMPB BX, $92 - JNE LBB6_148 + JNE LBB7_148 -LBB6_48: +LBB7_48: ADDQ $1, R11 ADDQ $-1, R15 -LBB6_49: +LBB7_49: ADDQ $1, R11 ADDQ $-1, R15 - JMP LBB6_17 + JMP LBB7_17 -LBB6_50: +LBB7_50: MOVL R14, R13 -LBB6_51: +LBB7_51: MOVB R13, 0(AX) ADDQ $1, AX - JMP LBB6_71 + JMP LBB7_71 -LBB6_52: +LBB7_52: TESTB $2, R8 - JE LBB6_93 + JE LBB7_93 XORL R12, R12 -LBB6_54: +LBB7_54: CMPL R14, $2048 - JB LBB6_73 + JB LBB7_73 MOVL R14, BX ANDL $-2048, BX CMPL BX, $55296 - JNE LBB6_69 + JNE LBB7_69 TESTQ R10, R10 - JLE 
LBB6_152 + JLE LBB7_152 CMPB 4(R11)(R12*1), $92 - JNE LBB6_113 + JNE LBB7_113 CMPQ R10, $7 - JL LBB6_111 + JL LBB7_111 CMPL R14, $56319 - JA LBB6_111 + JA LBB7_111 CMPB 5(R11)(R12*1), $92 - JNE LBB6_111 + JNE LBB7_111 CMPB 6(R11)(R12*1), $117 - JNE LBB6_111 + JNE LBB7_111 MOVL 7(R11)(R12*1), R13 MOVL R13, BX NOTL BX @@ -2153,12 +2451,12 @@ LBB6_54: ANDL $-2139062144, BX MOVL BX, -60(BP) TESTL R13, BX - JNE LBB6_149 + JNE LBB7_149 MOVQ -56(BP), R13 LEAL 421075225(R13), BX ORL R13, BX TESTL $-2139062144, BX - JNE LBB6_149 + JNE LBB7_149 MOVQ -56(BP), R13 ANDL $2139062143, R13 MOVL $-1061109568, BX @@ -2169,7 +2467,7 @@ LBB6_54: MOVL -64(BP), BX ANDL -60(BP), BX TESTL BX, -44(BP) - JNE LBB6_149 + JNE LBB7_149 MOVL $-522133280, BX SUBL R13, BX MOVL BX, -44(BP) @@ -2177,7 +2475,7 @@ LBB6_54: MOVL -60(BP), BX ANDL -44(BP), BX TESTL R13, BX - JNE LBB6_149 + JNE LBB7_149 MOVQ -56(BP), R13 BSWAPL R13 MOVL R13, BX @@ -2198,7 +2496,7 @@ LBB6_54: ADDL -56(BP), BX ANDL $16515072, R13 CMPL R13, $14417920 - JE LBB6_114 + JE LBB7_114 MOVW $-16401, 0(AX) MOVB $-67, 2(AX) ADDQ $3, AX @@ -2207,17 +2505,17 @@ LBB6_54: MOVQ BX, R13 MOVL R13, R14 CMPL R13, $128 - JAE LBB6_54 + JAE LBB7_54 ADDQ R11, R12 ADDQ $4, R12 MOVQ BX, R13 - JMP LBB6_51 + JMP LBB7_51 -LBB6_69: +LBB7_69: ADDQ R11, R12 ADDQ $4, R12 -LBB6_70: +LBB7_70: MOVL R14, BX SHRL $12, BX ORB $-32, BX @@ -2232,20 +2530,20 @@ LBB6_70: MOVB R14, 2(AX) ADDQ $3, AX -LBB6_71: +LBB7_71: MOVQ R10, R15 MOVQ R12, R11 -LBB6_72: +LBB7_72: TESTQ R15, R15 - JNE LBB6_2 - JMP LBB6_154 + JNE LBB7_2 + JMP LBB7_154 -LBB6_73: +LBB7_73: ADDQ R11, R12 ADDQ $4, R12 -LBB6_74: +LBB7_74: MOVL R14, BX SHRL $6, BX ORB $-64, BX @@ -2254,22 +2552,22 @@ LBB6_74: ORB $-128, R14 MOVB R14, 1(AX) ADDQ $2, AX - JMP LBB6_71 + JMP LBB7_71 -LBB6_75: +LBB7_75: CMPL R14, $2048 - JB LBB6_74 + JB LBB7_74 ANDL $16252928, R13 CMPL R13, $14155776 - JNE LBB6_70 + JNE LBB7_70 CMPQ R15, $10 - JL LBB6_107 + JL LBB7_107 CMPL R14, $56319 - JA LBB6_107 + JA LBB7_107 CMPB 0(R12), $92 - JNE LBB6_107 + JNE LBB7_107 CMPB 5(R11), $117 - JNE LBB6_107 + JNE LBB7_107 MOVL 6(R11), BX MOVL BX, R10 NOTL R10 @@ -2277,12 +2575,12 @@ LBB6_75: ADDL $-808464432, BX ANDL $-2139062144, R10 TESTL BX, R10 - JNE LBB6_136 + JNE LBB7_136 MOVQ -56(BP), R13 LEAL 421075225(R13), BX ORL R13, BX TESTL $-2139062144, BX - JNE LBB6_136 + JNE LBB7_136 MOVQ -56(BP), R13 ANDL $2139062143, R13 MOVL $-1061109568, BX @@ -2293,13 +2591,13 @@ LBB6_75: MOVL -44(BP), BX ANDL R10, BX TESTL BX, -60(BP) - JNE LBB6_136 + JNE LBB7_136 MOVL $-522133280, BX SUBL R13, BX ADDL $960051513, R13 ANDL BX, R10 TESTL R13, R10 - JNE LBB6_136 + JNE LBB7_136 MOVQ -56(BP), R12 BSWAPL R12 MOVL R12, BX @@ -2316,39 +2614,39 @@ LBB6_75: MOVL R10, BX ANDL $16515072, BX CMPL BX, $14417920 - JNE LBB6_117 + JNE LBB7_117 MOVL R10, R12 SHRL $8, R12 ANDL $65280, R12 MOVBLZX R10, BX ORL R12, BX - JMP LBB6_91 + JMP LBB7_91 -LBB6_87: +LBB7_87: ADDQ R12, R11 ADDQ $4, R11 -LBB6_88: +LBB7_88: TESTB $2, R8 - JE LBB6_150 + JE LBB7_150 -LBB6_89: +LBB7_89: MOVW $-16401, 0(AX) MOVB $-67, 2(AX) ADDQ $3, AX MOVQ R10, R15 - JMP LBB6_72 + JMP LBB7_72 -LBB6_90: +LBB7_90: MOVQ R13, BX ADDQ R12, R11 ADDQ $10, R11 SUBQ R12, R15 -LBB6_91: +LBB7_91: ADDQ $-10, R15 -LBB6_92: +LBB7_92: SHLL $10, R14 MOVL BX, R10 ADDL R14, R10 @@ -2371,27 +2669,27 @@ LBB6_92: ORB $-128, R10 MOVB R10, 3(AX) ADDQ $4, AX - JMP LBB6_72 + JMP LBB7_72 -LBB6_93: +LBB7_93: CMPL R14, $2048 - JB LBB6_74 + JB LBB7_74 ANDL $16252928, R13 CMPL R13, $14155776 - JNE LBB6_70 + JNE LBB7_70 CMPQ R15, $5 - JL LBB6_152 + JL LBB7_152 
CMPB 0(R12), $92 - JNE LBB6_116 + JNE LBB7_116 LEAQ 5(R11), R12 CMPQ R15, $11 - JL LBB6_112 + JL LBB7_112 CMPL R14, $56319 - JA LBB6_112 + JA LBB7_112 CMPB 0(R12), $92 - JNE LBB6_112 + JNE LBB7_112 CMPB 6(R11), $117 - JNE LBB6_112 + JNE LBB7_112 MOVL 7(R11), BX MOVL BX, R10 NOTL R10 @@ -2399,12 +2697,12 @@ LBB6_93: ADDL $-808464432, BX ANDL $-2139062144, R10 TESTL BX, R10 - JNE LBB6_136 + JNE LBB7_136 MOVQ -56(BP), R13 LEAL 421075225(R13), BX ORL R13, BX TESTL $-2139062144, BX - JNE LBB6_136 + JNE LBB7_136 MOVQ -56(BP), R13 ANDL $2139062143, R13 MOVL $-1061109568, BX @@ -2415,13 +2713,13 @@ LBB6_93: MOVL -44(BP), BX ANDL R10, BX TESTL BX, -60(BP) - JNE LBB6_136 + JNE LBB7_136 MOVL $-522133280, BX SUBL R13, BX ADDL $960051513, R13 ANDL BX, R10 TESTL R13, R10 - JNE LBB6_136 + JNE LBB7_136 MOVQ -56(BP), R12 BSWAPL R12 MOVL R12, BX @@ -2438,61 +2736,61 @@ LBB6_93: MOVL R10, BX ANDL $16515072, BX CMPL BX, $14417920 - JNE LBB6_117 + JNE LBB7_117 MOVL R10, R12 SHRL $8, R12 ANDL $65280, R12 MOVBLZX R10, BX ORL R12, BX - JMP LBB6_115 + JMP LBB7_115 -LBB6_107: +LBB7_107: MOVQ R12, R11 - JMP LBB6_88 + JMP LBB7_88 -LBB6_111: +LBB7_111: ADDQ R11, R12 ADDQ $5, R12 -LBB6_112: +LBB7_112: ADDQ $-1, R10 MOVQ R12, R11 - JMP LBB6_88 + JMP LBB7_88 -LBB6_113: +LBB7_113: ADDQ R12, R11 ADDQ $4, R11 TESTB $2, R8 - JNE LBB6_89 - JMP LBB6_117 + JNE LBB7_89 + JMP LBB7_117 -LBB6_114: +LBB7_114: ADDQ R12, R11 ADDQ $11, R11 SUBQ R12, R15 -LBB6_115: +LBB7_115: ADDQ $-11, R15 - JMP LBB6_92 + JMP LBB7_92 -LBB6_116: +LBB7_116: MOVQ R12, R11 TESTB $2, R8 - JNE LBB6_89 + JNE LBB7_89 -LBB6_117: +LBB7_117: SUBQ DI, R11 - JMP LBB6_151 + JMP LBB7_151 -LBB6_118: +LBB7_118: XORL R15, R15 MOVQ DX, AX -LBB6_119: +LBB7_119: ADDQ R15, AX SUBQ DX, AX -LBB6_120: +LBB7_120: ADDQ $24, SP BYTE $0x5b // popq %rbx WORD $0x5c41 // popq %r12 @@ -2502,91 +2800,91 @@ LBB6_120: BYTE $0x5d // popq %rbp RET -LBB6_121: +LBB7_121: MOVQ R11, DX SUBQ DI, DX MOVQ DX, 0(CX) MOVB 0(R11), SI LEAL -48(SI), AX CMPB AX, $10 - JB LBB6_124 + JB LBB7_124 MOVQ $-2, AX ADDB $-65, SI CMPB SI, $37 - JA LBB6_120 + JA LBB7_120 MOVBLZX SI, SI MOVQ $270582939711, DI BTQ SI, DI - JAE LBB6_120 + JAE LBB7_120 -LBB6_124: +LBB7_124: LEAQ 1(DX), AX MOVQ AX, 0(CX) MOVB 1(R11), SI LEAL -48(SI), AX CMPB AX, $9 - JBE LBB6_127 + JBE LBB7_127 MOVQ $-2, AX ADDB $-65, SI CMPB SI, $37 - JA LBB6_120 + JA LBB7_120 MOVBLZX SI, SI MOVQ $270582939711, DI BTQ SI, DI - JAE LBB6_120 + JAE LBB7_120 -LBB6_127: +LBB7_127: LEAQ 2(DX), AX MOVQ AX, 0(CX) MOVB 2(R11), SI LEAL -48(SI), AX CMPB AX, $10 - JB LBB6_130 + JB LBB7_130 MOVQ $-2, AX ADDB $-65, SI CMPB SI, $37 - JA LBB6_120 + JA LBB7_120 MOVBLZX SI, SI MOVQ $270582939711, DI BTQ SI, DI - JAE LBB6_120 + JAE LBB7_120 -LBB6_130: +LBB7_130: LEAQ 3(DX), AX MOVQ AX, 0(CX) MOVB 3(R11), SI LEAL -48(SI), AX CMPB AX, $10 - JB LBB6_133 + JB LBB7_133 -LBB6_131: +LBB7_131: MOVQ $-2, AX ADDB $-65, SI CMPB SI, $37 - JA LBB6_120 + JA LBB7_120 MOVBLZX SI, SI MOVQ $270582939711, DI BTQ SI, DI - JAE LBB6_120 + JAE LBB7_120 -LBB6_133: +LBB7_133: ADDQ $4, DX MOVQ DX, 0(CX) MOVQ $-2, AX - JMP LBB6_120 + JMP LBB7_120 -LBB6_134: +LBB7_134: NOTQ DI ADDQ DI, R11 MOVQ R11, 0(CX) MOVQ $-3, AX - JMP LBB6_120 + JMP LBB7_120 -LBB6_135: +LBB7_135: ADDQ R11, R12 ADDQ $4, R12 -LBB6_136: +LBB7_136: MOVQ R12, DX SUBQ DI, DX ADDQ $2, DX @@ -2594,112 +2892,112 @@ LBB6_136: MOVB 2(R12), SI LEAL -48(SI), AX CMPB AX, $10 - JB LBB6_139 + JB LBB7_139 MOVQ $-2, AX ADDB $-65, SI CMPB SI, $37 - JA LBB6_120 + JA LBB7_120 MOVBLZX SI, SI MOVQ $270582939711, DI BTQ SI, DI - JAE LBB6_120 + JAE 
LBB7_120 -LBB6_139: +LBB7_139: LEAQ 1(DX), AX MOVQ AX, 0(CX) MOVB 3(R12), SI LEAL -48(SI), AX CMPB AX, $9 - JBE LBB6_142 + JBE LBB7_142 MOVQ $-2, AX ADDB $-65, SI CMPB SI, $37 - JA LBB6_120 + JA LBB7_120 MOVBLZX SI, SI MOVQ $270582939711, DI BTQ SI, DI - JAE LBB6_120 + JAE LBB7_120 -LBB6_142: +LBB7_142: LEAQ 2(DX), AX MOVQ AX, 0(CX) MOVB 4(R12), SI LEAL -48(SI), AX CMPB AX, $10 - JB LBB6_145 + JB LBB7_145 MOVQ $-2, AX ADDB $-65, SI CMPB SI, $37 - JA LBB6_120 + JA LBB7_120 MOVBLZX SI, SI MOVQ $270582939711, DI BTQ SI, DI - JAE LBB6_120 + JAE LBB7_120 -LBB6_145: +LBB7_145: LEAQ 3(DX), AX MOVQ AX, 0(CX) MOVB 5(R12), SI LEAL -48(SI), AX CMPB AX, $10 - JAE LBB6_131 - JMP LBB6_133 + JAE LBB7_131 + JMP LBB7_133 -LBB6_146: +LBB7_146: NOTQ DI ADDQ DI, R11 -LBB6_147: +LBB7_147: MOVQ R11, 0(CX) MOVQ $-2, AX - JMP LBB6_120 + JMP LBB7_120 -LBB6_148: +LBB7_148: SUBQ DI, R11 ADDQ $1, R11 - JMP LBB6_147 + JMP LBB7_147 -LBB6_149: +LBB7_149: ADDQ R11, R12 ADDQ $5, R12 - JMP LBB6_136 + JMP LBB7_136 -LBB6_150: +LBB7_150: ADDQ DI, R9 SUBQ R9, R11 -LBB6_151: +LBB7_151: ADDQ $-4, R11 MOVQ R11, 0(CX) MOVQ $-4, AX - JMP LBB6_120 + JMP LBB7_120 -LBB6_152: +LBB7_152: TESTB $2, R8 - JE LBB6_155 + JE LBB7_155 MOVW $-16401, 0(AX) MOVB $-67, 2(AX) ADDQ $3, AX -LBB6_154: +LBB7_154: XORL R15, R15 - JMP LBB6_119 + JMP LBB7_119 -LBB6_155: +LBB7_155: MOVQ SI, 0(CX) MOVQ $-1, AX - JMP LBB6_120 + JMP LBB7_120 -LCPI7_0: +LCPI8_0: QUAD $0x2626262626262626; QUAD $0x2626262626262626 // .space 16, '&&&&&&&&&&&&&&&&' -LCPI7_1: +LCPI8_1: QUAD $0xe2e2e2e2e2e2e2e2; QUAD $0xe2e2e2e2e2e2e2e2 // .space 16, '\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2' -LCPI7_2: +LCPI8_2: QUAD $0xfdfdfdfdfdfdfdfd; QUAD $0xfdfdfdfdfdfdfdfd // .space 16, '\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd' -LCPI7_3: +LCPI8_3: QUAD $0x3c3c3c3c3c3c3c3c; QUAD $0x3c3c3c3c3c3c3c3c // .space 16, '<<<<<<<<<<<<<<<<' _html_escape: @@ -2716,34 +3014,34 @@ _html_escape: MOVQ DX, -48(BP) MOVQ DI, AX TESTQ SI, SI - JLE LBB7_70 + JLE LBB8_70 MOVQ -56(BP), AX MOVQ 0(AX), R9 - QUAD $0xffffff89056ffac5 // vmovdqu $-119(%rip), %xmm0 /* LCPI7_0(%rip) */ - QUAD $0xffffff910d6ffac5 // vmovdqu $-111(%rip), %xmm1 /* LCPI7_1(%rip) */ - QUAD $0xffffff99156ffac5 // vmovdqu $-103(%rip), %xmm2 /* LCPI7_2(%rip) */ - QUAD $0xffffffa11d6ffac5 // vmovdqu $-95(%rip), %xmm3 /* LCPI7_3(%rip) */ - LONG $0xd91d8d4c; WORD $0x00ac; BYTE $0x00 // leaq $44249(%rip), %r11 /* __HtmlQuoteTab(%rip) */ + QUAD $0xffffff89056ffac5 // vmovdqu $-119(%rip), %xmm0 /* LCPI8_0(%rip) */ + QUAD $0xffffff910d6ffac5 // vmovdqu $-111(%rip), %xmm1 /* LCPI8_1(%rip) */ + QUAD $0xffffff99156ffac5 // vmovdqu $-103(%rip), %xmm2 /* LCPI8_2(%rip) */ + QUAD $0xffffffa11d6ffac5 // vmovdqu $-95(%rip), %xmm3 /* LCPI8_3(%rip) */ + LONG $0x011d8d4c; WORD $0x00ad; BYTE $0x00 // leaq $44289(%rip), %r11 /* __HtmlQuoteTab(%rip) */ MOVQ DI, R12 MOVQ -48(BP), R14 -LBB7_2: +LBB8_2: TESTQ R9, R9 - JLE LBB7_3 + JLE LBB8_3 CMPQ SI, $16 SETGE AX MOVQ R9, R13 MOVQ R14, R8 MOVQ SI, BX MOVQ R12, R15 - JL LBB7_12 + JL LBB8_12 CMPQ R9, $16 - JL LBB7_12 + JL LBB8_12 XORL R8, R8 MOVQ SI, DX MOVQ R9, CX -LBB7_7: +LBB8_7: LONG $0x6f7a81c4; WORD $0x0424 // vmovdqu (%r12,%r8), %xmm4 LONG $0xe874d9c5 // vpcmpeqb %xmm0, %xmm4, %xmm5 LONG $0xf174d9c5 // vpcmpeqb %xmm1, %xmm4, %xmm6 @@ -2754,25 +3052,25 @@ LBB7_7: LONG $0x7f7a81c4; WORD $0x0624 // vmovdqu %xmm4, (%r14,%r8) LONG $0xc5d7f9c5 // vpmovmskb %xmm5, %eax TESTL AX, AX - JNE LBB7_8 + JNE LBB8_8 LEAQ -16(DX), BX LEAQ -16(CX), R13 ADDQ $16, R8 CMPQ DX, $32 SETGE AX 
- JL LBB7_11 + JL LBB8_11 MOVQ BX, DX CMPQ CX, $31 MOVQ R13, CX - JG LBB7_7 + JG LBB8_7 -LBB7_11: +LBB8_11: LEAQ 0(R12)(R8*1), R15 ADDQ R14, R8 -LBB7_12: +LBB8_12: TESTB AX, AX - JE LBB7_13 + JE LBB8_13 LONG $0x6f7ac1c4; BYTE $0x27 // vmovdqu (%r15), %xmm4 LONG $0xe874d9c5 // vpcmpeqb %xmm0, %xmm4, %xmm5 LONG $0xf174d9c5 // vpcmpeqb %xmm1, %xmm4, %xmm6 @@ -2785,199 +3083,199 @@ LBB7_12: BSFL AX, R10 LONG $0x7ef9e1c4; BYTE $0xe0 // vmovq %xmm4, %rax CMPQ R13, R10 - JGE LBB7_24 + JGE LBB8_24 CMPQ R13, $8 - JB LBB7_35 + JB LBB8_35 MOVQ AX, 0(R8) LEAQ 8(R15), R10 ADDQ $8, R8 LEAQ -8(R13), BX CMPQ BX, $4 - JAE LBB7_38 - JMP LBB7_39 + JAE LBB8_38 + JMP LBB8_39 -LBB7_13: +LBB8_13: TESTQ BX, BX - JLE LBB7_21 + JLE LBB8_21 TESTQ R13, R13 - JLE LBB7_21 + JLE LBB8_21 XORL DX, DX XORL AX, AX -LBB7_16: +LBB8_16: MOVBLZX 0(R15)(DX*1), R11 CMPQ R11, $62 - JA LBB7_17 + JA LBB8_17 MOVQ $5764607797912141824, CX BTQ R11, CX - JB LBB7_45 + JB LBB8_45 -LBB7_17: +LBB8_17: CMPB R11, $-30 - JE LBB7_45 + JE LBB8_45 LEAQ 0(BX)(AX*1), R10 MOVB R11, 0(R8)(DX*1) LEAQ -1(AX), CX CMPQ R10, $2 - JL LBB7_20 + JL LBB8_20 ADDQ R13, AX ADDQ $1, DX CMPQ AX, $1 MOVQ CX, AX - JG LBB7_16 + JG LBB8_16 -LBB7_20: +LBB8_20: SUBQ CX, R15 ADDQ CX, BX - LONG $0x831d8d4c; WORD $0x00ab; BYTE $0x00 // leaq $43907(%rip), %r11 /* __HtmlQuoteTab(%rip) */ + LONG $0xab1d8d4c; WORD $0x00ab; BYTE $0x00 // leaq $43947(%rip), %r11 /* __HtmlQuoteTab(%rip) */ -LBB7_21: +LBB8_21: TESTQ BX, BX - JE LBB7_22 + JE LBB8_22 NOTQ R15 ADDQ R12, R15 TESTQ R15, R15 - JNS LBB7_49 - JMP LBB7_48 + JNS LBB8_49 + JMP LBB8_48 -LBB7_8: +LBB8_8: BSFW AX, AX MOVWLZX AX, R15 ADDQ R8, R15 TESTQ R15, R15 - JNS LBB7_49 - JMP LBB7_48 + JNS LBB8_49 + JMP LBB8_48 -LBB7_24: +LBB8_24: CMPL R10, $8 - JB LBB7_25 + JB LBB8_25 MOVQ AX, 0(R8) LEAQ 8(R15), R13 ADDQ $8, R8 LEAQ -8(R10), BX CMPQ BX, $4 - JAE LBB7_28 - JMP LBB7_29 + JAE LBB8_28 + JMP LBB8_29 -LBB7_45: +LBB8_45: SUBQ R12, R15 SUBQ AX, R15 - LONG $0x201d8d4c; WORD $0x00ab; BYTE $0x00 // leaq $43808(%rip), %r11 /* __HtmlQuoteTab(%rip) */ + LONG $0x481d8d4c; WORD $0x00ab; BYTE $0x00 // leaq $43848(%rip), %r11 /* __HtmlQuoteTab(%rip) */ TESTQ R15, R15 - JNS LBB7_49 - JMP LBB7_48 + JNS LBB8_49 + JMP LBB8_48 -LBB7_35: +LBB8_35: MOVQ R15, R10 MOVQ R13, BX CMPQ BX, $4 - JB LBB7_39 + JB LBB8_39 -LBB7_38: +LBB8_38: MOVL 0(R10), AX MOVL AX, 0(R8) ADDQ $4, R10 ADDQ $4, R8 ADDQ $-4, BX -LBB7_39: +LBB8_39: CMPQ BX, $2 - JAE LBB7_40 + JAE LBB8_40 TESTQ BX, BX - JE LBB7_43 + JE LBB8_43 -LBB7_42: +LBB8_42: MOVB 0(R10), AX MOVB AX, 0(R8) -LBB7_43: +LBB8_43: ADDQ R15, R13 NOTQ R13 ADDQ R12, R13 MOVQ R13, R15 TESTQ R15, R15 - JNS LBB7_49 - JMP LBB7_48 + JNS LBB8_49 + JMP LBB8_48 -LBB7_25: +LBB8_25: MOVQ R15, R13 MOVQ R10, BX CMPQ BX, $4 - JB LBB7_29 + JB LBB8_29 -LBB7_28: +LBB8_28: MOVL 0(R13), AX MOVL AX, 0(R8) ADDQ $4, R13 ADDQ $4, R8 ADDQ $-4, BX -LBB7_29: +LBB8_29: CMPQ BX, $2 - JAE LBB7_30 + JAE LBB8_30 TESTQ BX, BX - JE LBB7_33 + JE LBB8_33 -LBB7_32: +LBB8_32: MOVB 0(R13), AX MOVB AX, 0(R8) -LBB7_33: +LBB8_33: SUBQ R12, R15 ADDQ R10, R15 TESTQ R15, R15 - JNS LBB7_49 - JMP LBB7_48 + JNS LBB8_49 + JMP LBB8_48 -LBB7_40: +LBB8_40: MOVWLZX 0(R10), AX MOVW AX, 0(R8) ADDQ $2, R10 ADDQ $2, R8 ADDQ $-2, BX TESTQ BX, BX - JNE LBB7_42 - JMP LBB7_43 + JNE LBB8_42 + JMP LBB8_43 -LBB7_30: +LBB8_30: MOVWLZX 0(R13), AX MOVW AX, 0(R8) ADDQ $2, R13 ADDQ $2, R8 ADDQ $-2, BX TESTQ BX, BX - JNE LBB7_32 - JMP LBB7_33 + JNE LBB8_32 + JMP LBB8_33 -LBB7_22: +LBB8_22: SUBQ R12, R15 TESTQ R15, R15 - JS LBB7_48 + JS LBB8_48 -LBB7_49: +LBB8_49: ADDQ R15, R12 ADDQ 
R15, R14 SUBQ R15, SI - JLE LBB7_50 + JLE LBB8_50 SUBQ R15, R9 MOVB 0(R12), CX CMPB CX, $-30 - JE LBB7_53 + JE LBB8_53 MOVQ R12, AX -LBB7_57: +LBB8_57: MOVBLZX CX, CX SHLQ $4, CX MOVQ 0(CX)(R11*1), DX MOVLQSX DX, R15 SUBQ R15, R9 - JL LBB7_58 + JL LBB8_58 SHLQ $32, DX LEAQ 0(CX)(R11*1), R10 ADDQ $8, R10 MOVQ $12884901889, BX CMPQ DX, BX - JL LBB7_62 + JL LBB8_62 MOVL 0(R10), DX MOVL DX, 0(R14) LEAQ 0(CX)(R11*1), R10 @@ -2985,64 +3283,64 @@ LBB7_57: LEAQ 4(R14), R8 LEAQ -4(R15), CX CMPQ CX, $2 - JGE LBB7_65 - JMP LBB7_66 + JGE LBB8_65 + JMP LBB8_66 -LBB7_62: +LBB8_62: MOVQ R14, R8 MOVQ R15, CX CMPQ CX, $2 - JL LBB7_66 + JL LBB8_66 -LBB7_65: +LBB8_65: MOVWLZX 0(R10), DX MOVW DX, 0(R8) ADDQ $2, R10 ADDQ $2, R8 ADDQ $-2, CX -LBB7_66: +LBB8_66: TESTQ CX, CX - JLE LBB7_68 + JLE LBB8_68 MOVB 0(R10), CX MOVB CX, 0(R8) -LBB7_68: +LBB8_68: ADDQ R15, R14 -LBB7_69: +LBB8_69: ADDQ $1, AX LEAQ -1(SI), CX MOVQ AX, R12 CMPQ SI, $1 MOVQ CX, SI - JG LBB7_2 - JMP LBB7_70 + JG LBB8_2 + JMP LBB8_70 -LBB7_53: +LBB8_53: CMPQ SI, $3 - JL LBB7_59 + JL LBB8_59 CMPB 1(R12), $-128 - JNE LBB7_59 + JNE LBB8_59 MOVB 2(R12), CX MOVL CX, AX ANDB $-2, AX CMPB AX, $-88 - JNE LBB7_59 + JNE LBB8_59 LEAQ 2(R12), AX ADDQ $-2, SI - JMP LBB7_57 + JMP LBB8_57 -LBB7_59: +LBB8_59: TESTQ R9, R9 - JLE LBB7_3 + JLE LBB8_3 MOVB $-30, 0(R14) ADDQ $1, R14 ADDQ $-1, R9 MOVQ R12, AX - JMP LBB7_69 + JMP LBB8_69 -LBB7_48: +LBB8_48: MOVQ -48(BP), CX ADDQ R15, CX NOTQ CX @@ -3052,29 +3350,29 @@ LBB7_48: SUBQ R12, DI ADDQ R15, DI MOVQ DI, AX - JMP LBB7_71 + JMP LBB8_71 -LBB7_50: +LBB8_50: MOVQ R12, AX -LBB7_70: +LBB8_70: SUBQ -48(BP), R14 MOVQ -56(BP), CX MOVQ R14, 0(CX) SUBQ DI, AX - JMP LBB7_71 + JMP LBB8_71 -LBB7_58: +LBB8_58: SUBQ -48(BP), R14 MOVQ -56(BP), AX MOVQ R14, 0(AX) -LBB7_3: +LBB8_3: NOTQ R12 ADDQ DI, R12 MOVQ R12, AX -LBB7_71: +LBB8_71: ADDQ $16, SP BYTE $0x5b // popq %rbx WORD $0x5c41 // popq %r12 @@ -3092,35 +3390,35 @@ _atof_eisel_lemire64: BYTE $0x53 // pushq %rbx LEAL 348(SI), AX CMPL AX, $695 - JA LBB8_1 + JA LBB9_1 MOVQ CX, R8 MOVL DX, R9 TESTQ DI, DI - JE LBB8_4 + JE LBB9_4 BSRQ DI, R10 XORQ $63, R10 - JMP LBB8_5 + JMP LBB9_5 -LBB8_4: +LBB9_4: MOVL $64, R10 -LBB8_5: +LBB9_5: MOVL R10, CX SHLQ CX, DI MOVL AX, CX SHLQ $4, CX - LONG $0xd73d8d4c; WORD $0x0030; BYTE $0x00 // leaq $12503(%rip), %r15 /* _POW10_M128_TAB(%rip) */ + LONG $0xee3d8d4c; WORD $0x0030; BYTE $0x00 // leaq $12526(%rip), %r15 /* _POW10_M128_TAB(%rip) */ MOVQ DI, AX MULQ 8(CX)(R15*1) MOVQ AX, R11 MOVQ DX, R14 ANDL $511, DX CMPQ DX, $511 - JNE LBB8_11 + JNE LBB9_11 MOVQ DI, BX NOTQ BX CMPQ R11, BX - JBE LBB8_11 + JBE LBB9_11 MOVQ DI, AX MULQ 0(CX)(R15*1) ADDQ DX, R11 @@ -3128,27 +3426,27 @@ LBB8_5: MOVL R14, DX ANDL $511, DX CMPQ DX, $511 - JNE LBB8_11 + JNE LBB9_11 CMPQ R11, $-1 - JNE LBB8_11 + JNE LBB9_11 CMPQ AX, BX - JA LBB8_1 + JA LBB9_1 -LBB8_11: +LBB9_11: MOVQ R14, AX SHRQ $63, AX LEAL 9(AX), CX SHRQ CX, R14 TESTQ R11, R11 - JNE LBB8_15 + JNE LBB9_15 TESTQ DX, DX - JNE LBB8_15 + JNE LBB9_15 MOVL R14, CX ANDL $3, CX CMPL CX, $1 - JE LBB8_1 + JE LBB9_1 -LBB8_15: +LBB9_15: LONG $0x526ace69; WORD $0x0003 // imull $217706, %esi, %ecx SARL $16, CX ADDL $1087, CX @@ -3164,19 +3462,19 @@ LBB8_15: SBBQ $0, AX LEAQ -1(AX), SI CMPQ SI, $2045 - JBE LBB8_17 + JBE LBB9_17 -LBB8_1: +LBB9_1: XORL AX, AX -LBB8_18: +LBB9_18: BYTE $0x5b // popq %rbx WORD $0x5e41 // popq %r14 WORD $0x5f41 // popq %r15 BYTE $0x5d // popq %rbp RET -LBB8_17: +LBB9_17: CMPQ CX, $1 MOVB $2, CX SBBB $0, CX @@ -3191,7 +3489,7 @@ LBB8_17: LONG $0xc1450f48 // cmovneq %rcx, %rax MOVQ AX, 0(R8) MOVB 
$1, AX - JMP LBB8_18 + JMP LBB9_18 _decimal_to_f64: BYTE $0x55 // pushq %rbp @@ -3206,150 +3504,150 @@ _decimal_to_f64: MOVQ DI, R15 MOVQ $4503599627370496, R13 CMPL 16(DI), $0 - JE LBB9_4 + JE LBB10_4 MOVQ $9218868437227405312, R14 MOVL 20(R15), AX XORL R12, R12 CMPL AX, $310 - JG LBB9_64 + JG LBB10_64 CMPL AX, $-330 - JGE LBB9_5 + JGE LBB10_5 XORL R14, R14 - JMP LBB9_64 + JMP LBB10_64 -LBB9_4: +LBB10_4: XORL R14, R14 XORL R12, R12 - JMP LBB9_64 + JMP LBB10_64 -LBB9_5: +LBB10_5: TESTL AX, AX MOVQ BX, -48(BP) - JLE LBB9_12 + JLE LBB10_12 XORL R12, R12 - LONG $0x04358d4c; WORD $0x005b; BYTE $0x00 // leaq $23300(%rip), %r14 /* _POW_TAB(%rip) */ - JMP LBB9_8 + LONG $0x1b358d4c; WORD $0x005b; BYTE $0x00 // leaq $23323(%rip), %r14 /* _POW_TAB(%rip) */ + JMP LBB10_8 -LBB9_10: +LBB10_10: MOVL AX, AX MOVL 0(R14)(AX*4), BX CMPL 16(R15), $0 - JE LBB9_7 + JE LBB10_7 -LBB9_11: +LBB10_11: MOVQ R15, DI MOVL BX, SI - LONG $0x002df1e8; BYTE $0x00 // callq _right_shift + LONG $0x002e08e8; BYTE $0x00 // callq _right_shift -LBB9_7: +LBB10_7: ADDL BX, R12 MOVL 20(R15), AX TESTL AX, AX - JLE LBB9_12 + JLE LBB10_12 -LBB9_8: +LBB10_8: CMPL AX, $8 - JLE LBB9_10 + JLE LBB10_10 MOVL $27, BX CMPL 16(R15), $0 - JNE LBB9_11 - JMP LBB9_7 + JNE LBB10_11 + JMP LBB10_7 -LBB9_12: - LONG $0xc6358d4c; WORD $0x005a; BYTE $0x00 // leaq $23238(%rip), %r14 /* _POW_TAB(%rip) */ - JMP LBB9_14 +LBB10_12: + LONG $0xdd358d4c; WORD $0x005a; BYTE $0x00 // leaq $23261(%rip), %r14 /* _POW_TAB(%rip) */ + JMP LBB10_14 -LBB9_18: +LBB10_18: MOVL $27, BX CMPL 16(R15), $0 - JE LBB9_13 + JE LBB10_13 -LBB9_20: +LBB10_20: MOVQ R15, DI MOVL BX, SI - LONG $0x002c15e8; BYTE $0x00 // callq _left_shift + LONG $0x002c2ce8; BYTE $0x00 // callq _left_shift MOVL 20(R15), AX -LBB9_13: +LBB10_13: SUBL BX, R12 -LBB9_14: +LBB10_14: TESTL AX, AX - JS LBB9_17 - JNE LBB9_21 + JS LBB10_17 + JNE LBB10_21 MOVQ 0(R15), CX CMPB 0(CX), $53 - JL LBB9_19 - JMP LBB9_21 + JL LBB10_19 + JMP LBB10_21 -LBB9_17: +LBB10_17: CMPL AX, $-8 - JL LBB9_18 + JL LBB10_18 -LBB9_19: +LBB10_19: MOVL AX, CX NEGL CX MOVL 0(R14)(CX*4), BX CMPL 16(R15), $0 - JNE LBB9_20 - JMP LBB9_13 + JNE LBB10_20 + JMP LBB10_13 -LBB9_21: +LBB10_21: CMPL R12, $-1022 - JG LBB9_27 + JG LBB10_27 CMPL 16(R15), $0 MOVQ -48(BP), BX - JE LBB9_29 + JE LBB10_29 CMPL R12, $-1082 - JG LBB9_30 + JG LBB10_30 ADDL $961, R12 -LBB9_25: +LBB10_25: MOVQ R15, DI MOVL $60, SI - LONG $0x002d56e8; BYTE $0x00 // callq _right_shift + LONG $0x002d6de8; BYTE $0x00 // callq _right_shift ADDL $60, R12 CMPL R12, $-120 - JL LBB9_25 + JL LBB10_25 ADDL $60, R12 - JMP LBB9_31 + JMP LBB10_31 -LBB9_27: +LBB10_27: CMPL R12, $1024 MOVQ -48(BP), BX - JG LBB9_61 + JG LBB10_61 ADDL $-1, R12 MOVL R12, R14 - JMP LBB9_32 + JMP LBB10_32 -LBB9_29: +LBB10_29: MOVL $-1022, R14 - JMP LBB9_34 + JMP LBB10_34 -LBB9_30: +LBB10_30: ADDL $1021, R12 -LBB9_31: +LBB10_31: NEGL R12 MOVQ R15, DI MOVL R12, SI - LONG $0x002d0fe8; BYTE $0x00 // callq _right_shift + LONG $0x002d26e8; BYTE $0x00 // callq _right_shift MOVL $-1022, R14 -LBB9_32: +LBB10_32: CMPL 16(R15), $0 - JE LBB9_34 + JE LBB10_34 MOVQ R15, DI MOVL $53, SI - LONG $0x002b56e8; BYTE $0x00 // callq _left_shift + LONG $0x002b6de8; BYTE $0x00 // callq _left_shift -LBB9_34: +LBB10_34: MOVL 20(R15), AX MOVQ $-1, R12 CMPL AX, $20 - JG LBB9_63 + JG LBB10_63 TESTL AX, AX - JLE LBB9_40 + JLE LBB10_40 MOVL 16(R15), DX XORL SI, SI TESTL DX, DX @@ -3360,9 +3658,9 @@ LBB9_34: LEAL 1(R9), R8 XORL R12, R12 -LBB9_37: +LBB10_37: CMPQ DX, SI - JE LBB9_41 + JE LBB10_41 LEAQ 0(R12)(R12*4), DI MOVQ 0(R15), CX MOVBQSX 
0(CX)(SI*1), CX @@ -3370,91 +3668,91 @@ LBB9_37: ADDQ $-48, R12 ADDQ $1, SI CMPQ AX, SI - JNE LBB9_37 + JNE LBB10_37 MOVL R8, R9 - JMP LBB9_41 + JMP LBB10_41 -LBB9_40: +LBB10_40: XORL R9, R9 XORL R12, R12 -LBB9_41: +LBB10_41: CMPL AX, R9 - JLE LBB9_49 + JLE LBB10_49 MOVL AX, SI SUBL R9, SI MOVL R9, DX NOTL DX ADDL AX, DX ANDL $7, SI - JE LBB9_46 + JE LBB10_46 NEGL SI XORL DI, DI -LBB9_44: +LBB10_44: ADDQ R12, R12 LEAQ 0(R12)(R12*4), R12 ADDL $-1, DI CMPL SI, DI - JNE LBB9_44 + JNE LBB10_44 SUBL DI, R9 -LBB9_46: +LBB10_46: CMPL DX, $7 - JB LBB9_49 + JB LBB10_49 MOVL AX, DX SUBL R9, DX -LBB9_48: +LBB10_48: IMUL3Q $100000000, R12, R12 ADDL $-8, DX - JNE LBB9_48 + JNE LBB10_48 -LBB9_49: +LBB10_49: TESTL AX, AX - JS LBB9_57 + JS LBB10_57 MOVL 16(R15), CX CMPL CX, AX - JLE LBB9_57 + JLE LBB10_57 MOVQ 0(R15), SI MOVB 0(SI)(AX*1), DX CMPB DX, $53 - JNE LBB9_58 + JNE LBB10_58 LEAL 1(AX), DI CMPL DI, CX - JNE LBB9_58 + JNE LBB10_58 CMPL 28(R15), $0 SETNE CX - JNE LBB9_59 + JNE LBB10_59 TESTL AX, AX - JLE LBB9_59 + JLE LBB10_59 ADDL $-1, AX MOVB 0(SI)(AX*1), CX ANDB $1, CX - JMP LBB9_59 + JMP LBB10_59 -LBB9_57: +LBB10_57: XORL CX, CX -LBB9_59: +LBB10_59: MOVBLZX CX, AX ADDQ AX, R12 MOVQ $9007199254740992, AX CMPQ R12, AX - JNE LBB9_63 + JNE LBB10_63 CMPL R14, $1022 - JLE LBB9_62 + JLE LBB10_62 -LBB9_61: +LBB10_61: XORL R12, R12 MOVQ $9218868437227405312, R14 - JMP LBB9_64 + JMP LBB10_64 -LBB9_62: +LBB10_62: ADDL $1, R14 MOVQ R13, R12 -LBB9_63: +LBB10_63: MOVQ R12, AX ANDQ R13, AX ADDL $1023, R14 @@ -3463,7 +3761,7 @@ LBB9_63: TESTQ AX, AX LONG $0xf0440f4c // cmoveq %rax, %r14 -LBB9_64: +LBB10_64: ADDQ $-1, R13 ANDQ R12, R13 ORQ R14, R13 @@ -3482,10 +3780,10 @@ LBB9_64: BYTE $0x5d // popq %rbp RET -LBB9_58: +LBB10_58: CMPB DX, $53 SETGE CX - JMP LBB9_59 + JMP LBB10_59 _atof_native: BYTE $0x55 // pushq %rbp @@ -3495,173 +3793,173 @@ _atof_native: MOVQ DX, -32(BP) MOVQ CX, -24(BP) TESTQ CX, CX - JE LBB10_5 + JE LBB11_5 MOVB $0, 0(DX) CMPQ CX, $1 - JE LBB10_5 + JE LBB11_5 MOVB $0, 1(DX) CMPQ -24(BP), $3 - JB LBB10_5 + JB LBB11_5 MOVL $2, AX -LBB10_4: +LBB11_4: MOVQ -32(BP), CX MOVB $0, 0(CX)(AX*1) ADDQ $1, AX CMPQ -24(BP), AX - JA LBB10_4 + JA LBB11_4 -LBB10_5: +LBB11_5: LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 LONG $0x4511f8c5; BYTE $0xf0 // vmovups %xmm0, $-16(%rbp) XORL DX, DX CMPB 0(DI), $45 - JNE LBB10_7 + JNE LBB11_7 MOVL $1, -8(BP) MOVL $1, AX CMPQ AX, SI - JL LBB10_8 - JMP LBB10_39 + JL LBB11_8 + JMP LBB11_39 -LBB10_7: +LBB11_7: XORL AX, AX CMPQ AX, SI - JGE LBB10_39 + JGE LBB11_39 -LBB10_8: +LBB11_8: MOVB $1, R11 XORL R9, R9 XORL R10, R10 XORL R8, R8 - JMP LBB10_12 + JMP LBB11_12 -LBB10_20: +LBB11_20: MOVL $1, -4(BP) -LBB10_11: +LBB11_11: ADDQ $1, AX CMPQ AX, SI SETLT R11 CMPQ SI, AX - JE LBB10_22 + JE LBB11_22 -LBB10_12: +LBB11_12: MOVBLZX 0(DI)(AX*1), CX LEAL -48(CX), DX CMPB DX, $9 - JA LBB10_17 + JA LBB11_17 CMPB CX, $48 - JNE LBB10_19 + JNE LBB11_19 TESTL R10, R10 - JE LBB10_21 + JE LBB11_21 MOVLQSX R9, R11 CMPQ -24(BP), R11 - JA LBB10_9 - JMP LBB10_10 + JA LBB11_9 + JMP LBB11_10 -LBB10_17: +LBB11_17: CMPB CX, $46 - JNE LBB10_23 + JNE LBB11_23 MOVL R10, -12(BP) MOVL $1, R8 - JMP LBB10_11 + JMP LBB11_11 -LBB10_19: +LBB11_19: MOVLQSX R10, R11 CMPQ -24(BP), R11 - JBE LBB10_20 + JBE LBB11_20 -LBB10_9: +LBB11_9: MOVQ -32(BP), DX MOVB CX, 0(DX)(R11*1) MOVL -16(BP), R9 ADDL $1, R9 MOVL R9, -16(BP) -LBB10_10: +LBB11_10: MOVL R9, R10 - JMP LBB10_11 + JMP LBB11_11 -LBB10_21: +LBB11_21: ADDL $-1, -12(BP) XORL R10, R10 - JMP LBB10_11 + JMP LBB11_11 -LBB10_22: +LBB11_22: MOVQ SI, AX -LBB10_23: 
+LBB11_23: TESTL R8, R8 - JE LBB10_25 + JE LBB11_25 TESTB $1, R11 - JNE LBB10_26 - JMP LBB10_40 + JNE LBB11_26 + JMP LBB11_40 -LBB10_25: +LBB11_25: MOVL R9, -12(BP) TESTB $1, R11 - JE LBB10_40 + JE LBB11_40 -LBB10_26: +LBB11_26: MOVL AX, DX MOVB 0(DI)(DX*1), CX ORB $32, CX CMPB CX, $101 - JNE LBB10_40 + JNE LBB11_40 MOVB 1(DI)(DX*1), CX CMPB CX, $45 - JE LBB10_30 + JE LBB11_30 MOVL $1, R8 CMPB CX, $43 - JNE LBB10_32 + JNE LBB11_32 ADDL $2, AX - JMP LBB10_31 + JMP LBB11_31 -LBB10_30: +LBB11_30: ADDL $2, AX MOVL $-1, R8 -LBB10_31: +LBB11_31: MOVL AX, DX MOVLQSX DX, AX XORL DX, DX CMPQ AX, SI - JL LBB10_33 - JMP LBB10_38 + JL LBB11_33 + JMP LBB11_38 -LBB10_32: +LBB11_32: ADDQ $1, DX MOVLQSX DX, AX XORL DX, DX CMPQ AX, SI - JGE LBB10_38 + JGE LBB11_38 -LBB10_33: +LBB11_33: XORL DX, DX -LBB10_34: +LBB11_34: MOVBLSX 0(DI)(AX*1), CX CMPL CX, $48 - JL LBB10_38 + JL LBB11_38 CMPB CX, $57 - JG LBB10_38 + JG LBB11_38 CMPL DX, $9999 - JG LBB10_38 + JG LBB11_38 LEAL 0(DX)(DX*4), DX LEAL 0(CX)(DX*2), DX ADDL $-48, DX ADDQ $1, AX CMPQ SI, AX - JNE LBB10_34 + JNE LBB11_34 -LBB10_38: +LBB11_38: IMULL R8, DX ADDL -12(BP), DX -LBB10_39: +LBB11_39: MOVL DX, -12(BP) -LBB10_40: +LBB11_40: LEAQ -32(BP), DI LEAQ -40(BP), SI LONG $0xfffb65e8; BYTE $0xff // callq _decimal_to_f64 @@ -3690,68 +3988,68 @@ _value: LONG $0x000554e8; BYTE $0x00 // callq _advance_ns MOVBLSX AX, AX CMPL AX, $125 - JA LBB11_11 - LONG $0x090d8d48; WORD $0x0003; BYTE $0x00 // leaq $777(%rip), %rcx /* LJTI11_0(%rip) */ + JA LBB12_11 + LONG $0x090d8d48; WORD $0x0003; BYTE $0x00 // leaq $777(%rip), %rcx /* LJTI12_0(%rip) */ MOVLQSX 0(CX)(AX*4), AX ADDQ CX, AX JMP AX -LBB11_2: +LBB12_2: MOVQ R14, -56(BP) MOVQ -48(BP), R14 LEAQ -1(R14), R13 MOVQ R13, -48(BP) TESTB $2, R12 - JNE LBB11_4 + JNE LBB12_4 LEAQ -80(BP), DI LEAQ -48(BP), SI MOVQ -56(BP), DX LONG $0x000ae9e8; BYTE $0x00 // callq _vnumber MOVQ -48(BP), R12 - JMP LBB11_49 + JMP LBB12_49 -LBB11_4: +LBB12_4: XORL AX, AX CMPB 0(R15)(R13*1), $45 LEAQ 0(R15)(R13*1), R12 SETEQ AX ADDQ AX, R12 SUBQ AX, BX - JE LBB11_44 + JE LBB12_44 CMPQ R13, BX - JAE LBB11_7 + JAE LBB12_7 MOVB 0(R12), AX ADDB $-48, AX CMPB AX, $9 - JA LBB11_46 + JA LBB12_46 -LBB11_7: +LBB12_7: MOVQ R12, DI MOVQ BX, SI LONG $0x002109e8; BYTE $0x00 // callq _do_skip_number TESTQ AX, AX - JS LBB11_45 + JS LBB12_45 ADDQ AX, R12 SUBQ R15, R12 TESTQ R14, R14 - JLE LBB11_48 + JLE LBB12_48 MOVQ -56(BP), AX MOVQ $8, 0(AX) MOVQ R13, 24(AX) - JMP LBB11_49 + JMP LBB12_49 -LBB11_10: +LBB12_10: MOVQ $1, 0(R14) MOVQ -48(BP), R12 - JMP LBB11_49 + JMP LBB12_49 -LBB11_11: +LBB12_11: MOVQ $-2, 0(R14) MOVQ -48(BP), R12 ADDQ $-1, R12 - JMP LBB11_49 + JMP LBB12_49 -LBB11_12: +LBB12_12: MOVQ $-1, -64(BP) MOVQ -48(BP), R15 LEAQ -80(BP), DI @@ -3760,7 +4058,7 @@ LBB11_12: LONG $0x0006d1e8; BYTE $0x00 // callq _advance_string MOVQ AX, R12 TESTQ AX, AX - JS LBB11_33 + JS LBB12_33 MOVQ R12, -48(BP) MOVQ R15, 16(R14) MOVQ -64(BP), AX @@ -3770,176 +4068,176 @@ LBB11_12: MOVQ CX, 24(R14) MOVL $7, AX MOVQ AX, 0(R14) - JMP LBB11_49 + JMP LBB12_49 -LBB11_14: +LBB12_14: TESTL R12, R12 MOVQ $-2, AX MOVL $11, CX - JMP LBB11_32 + JMP LBB12_32 -LBB11_15: +LBB12_15: TESTL R12, R12 MOVQ $-2, AX MOVL $10, CX - JMP LBB11_32 + JMP LBB12_32 -LBB11_16: +LBB12_16: MOVQ $5, 0(R14) MOVQ -48(BP), R12 - JMP LBB11_49 + JMP LBB12_49 -LBB11_17: +LBB12_17: TESTL R12, R12 MOVQ $-2, AX MOVL $12, CX - JMP LBB11_32 + JMP LBB12_32 -LBB11_18: +LBB12_18: MOVQ -48(BP), R12 LEAQ -4(BX), CX MOVQ $-1, AX CMPQ R12, CX - JA LBB11_25 + JA LBB12_25 MOVL 0(R15)(R12*1), CX CMPL CX, $1702063201 - JNE 
LBB11_34 + JNE LBB12_34 ADDQ $4, R12 MOVL $4, AX - JMP LBB11_24 + JMP LBB12_24 -LBB11_21: +LBB12_21: MOVQ -48(BP), R12 LEAQ -3(BX), CX MOVQ $-1, AX CMPQ R12, CX - JA LBB11_25 + JA LBB12_25 MOVL -1(R15)(R12*1), CX CMPL CX, $1819047278 - JNE LBB11_37 + JNE LBB12_37 ADDQ $3, R12 MOVL $2, AX - JMP LBB11_24 + JMP LBB12_24 -LBB11_27: +LBB12_27: MOVQ -48(BP), R12 LEAQ -3(BX), CX MOVQ $-1, AX CMPQ R12, CX - JA LBB11_25 + JA LBB12_25 MOVL -1(R15)(R12*1), CX CMPL CX, $1702195828 - JNE LBB11_41 + JNE LBB12_41 ADDQ $3, R12 MOVL $3, AX -LBB11_24: +LBB12_24: MOVQ R12, BX -LBB11_25: +LBB12_25: MOVQ BX, -48(BP) MOVQ BX, R12 -LBB11_26: +LBB12_26: MOVQ AX, 0(R14) - JMP LBB11_49 + JMP LBB12_49 -LBB11_30: +LBB12_30: MOVQ $6, 0(R14) MOVQ -48(BP), R12 - JMP LBB11_49 + JMP LBB12_49 -LBB11_31: +LBB12_31: TESTL R12, R12 MOVQ $-2, AX MOVL $13, CX -LBB11_32: +LBB12_32: LONG $0xc8490f48 // cmovnsq %rax, %rcx MOVQ CX, 0(R14) SARL $31, R12 NOTL R12 MOVLQSX R12, R12 ADDQ -48(BP), R12 - JMP LBB11_49 + JMP LBB12_49 -LBB11_33: +LBB12_33: MOVQ BX, -48(BP) MOVQ R12, 0(R14) MOVQ BX, R12 - JMP LBB11_49 + JMP LBB12_49 -LBB11_34: +LBB12_34: MOVQ $-2, AX CMPB CX, $97 - JNE LBB11_26 + JNE LBB12_26 MOVL $1702063201, CX -LBB11_36: +LBB12_36: SHRL $8, CX MOVBLSX 1(R15)(R12*1), DX ADDQ $1, R12 MOVBLZX CX, SI CMPL SI, DX - JE LBB11_36 - JMP LBB11_40 + JE LBB12_36 + JMP LBB12_40 -LBB11_37: +LBB12_37: ADDQ $-1, R12 MOVQ $-2, AX CMPB CX, $110 - JNE LBB11_26 + JNE LBB12_26 MOVL $1819047278, CX -LBB11_39: +LBB12_39: SHRL $8, CX MOVBLSX 1(R15)(R12*1), DX ADDQ $1, R12 MOVBLZX CX, SI CMPL SI, DX - JE LBB11_39 - JMP LBB11_40 + JE LBB12_39 + JMP LBB12_40 -LBB11_41: +LBB12_41: ADDQ $-1, R12 MOVQ $-2, AX CMPB CX, $116 - JNE LBB11_26 + JNE LBB12_26 MOVL $1702195828, CX -LBB11_43: +LBB12_43: SHRL $8, CX MOVBLSX 1(R15)(R12*1), DX ADDQ $1, R12 MOVBLZX CX, SI CMPL SI, DX - JE LBB11_43 + JE LBB12_43 -LBB11_40: +LBB12_40: MOVQ R12, -48(BP) MOVQ AX, 0(R14) - JMP LBB11_49 + JMP LBB12_49 -LBB11_44: +LBB12_44: MOVQ $-1, R13 - JMP LBB11_47 + JMP LBB12_47 -LBB11_45: +LBB12_45: NOTQ AX ADDQ AX, R12 -LBB11_46: +LBB12_46: MOVQ $-2, R13 -LBB11_47: +LBB12_47: SUBQ R15, R12 MOVQ R12, -48(BP) -LBB11_48: +LBB12_48: MOVQ -56(BP), AX MOVQ R13, 0(AX) -LBB11_49: +LBB12_49: MOVQ R12, AX ADDQ $40, SP BYTE $0x5b // popq %rbx @@ -3950,157 +4248,157 @@ LBB11_49: BYTE $0x5d // popq %rbp RET -// .set L11_0_set_10, LBB11_10-LJTI11_0 -// .set L11_0_set_11, LBB11_11-LJTI11_0 -// .set L11_0_set_12, LBB11_12-LJTI11_0 -// .set L11_0_set_14, LBB11_14-LJTI11_0 -// .set L11_0_set_2, LBB11_2-LJTI11_0 -// .set L11_0_set_15, LBB11_15-LJTI11_0 -// .set L11_0_set_16, LBB11_16-LJTI11_0 -// .set L11_0_set_17, LBB11_17-LJTI11_0 -// .set L11_0_set_18, LBB11_18-LJTI11_0 -// .set L11_0_set_21, LBB11_21-LJTI11_0 -// .set L11_0_set_27, LBB11_27-LJTI11_0 -// .set L11_0_set_30, LBB11_30-LJTI11_0 -// .set L11_0_set_31, LBB11_31-LJTI11_0 -LJTI11_0: - LONG $0xfffffd94 // .long L11_0_set_10 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG 
$0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffdb8 // .long L11_0_set_12 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffe0b // .long L11_0_set_14 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffd00 // .long L11_0_set_2 - LONG $0xfffffe1f // .long L11_0_set_15 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffe33 // .long L11_0_set_16 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffe43 // .long L11_0_set_17 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffe57 // .long L11_0_set_18 - 
LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffe86 // .long L11_0_set_21 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffeb6 // .long L11_0_set_27 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xfffffef6 // .long L11_0_set_30 - LONG $0xfffffda4 // .long L11_0_set_11 - LONG $0xffffff06 // .long L11_0_set_31 - -LCPI12_0: +// .set L12_0_set_10, LBB12_10-LJTI12_0 +// .set L12_0_set_11, LBB12_11-LJTI12_0 +// .set L12_0_set_12, LBB12_12-LJTI12_0 +// .set L12_0_set_14, LBB12_14-LJTI12_0 +// .set L12_0_set_2, LBB12_2-LJTI12_0 +// .set L12_0_set_15, LBB12_15-LJTI12_0 +// .set L12_0_set_16, LBB12_16-LJTI12_0 +// .set L12_0_set_17, LBB12_17-LJTI12_0 +// .set L12_0_set_18, LBB12_18-LJTI12_0 +// .set L12_0_set_21, LBB12_21-LJTI12_0 +// .set L12_0_set_27, LBB12_27-LJTI12_0 +// .set L12_0_set_30, LBB12_30-LJTI12_0 +// .set L12_0_set_31, LBB12_31-LJTI12_0 +LJTI12_0: + LONG $0xfffffd94 // .long L12_0_set_10 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffdb8 // .long L12_0_set_12 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffe0b // .long L12_0_set_14 + LONG $0xfffffd00 // .long L12_0_set_2 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffd00 // .long L12_0_set_2 + LONG $0xfffffd00 // .long L12_0_set_2 + LONG 
$0xfffffd00 // .long L12_0_set_2 + LONG $0xfffffd00 // .long L12_0_set_2 + LONG $0xfffffd00 // .long L12_0_set_2 + LONG $0xfffffd00 // .long L12_0_set_2 + LONG $0xfffffd00 // .long L12_0_set_2 + LONG $0xfffffd00 // .long L12_0_set_2 + LONG $0xfffffd00 // .long L12_0_set_2 + LONG $0xfffffd00 // .long L12_0_set_2 + LONG $0xfffffe1f // .long L12_0_set_15 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffe33 // .long L12_0_set_16 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffe43 // .long L12_0_set_17 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffe57 // .long L12_0_set_18 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffe86 // .long L12_0_set_21 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffeb6 // .long L12_0_set_27 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xfffffef6 // .long L12_0_set_30 + LONG $0xfffffda4 // .long L12_0_set_11 + LONG $0xffffff06 // .long L12_0_set_31 + +LCPI13_0: QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 16, ' ' -LCPI12_1: +LCPI13_1: QUAD $0x0909090909090909; QUAD $0x0909090909090909 // .space 16, '\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t' -LCPI12_2: +LCPI13_2: QUAD $0x0a0a0a0a0a0a0a0a; QUAD $0x0a0a0a0a0a0a0a0a // .space 16, '\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n' -LCPI12_3: +LCPI13_3: QUAD $0x0d0d0d0d0d0d0d0d; QUAD $0x0d0d0d0d0d0d0d0d // .space 16, '\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r' 
_advance_ns: @@ -4108,106 +4406,106 @@ _advance_ns: WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp MOVQ 0(DX), R8 CMPQ R8, SI - JAE LBB12_6 + JAE LBB13_6 MOVB 0(DI)(R8*1), AX CMPB AX, $13 - JE LBB12_6 + JE LBB13_6 CMPB AX, $32 - JE LBB12_6 + JE LBB13_6 ADDB $-9, AX CMPB AX, $1 - JBE LBB12_6 + JBE LBB13_6 MOVQ R8, AX - JMP LBB12_5 + JMP LBB13_5 -LBB12_6: +LBB13_6: LEAQ 1(R8), AX CMPQ AX, SI - JAE LBB12_10 + JAE LBB13_10 MOVB 0(DI)(AX*1), CX CMPB CX, $13 - JE LBB12_10 + JE LBB13_10 CMPB CX, $32 - JE LBB12_10 + JE LBB13_10 ADDB $-9, CX CMPB CX, $1 - JA LBB12_5 + JA LBB13_5 -LBB12_10: +LBB13_10: LEAQ 2(R8), AX CMPQ AX, SI - JAE LBB12_14 + JAE LBB13_14 MOVB 0(DI)(AX*1), CX CMPB CX, $13 - JE LBB12_14 + JE LBB13_14 CMPB CX, $32 - JE LBB12_14 + JE LBB13_14 ADDB $-9, CX CMPB CX, $1 - JA LBB12_5 + JA LBB13_5 -LBB12_14: +LBB13_14: LEAQ 3(R8), AX CMPQ AX, SI - JAE LBB12_18 + JAE LBB13_18 MOVB 0(DI)(AX*1), CX CMPB CX, $13 - JE LBB12_18 + JE LBB13_18 CMPB CX, $32 - JE LBB12_18 + JE LBB13_18 ADDB $-9, CX CMPB CX, $1 - JA LBB12_5 + JA LBB13_5 -LBB12_18: +LBB13_18: ADDQ $4, R8 CMPQ R8, SI - JAE LBB12_19 + JAE LBB13_19 LEAQ 0(DI)(R8*1), R9 MOVQ SI, R11 SUBQ R8, R11 - JE LBB12_27 + JE LBB13_27 MOVL R9, AX ANDL $15, AX TESTQ AX, AX - JE LBB12_27 + JE LBB13_27 MOVL $5, R10 SUBQ SI, R10 MOVQ $4294977024, R9 -LBB12_23: +LBB13_23: MOVBLSX 0(DI)(R8*1), CX CMPL CX, $32 - JA LBB12_42 + JA LBB13_42 BTQ CX, R9 - JAE LBB12_42 + JAE LBB13_42 LEAQ 0(R10)(R8*1), AX LEAQ 1(R8), CX CMPQ AX, $4 - JE LBB12_26 + JE LBB13_26 LEAL 0(DI)(R8*1), AX ADDL $1, AX ANDL $15, AX MOVQ CX, R8 TESTQ AX, AX - JNE LBB12_23 + JNE LBB13_23 -LBB12_26: +LBB13_26: LEAQ 0(DI)(CX*1), R9 MOVQ SI, R11 SUBQ CX, R11 -LBB12_27: +LBB13_27: CMPQ R11, $16 - JB LBB12_33 + JB LBB13_33 MOVQ DI, CX SUBQ R9, CX - QUAD $0xfffffea7056ffac5 // vmovdqu $-345(%rip), %xmm0 /* LCPI12_0(%rip) */ - QUAD $0xfffffeaf0d6ffac5 // vmovdqu $-337(%rip), %xmm1 /* LCPI12_1(%rip) */ - QUAD $0xfffffeb7156ffac5 // vmovdqu $-329(%rip), %xmm2 /* LCPI12_2(%rip) */ - QUAD $0xfffffebf1d6ffac5 // vmovdqu $-321(%rip), %xmm3 /* LCPI12_3(%rip) */ + QUAD $0xfffffea7056ffac5 // vmovdqu $-345(%rip), %xmm0 /* LCPI13_0(%rip) */ + QUAD $0xfffffeaf0d6ffac5 // vmovdqu $-337(%rip), %xmm1 /* LCPI13_1(%rip) */ + QUAD $0xfffffeb7156ffac5 // vmovdqu $-329(%rip), %xmm2 /* LCPI13_2(%rip) */ + QUAD $0xfffffebf1d6ffac5 // vmovdqu $-321(%rip), %xmm3 /* LCPI13_3(%rip) */ -LBB12_29: +LBB13_29: LONG $0x6f79c1c4; BYTE $0x21 // vmovdqa (%r9), %xmm4 LONG $0xe874d9c5 // vpcmpeqb %xmm0, %xmm4, %xmm5 LONG $0xf174d9c5 // vpcmpeqb %xmm1, %xmm4, %xmm6 @@ -4218,54 +4516,54 @@ LBB12_29: LONG $0xe5ebd9c5 // vpor %xmm5, %xmm4, %xmm4 LONG $0xc4d7f9c5 // vpmovmskb %xmm4, %eax CMPW AX, $-1 - JNE LBB12_30 + JNE LBB13_30 ADDQ $16, R9 ADDQ $-16, R11 ADDQ $-16, CX CMPQ R11, $15 - JA LBB12_29 + JA LBB13_29 -LBB12_33: +LBB13_33: TESTQ R11, R11 - JE LBB12_40 + JE LBB13_40 LEAQ 0(R9)(R11*1), R8 XORL CX, CX MOVQ $4294977024, R10 -LBB12_35: +LBB13_35: MOVBLSX 0(R9)(CX*1), AX CMPL AX, $32 - JA LBB12_37 + JA LBB13_37 BTQ AX, R10 - JAE LBB12_37 + JAE LBB13_37 ADDQ $1, CX CMPQ R11, CX - JNE LBB12_35 + JNE LBB13_35 MOVQ R8, R9 -LBB12_40: +LBB13_40: SUBQ DI, R9 -LBB12_41: +LBB13_41: MOVQ R9, R8 - JMP LBB12_42 + JMP LBB13_42 -LBB12_19: +LBB13_19: MOVQ R8, 0(DX) - JMP LBB12_43 + JMP LBB13_43 -LBB12_30: +LBB13_30: MOVWLZX AX, AX NOTL AX BSFL AX, R8 SUBQ CX, R8 -LBB12_42: +LBB13_42: MOVQ R8, AX CMPQ R8, SI - JAE LBB12_43 + JAE LBB13_43 -LBB12_5: +LBB13_5: LEAQ 1(AX), CX MOVQ CX, 0(DX) MOVB 0(DI)(AX*1), AX @@ -4273,16 +4571,16 @@ LBB12_5: BYTE $0x5d 
// popq %rbp RET -LBB12_43: +LBB13_43: XORL AX, AX MOVBLSX AX, AX BYTE $0x5d // popq %rbp RET -LBB12_37: +LBB13_37: SUBQ DI, R9 ADDQ CX, R9 - JMP LBB12_41 + JMP LBB13_41 _vstring: BYTE $0x55 // pushq %rbp @@ -4301,7 +4599,7 @@ _vstring: MOVQ R12, SI LONG $0x000060e8; BYTE $0x00 // callq _advance_string TESTQ AX, AX - JS LBB13_1 + JS LBB14_1 MOVQ AX, 0(BX) MOVQ R12, 16(R14) MOVQ -40(BP), CX @@ -4310,13 +4608,13 @@ _vstring: LONG $0xc14c0f48 // cmovlq %rcx, %rax MOVQ AX, 24(R14) MOVL $7, AX - JMP LBB13_3 + JMP LBB14_3 -LBB13_1: +LBB14_1: MOVQ 8(R15), CX MOVQ CX, 0(BX) -LBB13_3: +LBB14_3: MOVQ AX, 0(R14) ADDQ $16, SP BYTE $0x5b // popq %rbx @@ -4326,10 +4624,10 @@ LBB13_3: BYTE $0x5d // popq %rbp RET -LCPI14_0: +LCPI15_0: QUAD $0x2222222222222222; QUAD $0x2222222222222222 // .space 16, '""""""""""""""""' -LCPI14_1: +LCPI15_1: QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' _advance_string: @@ -4343,19 +4641,19 @@ _advance_string: BYTE $0x50 // pushq %rax MOVQ 8(DI), R15 SUBQ SI, R15 - JE LBB14_17 + JE LBB15_17 MOVQ 0(DI), R9 MOVQ $-1, 0(DX) CMPQ R15, $64 - JB LBB14_18 + JB LBB15_18 MOVQ SI, DI NOTQ DI MOVQ $-1, -48(BP) XORL R14, R14 - QUAD $0xffffff98056ffac5 // vmovdqu $-104(%rip), %xmm0 /* LCPI14_0(%rip) */ - QUAD $0xffffffa00d6ffac5 // vmovdqu $-96(%rip), %xmm1 /* LCPI14_1(%rip) */ + QUAD $0xffffff98056ffac5 // vmovdqu $-104(%rip), %xmm0 /* LCPI15_0(%rip) */ + QUAD $0xffffffa00d6ffac5 // vmovdqu $-96(%rip), %xmm1 /* LCPI15_1(%rip) */ -LBB14_3: +LBB15_3: LONG $0x6f7ac1c4; WORD $0x3114 // vmovdqu (%r9,%rsi), %xmm2 LONG $0x6f7ac1c4; WORD $0x315c; BYTE $0x10 // vmovdqu $16(%r9,%rsi), %xmm3 LONG $0x6f7ac1c4; WORD $0x3164; BYTE $0x20 // vmovdqu $32(%r9,%rsi), %xmm4 @@ -4388,30 +4686,30 @@ LBB14_3: SHLQ $16, CX ORQ R10, CX ORQ CX, R13 - JNE LBB14_7 + JNE LBB15_7 TESTQ R14, R14 - JNE LBB14_9 + JNE LBB15_9 XORL R14, R14 TESTQ R12, R12 - JNE LBB14_10 + JNE LBB15_10 -LBB14_6: +LBB15_6: ADDQ $-64, R15 ADDQ $-64, DI ADDQ $64, SI CMPQ R15, $63 - JA LBB14_3 - JMP LBB14_12 + JA LBB15_3 + JMP LBB15_12 -LBB14_7: +LBB15_7: CMPQ -48(BP), $-1 - JNE LBB14_9 + JNE LBB15_9 BSFQ R13, AX ADDQ SI, AX MOVQ AX, -48(BP) MOVQ AX, 0(DX) -LBB14_9: +LBB15_9: MOVQ R14, AX NOTQ AX ANDQ R13, AX @@ -4432,13 +4730,13 @@ LBB14_9: NOTQ CX ANDQ CX, R12 TESTQ R12, R12 - JE LBB14_6 + JE LBB15_6 -LBB14_10: +LBB15_10: BSFQ R12, AX SUBQ DI, AX -LBB14_11: +LBB15_11: ADDQ $8, SP BYTE $0x5b // popq %rbx WORD $0x5c41 // popq %r12 @@ -4448,16 +4746,16 @@ LBB14_11: BYTE $0x5d // popq %rbp RET -LBB14_12: +LBB15_12: ADDQ R9, SI CMPQ R15, $32 - JB LBB14_23 + JB LBB15_23 -LBB14_13: +LBB15_13: LONG $0x066ffac5 // vmovdqu (%rsi), %xmm0 LONG $0x4e6ffac5; BYTE $0x10 // vmovdqu $16(%rsi), %xmm1 - QUAD $0xfffffe4d156ffac5 // vmovdqu $-435(%rip), %xmm2 /* LCPI14_0(%rip) */ - QUAD $0xfffffe551d6ffac5 // vmovdqu $-427(%rip), %xmm3 /* LCPI14_1(%rip) */ + QUAD $0xfffffe4d156ffac5 // vmovdqu $-435(%rip), %xmm2 /* LCPI15_0(%rip) */ + QUAD $0xfffffe551d6ffac5 // vmovdqu $-427(%rip), %xmm3 /* LCPI15_1(%rip) */ LONG $0xe274f9c5 // vpcmpeqb %xmm2, %xmm0, %xmm4 LONG $0xfcd7f9c5 // vpmovmskb %xmm4, %edi LONG $0xd274f1c5 // vpcmpeqb %xmm2, %xmm1, %xmm2 @@ -4470,31 +4768,31 @@ LBB14_13: ORQ CX, DI SHLQ $16, BX ORQ BX, AX - JNE LBB14_19 + JNE LBB15_19 TESTQ R14, R14 - JNE LBB14_21 + JNE LBB15_21 XORL R14, R14 TESTQ DI, DI - JE LBB14_22 + JE LBB15_22 -LBB14_16: +LBB15_16: BSFQ DI, AX SUBQ R9, SI ADDQ SI, AX ADDQ $1, AX - JMP LBB14_11 + JMP LBB15_11 -LBB14_18: +LBB15_18: ADDQ R9, SI MOVQ $-1, -48(BP) XORL R14, R14 
CMPQ R15, $32 - JAE LBB14_13 - JMP LBB14_23 + JAE LBB15_13 + JMP LBB15_23 -LBB14_19: +LBB15_19: CMPQ -48(BP), $-1 - JNE LBB14_21 + JNE LBB15_21 MOVQ SI, CX SUBQ R9, CX BSFQ AX, BX @@ -4502,7 +4800,7 @@ LBB14_19: MOVQ BX, -48(BP) MOVQ BX, 0(DX) -LBB14_21: +LBB15_21: MOVL R14, CX NOTL CX ANDL AX, CX @@ -4520,51 +4818,51 @@ LBB14_21: NOTL BX ANDL BX, DI TESTQ DI, DI - JNE LBB14_16 + JNE LBB15_16 -LBB14_22: +LBB15_22: ADDQ $32, SI ADDQ $-32, R15 -LBB14_23: +LBB15_23: TESTQ R14, R14 - JNE LBB14_37 + JNE LBB15_37 TESTQ R15, R15 - JE LBB14_36 + JE LBB15_36 -LBB14_25: +LBB15_25: MOVQ R9, DI NOTQ DI ADDQ $1, DI -LBB14_26: +LBB15_26: XORL AX, AX -LBB14_27: +LBB15_27: MOVQ AX, BX MOVBLZX 0(SI)(AX*1), CX CMPB CX, $34 - JE LBB14_35 + JE LBB15_35 CMPB CX, $92 - JE LBB14_30 + JE LBB15_30 LEAQ 1(BX), AX CMPQ R15, AX - JNE LBB14_27 - JMP LBB14_34 + JNE LBB15_27 + JMP LBB15_34 -LBB14_30: +LBB15_30: LEAQ -1(R15), CX MOVQ $-1, AX CMPQ CX, BX - JE LBB14_11 + JE LBB15_11 CMPQ -48(BP), $-1 - JNE LBB14_33 + JNE LBB15_33 LEAQ 0(DI)(SI*1), CX ADDQ BX, CX MOVQ CX, -48(BP) MOVQ CX, 0(DX) -LBB14_33: +LBB15_33: ADDQ BX, SI ADDQ $2, SI MOVQ R15, CX @@ -4573,59 +4871,59 @@ LBB14_33: ADDQ $-2, R15 CMPQ R15, BX MOVQ CX, R15 - JNE LBB14_26 - JMP LBB14_11 + JNE LBB15_26 + JMP LBB15_11 -LBB14_34: +LBB15_34: MOVQ $-1, AX CMPB CX, $34 - JNE LBB14_11 + JNE LBB15_11 -LBB14_35: +LBB15_35: ADDQ BX, SI ADDQ $1, SI -LBB14_36: +LBB15_36: SUBQ R9, SI MOVQ SI, AX - JMP LBB14_11 + JMP LBB15_11 -LBB14_37: +LBB15_37: TESTQ R15, R15 - JE LBB14_17 + JE LBB15_17 CMPQ -48(BP), $-1 - JNE LBB14_40 + JNE LBB15_40 MOVQ R9, AX NOTQ AX ADDQ SI, AX MOVQ AX, -48(BP) MOVQ AX, 0(DX) -LBB14_40: +LBB15_40: ADDQ $1, SI ADDQ $-1, R15 TESTQ R15, R15 - JNE LBB14_25 - JMP LBB14_36 + JNE LBB15_25 + JMP LBB15_36 -LBB14_17: +LBB15_17: MOVQ $-1, AX - JMP LBB14_11 + JMP LBB15_11 -LCPI15_0: +LCPI16_0: LONG $0x43300000 // .long 1127219200 LONG $0x45300000 // .long 1160773632 LONG $0x00000000 // .long 0 LONG $0x00000000 // .long 0 -LCPI15_1: +LCPI16_1: QUAD $0x4330000000000000 // .quad 0x4330000000000000 QUAD $0x4530000000000000 // .quad 0x4530000000000000 -LCPI15_2: +LCPI16_2: QUAD $0x430c6bf526340000 // .quad 0x430c6bf526340000 -LCPI15_3: +LCPI16_3: QUAD $0xc30c6bf526340000 // .quad 0xc30c6bf526340000 _vnumber: @@ -4650,215 +4948,215 @@ _vnumber: MOVQ 0(SI), CX MOVQ CX, 24(DX) CMPQ AX, R13 - JAE LBB15_52 + JAE LBB16_52 MOVB 0(R15)(AX*1), DI MOVL $1, DX CMPB DI, $45 - JNE LBB15_4 + JNE LBB16_4 ADDQ $1, AX CMPQ AX, R13 - JAE LBB15_52 + JAE LBB16_52 MOVB 0(R15)(AX*1), DI MOVL $-1, DX -LBB15_4: +LBB16_4: LEAL -48(DI), CX CMPB CX, $10 - JB LBB15_6 + JB LBB16_6 -LBB15_5: +LBB16_5: MOVQ AX, 0(R14) MOVQ $-2, 0(BX) - JMP LBB15_53 + JMP LBB16_53 -LBB15_6: +LBB16_6: CMPB DI, $48 - JNE LBB15_10 + JNE LBB16_10 LEAQ 1(AX), R8 CMPQ AX, R13 - JAE LBB15_22 + JAE LBB16_22 MOVB 0(R15)(R8*1), CX ADDB $-46, CX CMPB CX, $55 - JA LBB15_22 + JA LBB16_22 MOVBLZX CX, CX MOVQ $36028797027352577, SI BTQ CX, SI - JAE LBB15_22 + JAE LBB16_22 -LBB15_10: +LBB16_10: MOVL DX, -44(BP) MOVB $1, CX MOVL CX, -56(BP) CMPQ AX, R13 - JAE LBB15_21 + JAE LBB16_21 MOVL $4294967248, R9 ADDQ $1, AX XORL CX, CX XORL R8, R8 XORL R12, R12 -LBB15_12: +LBB16_12: CMPL R8, $18 - JG LBB15_14 + JG LBB16_14 LEAQ 0(R12)(R12*4), DX MOVBLZX DI, DI ADDL R9, DI LEAQ 0(DI)(DX*2), R12 ADDL $1, R8 - JMP LBB15_15 + JMP LBB16_15 -LBB15_14: +LBB16_14: ADDL $1, CX -LBB15_15: +LBB16_15: CMPQ R13, AX - JE LBB15_23 + JE LBB16_23 MOVBLZX 0(R15)(AX*1), DI LEAL -48(DI), DX ADDQ $1, AX CMPB DX, $10 - JB LBB15_12 + JB LBB16_12 CMPB DI, $46 
- JNE LBB15_24 + JNE LBB16_24 MOVQ $8, 0(BX) CMPQ AX, R13 - JAE LBB15_52 + JAE LBB16_52 MOVB 0(R15)(AX*1), DX ADDB $-48, DX CMPB DX, $10 - JAE LBB15_5 + JAE LBB16_5 MOVL $0, -56(BP) - JMP LBB15_25 + JMP LBB16_25 -LBB15_21: +LBB16_21: XORL CX, CX XORL R8, R8 XORL R12, R12 - JMP LBB15_25 + JMP LBB16_25 -LBB15_22: +LBB16_22: MOVQ R8, 0(R14) - JMP LBB15_53 + JMP LBB16_53 -LBB15_23: +LBB16_23: MOVQ R13, AX - JMP LBB15_25 + JMP LBB16_25 -LBB15_24: +LBB16_24: ADDQ $-1, AX -LBB15_25: +LBB16_25: XORL DX, DX TESTL CX, CX SETGT DX MOVL DX, -68(BP) TESTQ R12, R12 - JNE LBB15_34 + JNE LBB16_34 TESTL CX, CX - JNE LBB15_34 + JNE LBB16_34 CMPQ AX, R13 - JAE LBB15_32 + JAE LBB16_32 MOVL AX, SI SUBL R13, SI XORL R8, R8 XORL CX, CX -LBB15_29: +LBB16_29: CMPB 0(R15)(AX*1), $48 - JNE LBB15_33 + JNE LBB16_33 ADDQ $1, AX ADDL $-1, CX CMPQ R13, AX - JNE LBB15_29 + JNE LBB16_29 XORL R12, R12 MOVL -56(BP), AX TESTB AX, AX - JNE LBB15_55 - JMP LBB15_60 + JNE LBB16_55 + JMP LBB16_60 -LBB15_32: +LBB16_32: XORL CX, CX XORL R8, R8 -LBB15_33: +LBB16_33: XORL R12, R12 -LBB15_34: +LBB16_34: CMPQ AX, R13 - JAE LBB15_40 + JAE LBB16_40 CMPL R8, $18 - JG LBB15_40 + JG LBB16_40 MOVL $4294967248, R9 -LBB15_37: +LBB16_37: MOVBLZX 0(R15)(AX*1), DI LEAL -48(DI), DX CMPB DX, $9 - JA LBB15_40 + JA LBB16_40 LEAQ 0(R12)(R12*4), DX ADDL R9, DI LEAQ 0(DI)(DX*2), R12 ADDL $-1, CX ADDQ $1, AX CMPQ AX, R13 - JAE LBB15_40 + JAE LBB16_40 LEAL 1(R8), DX CMPL R8, $18 MOVL DX, R8 - JL LBB15_37 + JL LBB16_37 -LBB15_40: +LBB16_40: CMPQ AX, R13 - JAE LBB15_54 + JAE LBB16_54 MOVB 0(R15)(AX*1), DX LEAL -48(DX), SI CMPB SI, $9 - JA LBB15_46 + JA LBB16_46 LEAQ -1(R13), SI -LBB15_43: +LBB16_43: CMPQ SI, AX - JE LBB15_59 + JE LBB16_59 MOVBLZX 1(R15)(AX*1), DX LEAL -48(DX), DI ADDQ $1, AX CMPB DI, $9 - JBE LBB15_43 + JBE LBB16_43 MOVL $1, -68(BP) -LBB15_46: +LBB16_46: ORB $32, DX CMPB DX, $101 - JNE LBB15_54 + JNE LBB16_54 LEAQ 1(AX), DI MOVQ $8, 0(BX) CMPQ DI, R13 - JAE LBB15_52 + JAE LBB16_52 MOVB 0(R15)(DI*1), SI CMPB SI, $45 - JE LBB15_50 + JE LBB16_50 MOVL $1, R8 CMPB SI, $43 - JNE LBB15_87 + JNE LBB16_87 -LBB15_50: +LBB16_50: ADDQ $2, AX CMPQ AX, R13 - JAE LBB15_52 + JAE LBB16_52 XORL DX, DX CMPB SI, $43 SETEQ DX LEAL 0(DX)(DX*1), R8 ADDL $-1, R8 MOVB 0(R15)(AX*1), SI - JMP LBB15_88 + JMP LBB16_88 -LBB15_52: +LBB16_52: MOVQ R13, 0(R14) MOVQ $-1, 0(BX) -LBB15_53: +LBB16_53: ADDQ $56, SP BYTE $0x5b // popq %rbx WORD $0x5c41 // popq %r12 @@ -4868,43 +5166,43 @@ LBB15_53: BYTE $0x5d // popq %rbp RET -LBB15_54: +LBB16_54: MOVL CX, SI MOVQ AX, R13 MOVL -56(BP), AX TESTB AX, AX - JE LBB15_60 + JE LBB16_60 -LBB15_55: +LBB16_55: TESTL SI, SI MOVL -44(BP), DX - JNE LBB15_58 + JNE LBB16_58 MOVQ $-9223372036854775808, AX MOVLQSX DX, CX TESTQ R12, R12 - JNS LBB15_69 + JNS LBB16_69 MOVQ R12, DI ANDQ CX, DI CMPQ DI, AX - JE LBB15_69 + JE LBB16_69 -LBB15_58: +LBB16_58: MOVQ $8, 0(BX) - JMP LBB15_61 + JMP LBB16_61 -LBB15_59: +LBB16_59: MOVL $1, -68(BP) MOVL CX, SI MOVL -56(BP), AX TESTB AX, AX - JNE LBB15_55 - JMP LBB15_60 + JNE LBB16_55 + JMP LBB16_60 -LBB15_69: +LBB16_69: LONG $0x6ef9c1c4; BYTE $0xc4 // vmovq %r12, %xmm0 IMULQ CX, R12 - QUAD $0xfffffcd10562f9c5 // vpunpckldq $-815(%rip), %xmm0, %xmm0 /* LCPI15_0(%rip) */ - QUAD $0xfffffcd9055cf9c5 // vsubpd $-807(%rip), %xmm0, %xmm0 /* LCPI15_1(%rip) */ + QUAD $0xfffffcd10562f9c5 // vpunpckldq $-815(%rip), %xmm0, %xmm0 /* LCPI16_0(%rip) */ + QUAD $0xfffffcd9055cf9c5 // vsubpd $-807(%rip), %xmm0, %xmm0 /* LCPI16_1(%rip) */ MOVQ R12, 16(BX) LONG $0x0579e3c4; WORD $0x01c8 // vpermilpd $1, %xmm0, %xmm1 LONG 
$0xc058f3c5 // vaddsd %xmm0, %xmm1, %xmm0 @@ -4912,21 +5210,21 @@ LBB15_69: LONG $0x7ef9e1c4; BYTE $0xc1 // vmovq %xmm0, %rcx ORQ AX, CX MOVQ CX, 8(BX) - JMP LBB15_86 + JMP LBB16_86 -LBB15_87: +LBB16_87: MOVQ DI, AX -LBB15_88: +LBB16_88: LEAL -48(SI), DI CMPB DI, $9 - JA LBB15_5 + JA LBB16_5 CMPQ AX, R13 - JAE LBB15_93 + JAE LBB16_93 LEAQ -1(R13), R9 XORL DI, DI -LBB15_91: +LBB16_91: MOVL DI, DX MOVBLZX SI, SI CMPL DI, $10000 @@ -4934,41 +5232,41 @@ LBB15_91: LEAL -48(SI)(DI*2), DI WORD $0x4d0f; BYTE $0xfa // cmovgel %edx, %edi CMPQ R9, AX - JE LBB15_94 + JE LBB16_94 MOVBLZX 1(R15)(AX*1), SI LEAL -48(SI), DX ADDQ $1, AX CMPB DX, $10 - JB LBB15_91 - JMP LBB15_95 + JB LBB16_91 + JMP LBB16_95 -LBB15_93: +LBB16_93: XORL DI, DI - JMP LBB15_95 + JMP LBB16_95 -LBB15_94: +LBB16_94: MOVQ R13, AX -LBB15_95: +LBB16_95: MOVQ DI, SI IMULL R8, SI ADDL CX, SI MOVQ AX, R13 -LBB15_60: +LBB16_60: MOVL -44(BP), DX -LBB15_61: +LBB16_61: MOVQ $0, -80(BP) LONG $0x6ef9c1c4; BYTE $0xc4 // vmovq %r12, %xmm0 - QUAD $0xfffffc320562f9c5 // vpunpckldq $-974(%rip), %xmm0, %xmm0 /* LCPI15_0(%rip) */ - QUAD $0xfffffc3a055cf9c5 // vsubpd $-966(%rip), %xmm0, %xmm0 /* LCPI15_1(%rip) */ + QUAD $0xfffffc320562f9c5 // vpunpckldq $-974(%rip), %xmm0, %xmm0 /* LCPI16_0(%rip) */ + QUAD $0xfffffc3a055cf9c5 // vsubpd $-966(%rip), %xmm0, %xmm0 /* LCPI16_1(%rip) */ LONG $0x0579e3c4; WORD $0x01c8 // vpermilpd $1, %xmm0, %xmm1 LONG $0xc058f3c5 // vaddsd %xmm0, %xmm1, %xmm0 LONG $0x4511fbc5; BYTE $0xc0 // vmovsd %xmm0, $-64(%rbp) MOVQ R12, AX SHRQ $52, AX - JNE LBB15_74 + JNE LBB16_74 LONG $0x7ef9e1c4; BYTE $0xc1 // vmovq %xmm0, %rcx MOVL DX, AX SHRL $31, AX @@ -4976,47 +5274,47 @@ LBB15_61: ORQ CX, AX MOVQ AX, -64(BP) TESTL SI, SI - JE LBB15_82 + JE LBB16_82 TESTQ R12, R12 - JE LBB15_82 + JE LBB16_82 LONG $0x6ef9e1c4; BYTE $0xc0 // vmovq %rax, %xmm0 LEAL -1(SI), AX CMPL AX, $36 - JA LBB15_67 + JA LBB16_67 CMPL SI, $23 - JL LBB15_70 + JL LBB16_70 LEAL -22(SI), AX - LONG $0x780d8d48; WORD $0x00bc; BYTE $0x00 // leaq $48248(%rip), %rcx /* _P10_TAB(%rip) */ + LONG $0xa00d8d48; WORD $0x00bc; BYTE $0x00 // leaq $48288(%rip), %rcx /* _P10_TAB(%rip) */ LONG $0x0459fbc5; BYTE $0xc1 // vmulsd (%rcx,%rax,8), %xmm0, %xmm0 LONG $0x4511fbc5; BYTE $0xc0 // vmovsd %xmm0, $-64(%rbp) MOVL $22, AX - JMP LBB15_71 + JMP LBB16_71 -LBB15_67: +LBB16_67: CMPL SI, $-22 - JB LBB15_74 + JB LBB16_74 NEGL SI - LONG $0x59058d48; WORD $0x00bc; BYTE $0x00 // leaq $48217(%rip), %rax /* _P10_TAB(%rip) */ + LONG $0x81058d48; WORD $0x00bc; BYTE $0x00 // leaq $48257(%rip), %rax /* _P10_TAB(%rip) */ LONG $0x045efbc5; BYTE $0xf0 // vdivsd (%rax,%rsi,8), %xmm0, %xmm0 LONG $0x4511fbc5; BYTE $0xc0 // vmovsd %xmm0, $-64(%rbp) - JMP LBB15_78 + JMP LBB16_78 -LBB15_70: +LBB16_70: MOVL SI, AX -LBB15_71: - QUAD $0xfffffbb7052ef9c5 // vucomisd $-1097(%rip), %xmm0 /* LCPI15_2(%rip) */ - JA LBB15_74 - QUAD $0xfffffbb50d10fbc5 // vmovsd $-1099(%rip), %xmm1 /* LCPI15_3(%rip) */ +LBB16_71: + QUAD $0xfffffbb7052ef9c5 // vucomisd $-1097(%rip), %xmm0 /* LCPI16_2(%rip) */ + JA LBB16_74 + QUAD $0xfffffbb50d10fbc5 // vmovsd $-1099(%rip), %xmm1 /* LCPI16_3(%rip) */ LONG $0xc82ef9c5 // vucomisd %xmm0, %xmm1 - JA LBB15_74 + JA LBB16_74 MOVL AX, AX - LONG $0x2a0d8d48; WORD $0x00bc; BYTE $0x00 // leaq $48170(%rip), %rcx /* _P10_TAB(%rip) */ + LONG $0x520d8d48; WORD $0x00bc; BYTE $0x00 // leaq $48210(%rip), %rcx /* _P10_TAB(%rip) */ LONG $0x0459fbc5; BYTE $0xc1 // vmulsd (%rcx,%rax,8), %xmm0, %xmm0 LONG $0x4511fbc5; BYTE $0xc0 // vmovsd %xmm0, $-64(%rbp) - JMP LBB15_78 + JMP LBB16_78 -LBB15_74: 
+LBB16_74: MOVQ R11, -96(BP) MOVQ R10, -56(BP) LEAQ -64(BP), CX @@ -5024,28 +5322,28 @@ LBB15_74: MOVQ SI, -88(BP) LONG $0xffea47e8; BYTE $0xff // callq _atof_eisel_lemire64 TESTB AX, AX - JE LBB15_80 + JE LBB16_80 MOVQ -88(BP), SI CMPL -68(BP), $0 - JE LBB15_81 + JE LBB16_81 ADDQ $1, R12 LEAQ -80(BP), CX MOVQ R12, DI MOVL -44(BP), DX LONG $0xffea26e8; BYTE $0xff // callq _atof_eisel_lemire64 TESTB AX, AX - JE LBB15_80 + JE LBB16_80 LONG $0x4d10fbc5; BYTE $0xb0 // vmovsd $-80(%rbp), %xmm1 LONG $0x4510fbc5; BYTE $0xc0 // vmovsd $-64(%rbp), %xmm0 LONG $0xc82ef9c5 // vucomisd %xmm0, %xmm1 - JNE LBB15_80 - JP LBB15_80 + JNE LBB16_80 + JP LBB16_80 -LBB15_78: +LBB16_78: LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq %xmm0, %rax - JMP LBB15_82 + JMP LBB16_82 -LBB15_80: +LBB16_80: MOVQ 0(R14), AX ADDQ AX, R15 MOVQ R13, SI @@ -5056,29 +5354,29 @@ LBB15_80: LONG $0xffee23e8; BYTE $0xff // callq _atof_native LONG $0x4511fbc5; BYTE $0xc0 // vmovsd %xmm0, $-64(%rbp) LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq %xmm0, %rax - JMP LBB15_83 + JMP LBB16_83 -LBB15_81: +LBB16_81: MOVQ -64(BP), AX -LBB15_82: +LBB16_82: LONG $0x6ef9e1c4; BYTE $0xc0 // vmovq %rax, %xmm0 -LBB15_83: +LBB16_83: MOVQ $-9223372036854775808, CX ADDQ $-1, CX ANDQ AX, CX MOVQ $9218868437227405312, AX CMPQ CX, AX - JNE LBB15_85 + JNE LBB16_85 MOVQ $-8, 0(BX) -LBB15_85: +LBB16_85: LONG $0x4311fbc5; BYTE $0x08 // vmovsd %xmm0, $8(%rbx) -LBB15_86: +LBB16_86: MOVQ R13, 0(R14) - JMP LBB15_53 + JMP LBB16_53 _vsigned: BYTE $0x55 // pushq %rbp @@ -5093,71 +5391,71 @@ _vsigned: MOVQ 0(SI), CX MOVQ CX, 24(DX) CMPQ AX, R11 - JAE LBB16_1 + JAE LBB17_1 MOVB 0(R8)(AX*1), CX MOVL $1, R9 CMPB CX, $45 - JNE LBB16_5 + JNE LBB17_5 ADDQ $1, AX CMPQ AX, R11 - JAE LBB16_1 + JAE LBB17_1 MOVB 0(R8)(AX*1), CX MOVQ $-1, R9 -LBB16_5: +LBB17_5: LEAL -48(CX), DI CMPB DI, $10 - JB LBB16_7 + JB LBB17_7 MOVQ AX, 0(SI) MOVQ $-2, 0(DX) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB16_1: +LBB17_1: MOVQ R11, 0(SI) MOVQ $-1, 0(DX) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB16_7: +LBB17_7: CMPB CX, $48 - JNE LBB16_12 + JNE LBB17_12 LEAQ 1(AX), DI CMPQ AX, R11 - JAE LBB16_11 + JAE LBB17_11 MOVB 0(R8)(DI*1), CX ADDB $-46, CX CMPB CX, $55 - JA LBB16_11 + JA LBB17_11 MOVBLZX CX, R10 MOVQ $36028797027352577, CX BTQ R10, CX - JAE LBB16_11 + JAE LBB17_11 -LBB16_12: +LBB17_12: CMPQ AX, R11 MOVQ R11, R10 LONG $0xd0470f4c // cmovaq %rax, %r10 XORL DI, DI -LBB16_13: +LBB17_13: CMPQ R10, AX - JE LBB16_23 + JE LBB17_23 MOVBQSX 0(R8)(AX*1), CX LEAL -48(CX), BX CMPB BX, $9 - JA LBB16_18 + JA LBB17_18 IMUL3Q $10, DI, DI - JO LBB16_17 + JO LBB17_17 ADDQ $1, AX ADDL $-48, CX IMULQ R9, CX ADDQ CX, DI - JNO LBB16_13 + JNO LBB17_13 -LBB16_17: +LBB17_17: ADDQ $-1, AX MOVQ AX, 0(SI) MOVQ $-5, 0(DX) @@ -5165,33 +5463,33 @@ LBB16_17: BYTE $0x5d // popq %rbp RET -LBB16_11: +LBB17_11: MOVQ DI, 0(SI) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB16_18: +LBB17_18: CMPQ AX, R11 - JAE LBB16_22 + JAE LBB17_22 CMPB CX, $46 - JE LBB16_25 + JE LBB17_25 CMPB CX, $69 - JE LBB16_25 + JE LBB17_25 CMPB CX, $101 - JNE LBB16_22 + JNE LBB17_22 -LBB16_25: +LBB17_25: MOVQ AX, 0(SI) MOVQ $-6, 0(DX) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB16_22: +LBB17_22: MOVQ AX, R10 -LBB16_23: +LBB17_23: MOVQ R10, 0(SI) MOVQ DI, 16(DX) BYTE $0x5b // popq %rbx @@ -5213,12 +5511,12 @@ _vunsigned: MOVQ 0(SI), AX MOVQ AX, 24(DX) CMPQ CX, R14 - JAE LBB17_1 + JAE LBB18_1 MOVB 0(R9)(CX*1), AX CMPB AX, $45 - JNE LBB17_4 + JNE LBB18_4 -LBB17_3: +LBB18_3: MOVQ CX, 0(SI) MOVQ $-6, 0(R8) BYTE $0x5b // popq %rbx @@ 
-5226,7 +5524,7 @@ LBB17_3: BYTE $0x5d // popq %rbp RET -LBB17_1: +LBB18_1: MOVQ R14, 0(SI) MOVQ $-1, 0(R8) BYTE $0x5b // popq %rbx @@ -5234,10 +5532,10 @@ LBB17_1: BYTE $0x5d // popq %rbp RET -LBB17_4: +LBB18_4: LEAL -48(AX), DX CMPB DX, $10 - JB LBB17_6 + JB LBB18_6 MOVQ CX, 0(SI) MOVQ $-2, 0(R8) BYTE $0x5b // popq %rbx @@ -5245,34 +5543,34 @@ LBB17_4: BYTE $0x5d // popq %rbp RET -LBB17_6: +LBB18_6: CMPB AX, $48 - JNE LBB17_10 + JNE LBB18_10 MOVB 1(R9)(CX*1), AX ADDB $-46, AX CMPB AX, $55 - JA LBB17_9 + JA LBB18_9 MOVBLZX AX, AX MOVQ $36028797027352577, DX BTQ AX, DX - JAE LBB17_9 + JAE LBB18_9 -LBB17_10: +LBB18_10: CMPQ R14, CX MOVQ CX, R10 LONG $0xd6470f4d // cmovaq %r14, %r10 XORL AX, AX MOVL $10, R11 -LBB17_11: +LBB18_11: CMPQ R10, CX - JE LBB17_22 + JE LBB18_22 MOVBLSX 0(R9)(CX*1), BX LEAL -48(BX), DX CMPB DX, $9 - JA LBB17_17 + JA LBB18_17 MULQ R11 - JO LBB17_16 + JO LBB18_16 ADDQ $1, CX ADDL $-48, BX XORL DI, DI @@ -5281,11 +5579,11 @@ LBB17_11: MOVQ DI, DX NEGQ DX XORQ DX, DI - JNE LBB17_16 + JNE LBB18_16 TESTQ DX, DX - JNS LBB17_11 + JNS LBB18_11 -LBB17_16: +LBB18_16: ADDQ $-1, CX MOVQ CX, 0(SI) MOVQ $-5, 0(R8) @@ -5294,20 +5592,20 @@ LBB17_16: BYTE $0x5d // popq %rbp RET -LBB17_17: +LBB18_17: CMPQ CX, R14 - JAE LBB17_21 + JAE LBB18_21 CMPB BX, $46 - JE LBB17_3 + JE LBB18_3 CMPB BX, $69 - JE LBB17_3 + JE LBB18_3 CMPB BX, $101 - JE LBB17_3 + JE LBB18_3 -LBB17_21: +LBB18_21: MOVQ CX, R10 -LBB17_22: +LBB18_22: MOVQ R10, 0(SI) MOVQ AX, 16(R8) BYTE $0x5b // popq %rbx @@ -5315,7 +5613,7 @@ LBB17_22: BYTE $0x5d // popq %rbp RET -LBB17_9: +LBB18_9: ADDQ $1, CX MOVQ CX, 0(SI) BYTE $0x5b // popq %rbx @@ -5346,31 +5644,31 @@ _fsm_exec: SUBQ $40, SP MOVL CX, -60(BP) CMPL 0(DI), $0 - JE LBB19_2 + JE LBB20_2 MOVQ DI, R12 MOVQ SI, -56(BP) MOVQ DX, -48(BP) MOVQ $-1, R14 - JMP LBB19_6 + JMP LBB20_6 -LBB19_2: +LBB20_2: MOVQ $-1, R13 - JMP LBB19_71 + JMP LBB20_71 -LBB19_3: +LBB20_3: LEAQ 3(AX), CX MOVQ -48(BP), DX MOVQ CX, 0(DX) TESTQ AX, AX - JLE LBB19_71 + JLE LBB20_71 -LBB19_4: +LBB20_4: MOVL 0(R12), CX MOVQ R14, R13 TESTL CX, CX - JE LBB19_71 + JE LBB20_71 -LBB19_6: +LBB20_6: MOVQ -56(BP), BX MOVQ 0(BX), DI MOVQ 8(BX), SI @@ -5380,49 +5678,49 @@ LBB19_6: MOVLQSX 0(R12), DX LEAQ -1(DX), CX CMPQ R14, $-1 - JNE LBB19_8 + JNE LBB20_8 MOVQ 0(R13), R14 ADDQ $-1, R14 -LBB19_8: +LBB20_8: MOVL 0(R12)(DX*4), SI ADDL $-1, SI CMPL SI, $5 - JA LBB19_13 - LONG $0x773d8d48; WORD $0x0004; BYTE $0x00 // leaq $1143(%rip), %rdi /* LJTI19_0(%rip) */ + JA LBB20_13 + LONG $0x773d8d48; WORD $0x0004; BYTE $0x00 // leaq $1143(%rip), %rdi /* LJTI20_0(%rip) */ MOVLQSX 0(DI)(SI*4), SI ADDQ DI, SI JMP SI -LBB19_10: +LBB20_10: MOVBLSX AX, AX CMPL AX, $44 - JE LBB19_28 + JE LBB20_28 CMPL AX, $93 - JE LBB19_12 - JMP LBB19_66 + JE LBB20_12 + JMP LBB20_66 -LBB19_13: +LBB20_13: MOVL CX, 0(R12) MOVBLSX AX, AX CMPL AX, $123 - JBE LBB19_24 - JMP LBB19_66 + JBE LBB20_24 + JMP LBB20_66 -LBB19_14: +LBB20_14: MOVBLSX AX, AX CMPL AX, $44 - JNE LBB19_15 + JNE LBB20_15 CMPL DX, $65535 - JG LBB19_70 + JG LBB20_70 LEAL 1(DX), AX MOVL AX, 0(R12) MOVL $3, 4(R12)(DX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_16: +LBB20_16: CMPB AX, $34 - JNE LBB19_66 + JNE LBB20_66 MOVL $4, 0(R12)(DX*4) MOVQ 0(R13), R15 MOVQ BX, DI @@ -5432,36 +5730,36 @@ LBB19_16: MOVQ R13, BX MOVQ AX, R13 TESTQ AX, AX - JS LBB19_63 + JS LBB20_63 -LBB19_18: +LBB20_18: MOVQ R13, 0(BX) TESTQ R15, R15 - JG LBB19_4 - JMP LBB19_19 + JG LBB20_4 + JMP LBB20_19 -LBB19_20: +LBB20_20: CMPB AX, $58 - JNE LBB19_66 + JNE LBB20_66 MOVL $0, 0(R12)(DX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_22: 
+LBB20_22: CMPB AX, $93 - JE LBB19_12 + JE LBB20_12 MOVL $1, 0(R12)(DX*4) MOVBLSX AX, AX CMPL AX, $123 - JA LBB19_66 + JA LBB20_66 -LBB19_24: +LBB20_24: MOVQ $-1, R13 - LONG $0xa80d8d48; WORD $0x0003; BYTE $0x00 // leaq $936(%rip), %rcx /* LJTI19_1(%rip) */ + LONG $0xa80d8d48; WORD $0x0003; BYTE $0x00 // leaq $936(%rip), %rcx /* LJTI20_1(%rip) */ MOVLQSX 0(CX)(AX*4), AX ADDQ CX, AX JMP AX -LBB19_27: +LBB20_27: MOVQ -48(BP), BX MOVQ 0(BX), R15 LEAQ -1(R15), R13 @@ -5481,45 +5779,45 @@ LBB19_27: ADDQ R15, DX MOVQ DX, 0(BX) TESTQ R13, R13 - JNS LBB19_4 - JMP LBB19_71 + JNS LBB20_4 + JMP LBB20_71 -LBB19_25: +LBB20_25: MOVBLSX AX, AX CMPL AX, $34 - JE LBB19_32 + JE LBB20_32 -LBB19_15: +LBB20_15: CMPL AX, $125 - JNE LBB19_66 + JNE LBB20_66 -LBB19_12: +LBB20_12: MOVL CX, 0(R12) MOVQ R14, R13 TESTL CX, CX - JNE LBB19_6 - JMP LBB19_71 + JNE LBB20_6 + JMP LBB20_71 -LBB19_28: +LBB20_28: CMPL DX, $65535 - JG LBB19_70 + JG LBB20_70 LEAL 1(DX), AX MOVL AX, 0(R12) MOVL $0, 4(R12)(DX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_32: +LBB20_32: MOVL $2, 0(R12)(DX*4) CMPL -60(BP), $0 - JE LBB19_35 + JE LBB20_35 MOVQ BX, DI MOVQ R13, SI LONG $0x000597e8; BYTE $0x00 // callq _validate_string TESTQ AX, AX - JNS LBB19_37 - JMP LBB19_34 + JNS LBB20_37 + JMP LBB20_34 -LBB19_35: +LBB20_35: MOVQ 0(R13), R15 MOVQ BX, DI MOVQ R15, SI @@ -5528,31 +5826,31 @@ LBB19_35: MOVQ R13, BX MOVQ AX, R13 TESTQ AX, AX - JS LBB19_63 + JS LBB20_63 MOVQ R13, 0(BX) TESTQ R15, R15 - JLE LBB19_19 + JLE LBB20_19 -LBB19_37: +LBB20_37: MOVLQSX 0(R12), AX CMPQ AX, $65535 - JG LBB19_70 + JG LBB20_70 LEAL 1(AX), CX MOVL CX, 0(R12) MOVL $4, 4(R12)(AX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_39: +LBB20_39: CMPL -60(BP), $0 - JE LBB19_62 + JE LBB20_62 MOVQ -56(BP), DI MOVQ -48(BP), SI LONG $0x000523e8; BYTE $0x00 // callq _validate_string TESTQ AX, AX - JNS LBB19_4 - JMP LBB19_34 + JNS LBB20_4 + JMP LBB20_34 -LBB19_41: +LBB20_41: MOVQ -48(BP), BX MOVQ 0(BX), R13 MOVQ -56(BP), AX @@ -5562,79 +5860,79 @@ LBB19_41: SUBQ R13, SI LONG $0x000c07e8; BYTE $0x00 // callq _do_skip_number TESTQ AX, AX - JS LBB19_65 + JS LBB20_65 ADDQ R13, AX MOVQ AX, 0(BX) TESTQ R13, R13 - JG LBB19_4 - JMP LBB19_43 + JG LBB20_4 + JMP LBB20_43 -LBB19_44: +LBB20_44: MOVLQSX 0(R12), AX CMPQ AX, $65535 - JG LBB19_70 + JG LBB20_70 LEAL 1(AX), CX MOVL CX, 0(R12) MOVL $5, 4(R12)(AX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_46: +LBB20_46: MOVQ -48(BP), AX MOVQ 0(AX), AX MOVQ -56(BP), SI MOVQ 8(SI), CX LEAQ -4(CX), DX CMPQ AX, DX - JA LBB19_64 + JA LBB20_64 MOVQ 0(SI), CX MOVL 0(CX)(AX*1), DX CMPL DX, $1702063201 - JNE LBB19_67 + JNE LBB20_67 LEAQ 4(AX), CX MOVQ -48(BP), DX MOVQ CX, 0(DX) TESTQ AX, AX - JG LBB19_4 - JMP LBB19_49 + JG LBB20_4 + JMP LBB20_49 -LBB19_50: +LBB20_50: MOVQ -48(BP), AX MOVQ 0(AX), AX MOVQ -56(BP), SI MOVQ 8(SI), CX LEAQ -3(CX), DX CMPQ AX, DX - JA LBB19_64 + JA LBB20_64 MOVQ 0(SI), CX LEAQ -1(AX), R13 CMPL -1(CX)(AX*1), $1819047278 - JE LBB19_3 - JMP LBB19_52 + JE LBB20_3 + JMP LBB20_52 -LBB19_55: +LBB20_55: MOVQ -48(BP), AX MOVQ 0(AX), AX MOVQ -56(BP), SI MOVQ 8(SI), CX LEAQ -3(CX), DX CMPQ AX, DX - JA LBB19_64 + JA LBB20_64 MOVQ 0(SI), CX LEAQ -1(AX), R13 CMPL -1(CX)(AX*1), $1702195828 - JE LBB19_3 - JMP LBB19_57 + JE LBB20_3 + JMP LBB20_57 -LBB19_60: +LBB20_60: MOVLQSX 0(R12), AX CMPQ AX, $65535 - JG LBB19_70 + JG LBB20_70 LEAL 1(AX), CX MOVL CX, 0(R12) MOVL $6, 4(R12)(AX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_62: +LBB20_62: MOVQ -48(BP), BX MOVQ 0(BX), R15 MOVQ -56(BP), DI @@ -5643,102 +5941,102 @@ LBB19_62: LONG $0xfff10ee8; BYTE $0xff // callq _advance_string MOVQ 
AX, R13 TESTQ AX, AX - JNS LBB19_18 + JNS LBB20_18 -LBB19_63: +LBB20_63: MOVQ -56(BP), AX MOVQ 8(AX), AX MOVQ AX, 0(BX) - JMP LBB19_71 + JMP LBB20_71 -LBB19_70: +LBB20_70: MOVQ $-7, R13 - JMP LBB19_71 + JMP LBB20_71 -LBB19_19: +LBB20_19: ADDQ $-1, R15 MOVQ R15, R13 - JMP LBB19_71 + JMP LBB20_71 -LBB19_34: +LBB20_34: MOVQ AX, R13 - JMP LBB19_71 + JMP LBB20_71 -LBB19_64: +LBB20_64: MOVQ -48(BP), AX MOVQ CX, 0(AX) - JMP LBB19_71 + JMP LBB20_71 -LBB19_65: +LBB20_65: NOTQ AX ADDQ AX, R13 MOVQ R13, 0(BX) - JMP LBB19_66 + JMP LBB20_66 -LBB19_43: +LBB20_43: ADDQ $-1, R13 - JMP LBB19_71 + JMP LBB20_71 -LBB19_67: +LBB20_67: MOVQ $-2, R13 CMPB DX, $97 - JNE LBB19_71 + JNE LBB20_71 ADDQ $1, AX MOVL $1702063201, DX MOVQ -48(BP), BX -LBB19_69: +LBB20_69: SHRL $8, DX MOVQ AX, 0(BX) MOVBLSX 0(CX)(AX*1), SI MOVBLZX DX, DI ADDQ $1, AX CMPL DI, SI - JE LBB19_69 - JMP LBB19_71 + JE LBB20_69 + JMP LBB20_71 -LBB19_52: +LBB20_52: MOVQ -48(BP), BX MOVQ R13, 0(BX) CMPB 0(CX)(R13*1), $110 - JNE LBB19_66 + JNE LBB20_66 MOVL $1819047278, DX -LBB19_54: +LBB20_54: SHRL $8, DX MOVQ AX, 0(BX) MOVBLSX 0(CX)(AX*1), SI MOVBLZX DX, DI ADDQ $1, AX CMPL DI, SI - JE LBB19_54 - JMP LBB19_66 + JE LBB20_54 + JMP LBB20_66 -LBB19_49: +LBB20_49: ADDQ $-1, AX MOVQ AX, R13 - JMP LBB19_71 + JMP LBB20_71 -LBB19_57: +LBB20_57: MOVQ -48(BP), BX MOVQ R13, 0(BX) CMPB 0(CX)(R13*1), $116 - JNE LBB19_66 + JNE LBB20_66 MOVL $1702195828, DX -LBB19_59: +LBB20_59: SHRL $8, DX MOVQ AX, 0(BX) MOVBLSX 0(CX)(AX*1), SI MOVBLZX DX, DI ADDQ $1, AX CMPL DI, SI - JE LBB19_59 + JE LBB20_59 -LBB19_66: +LBB20_66: MOVQ $-2, R13 -LBB19_71: +LBB20_71: MOVQ R13, AX ADDQ $40, SP BYTE $0x5b // popq %rbx @@ -5749,155 +6047,155 @@ LBB19_71: BYTE $0x5d // popq %rbp RET -// .set L19_0_set_10, LBB19_10-LJTI19_0 -// .set L19_0_set_14, LBB19_14-LJTI19_0 -// .set L19_0_set_16, LBB19_16-LJTI19_0 -// .set L19_0_set_20, LBB19_20-LJTI19_0 -// .set L19_0_set_22, LBB19_22-LJTI19_0 -// .set L19_0_set_25, LBB19_25-LJTI19_0 -LJTI19_0: - LONG $0xfffffb92 // .long L19_0_set_10 - LONG $0xfffffbc1 // .long L19_0_set_14 - LONG $0xfffffbee // .long L19_0_set_16 - LONG $0xfffffc31 // .long L19_0_set_20 - LONG $0xfffffc46 // .long L19_0_set_22 - LONG $0xfffffcce // .long L19_0_set_25 - - // .set L19_1_set_71, LBB19_71-LJTI19_1 - // .set L19_1_set_66, LBB19_66-LJTI19_1 - // .set L19_1_set_39, LBB19_39-LJTI19_1 - // .set L19_1_set_41, LBB19_41-LJTI19_1 - // .set L19_1_set_27, LBB19_27-LJTI19_1 - // .set L19_1_set_44, LBB19_44-LJTI19_1 - // .set L19_1_set_46, LBB19_46-LJTI19_1 - // .set L19_1_set_50, LBB19_50-LJTI19_1 - // .set L19_1_set_55, LBB19_55-LJTI19_1 - // .set L19_1_set_60, LBB19_60-LJTI19_1 -LJTI19_1: - LONG $0xffffffd6 // .long L19_1_set_71 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long 
L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xfffffd72 // .long L19_1_set_39 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xfffffd97 // .long L19_1_set_41 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xfffffc61 // .long L19_1_set_27 - LONG $0xfffffc61 // .long L19_1_set_27 - LONG $0xfffffc61 // .long L19_1_set_27 - LONG $0xfffffc61 // .long L19_1_set_27 - LONG $0xfffffc61 // .long L19_1_set_27 - LONG $0xfffffc61 // .long L19_1_set_27 - LONG $0xfffffc61 // .long L19_1_set_27 - LONG $0xfffffc61 // .long L19_1_set_27 - LONG $0xfffffc61 // .long L19_1_set_27 - LONG $0xfffffc61 // .long L19_1_set_27 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xfffffdd1 // .long L19_1_set_44 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xfffffdf6 // .long L19_1_set_46 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG 
$0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xfffffe3d // .long L19_1_set_50 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xfffffe73 // .long L19_1_set_55 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xffffffcf // .long L19_1_set_66 - LONG $0xfffffea9 // .long L19_1_set_60 +// .set L20_0_set_10, LBB20_10-LJTI20_0 +// .set L20_0_set_14, LBB20_14-LJTI20_0 +// .set L20_0_set_16, LBB20_16-LJTI20_0 +// .set L20_0_set_20, LBB20_20-LJTI20_0 +// .set L20_0_set_22, LBB20_22-LJTI20_0 +// .set L20_0_set_25, LBB20_25-LJTI20_0 +LJTI20_0: + LONG $0xfffffb92 // .long L20_0_set_10 + LONG $0xfffffbc1 // .long L20_0_set_14 + LONG $0xfffffbee // .long L20_0_set_16 + LONG $0xfffffc31 // .long L20_0_set_20 + LONG $0xfffffc46 // .long L20_0_set_22 + LONG $0xfffffcce // .long L20_0_set_25 + + // .set L20_1_set_71, LBB20_71-LJTI20_1 + // .set L20_1_set_66, LBB20_66-LJTI20_1 + // .set L20_1_set_39, LBB20_39-LJTI20_1 + // .set L20_1_set_41, LBB20_41-LJTI20_1 + // .set L20_1_set_27, LBB20_27-LJTI20_1 + // .set L20_1_set_44, LBB20_44-LJTI20_1 + // .set L20_1_set_46, LBB20_46-LJTI20_1 + // .set L20_1_set_50, LBB20_50-LJTI20_1 + // .set L20_1_set_55, LBB20_55-LJTI20_1 + // .set L20_1_set_60, LBB20_60-LJTI20_1 +LJTI20_1: + LONG $0xffffffd6 // .long L20_1_set_71 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xfffffd72 // .long L20_1_set_39 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xfffffd97 // .long L20_1_set_41 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + 
LONG $0xfffffc61 // .long L20_1_set_27 + LONG $0xfffffc61 // .long L20_1_set_27 + LONG $0xfffffc61 // .long L20_1_set_27 + LONG $0xfffffc61 // .long L20_1_set_27 + LONG $0xfffffc61 // .long L20_1_set_27 + LONG $0xfffffc61 // .long L20_1_set_27 + LONG $0xfffffc61 // .long L20_1_set_27 + LONG $0xfffffc61 // .long L20_1_set_27 + LONG $0xfffffc61 // .long L20_1_set_27 + LONG $0xfffffc61 // .long L20_1_set_27 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xfffffdd1 // .long L20_1_set_44 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xfffffdf6 // .long L20_1_set_46 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xfffffe3d // .long L20_1_set_50 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xfffffe73 // .long L20_1_set_55 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xffffffcf // .long L20_1_set_66 + LONG $0xfffffea9 // .long L20_1_set_60 _skip_array: BYTE $0x55 // pushq %rbp @@ -5939,16 +6237,16 @@ _skip_string: MOVQ BX, SI LONG $0xffed96e8; BYTE $0xff // callq _advance_string TESTQ AX, AX - JS LBB22_2 + JS LBB23_2 ADDQ $-1, BX MOVQ AX, CX MOVQ BX, AX - JMP LBB22_3 + JMP LBB23_3 -LBB22_2: +LBB23_2: MOVQ 8(R15), CX -LBB22_3: +LBB23_3: MOVQ CX, 0(R14) ADDQ $8, SP BYTE $0x5b // popq %rbx @@ -5957,13 +6255,13 @@ LBB22_3: BYTE $0x5d // popq %rbp RET -LCPI23_0: +LCPI24_0: 
QUAD $0x2222222222222222; QUAD $0x2222222222222222 // .space 16, '""""""""""""""""' -LCPI23_1: +LCPI24_1: QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -LCPI23_2: +LCPI24_2: QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 16, ' ' _validate_string: @@ -5980,22 +6278,22 @@ _validate_string: MOVQ 8(DI), CX MOVQ CX, -72(BP) SUBQ DX, CX - JE LBB23_17 + JE LBB24_17 MOVQ 0(DI), AX MOVQ AX, -56(BP) ADDQ DX, AX CMPQ CX, $64 MOVQ DX, -64(BP) MOVQ AX, -80(BP) - JB LBB23_26 + JB LBB24_26 MOVQ $-1, -48(BP) XORL R13, R13 - QUAD $0xffffff78056f7ac5 // vmovdqu $-136(%rip), %xmm8 /* LCPI23_0(%rip) */ - QUAD $0xffffff800d6ffac5 // vmovdqu $-128(%rip), %xmm1 /* LCPI23_1(%rip) */ - QUAD $0xffffff88156ffac5 // vmovdqu $-120(%rip), %xmm2 /* LCPI23_2(%rip) */ + QUAD $0xffffff78056f7ac5 // vmovdqu $-136(%rip), %xmm8 /* LCPI24_0(%rip) */ + QUAD $0xffffff800d6ffac5 // vmovdqu $-128(%rip), %xmm1 /* LCPI24_1(%rip) */ + QUAD $0xffffff88156ffac5 // vmovdqu $-120(%rip), %xmm2 /* LCPI24_2(%rip) */ LONG $0xdb76e1c5 // vpcmpeqd %xmm3, %xmm3, %xmm3 -LBB23_3: +LBB24_3: MOVQ -56(BP), AX LONG $0x246ffac5; BYTE $0x10 // vmovdqu (%rax,%rdx), %xmm4 LONG $0x6c6ffac5; WORD $0x1010 // vmovdqu $16(%rax,%rdx), %xmm5 @@ -6046,35 +6344,35 @@ LBB23_3: SHLQ $16, BX ORQ R10, BX ORQ R9, SI - JNE LBB23_9 + JNE LBB24_9 TESTQ R13, R13 - JNE LBB23_11 + JNE LBB24_11 XORL R13, R13 -LBB23_6: +LBB24_6: LONG $0xc464e9c5 // vpcmpgtb %xmm4, %xmm2, %xmm0 LONG $0xe364d9c5 // vpcmpgtb %xmm3, %xmm4, %xmm4 LONG $0xc4dbf9c5 // vpand %xmm4, %xmm0, %xmm0 LONG $0xf0d7f9c5 // vpmovmskb %xmm0, %esi ORQ SI, BX TESTQ AX, AX - JNE LBB23_12 + JNE LBB24_12 TESTQ BX, BX - JNE LBB23_19 + JNE LBB24_19 ADDQ $-64, CX ADDQ $64, DX CMPQ CX, $63 - JA LBB23_3 - JMP LBB23_21 + JA LBB24_3 + JMP LBB24_21 -LBB23_9: +LBB24_9: CMPQ -48(BP), $-1 - JNE LBB23_11 + JNE LBB24_11 BSFQ SI, DI ADDQ DX, DI MOVQ DI, -48(BP) -LBB23_11: +LBB24_11: MOVQ R13, R9 NOTQ R9 ANDQ SI, R9 @@ -6094,24 +6392,24 @@ LBB23_11: ANDQ R8, R10 NOTQ R10 ANDQ R10, AX - JMP LBB23_6 + JMP LBB24_6 -LBB23_12: +LBB24_12: BSFQ AX, CX LEAQ 0(CX)(DX*1), R12 ADDQ $1, R12 TESTQ BX, BX - JE LBB23_14 + JE LBB24_14 -LBB23_13: +LBB24_13: BSFQ BX, AX CMPQ AX, CX - JBE LBB23_27 + JBE LBB24_27 -LBB23_14: +LBB24_14: MOVQ -64(BP), BX TESTQ R12, R12 - JS LBB23_16 + JS LBB24_16 LEAQ -1(BX), R14 MOVQ BX, SI NOTQ SI @@ -6124,18 +6422,18 @@ LBB23_14: MOVQ $-2, R12 LONG $0xe6480f4d // cmovsq %r14, %r12 MOVQ BX, CX - JMP LBB23_18 + JMP LBB24_18 -LBB23_16: +LBB24_16: CMPQ R12, $-1 MOVQ -48(BP), CX - JNE LBB23_18 + JNE LBB24_18 -LBB23_17: +LBB24_17: MOVQ $-1, R12 MOVQ -72(BP), CX -LBB23_18: +LBB24_18: MOVQ -88(BP), AX MOVQ CX, 0(AX) MOVQ R12, AX @@ -6148,36 +6446,36 @@ LBB23_18: BYTE $0x5d // popq %rbp RET -LBB23_19: +LBB24_19: MOVQ $-2, R12 MOVQ -48(BP), CX CMPQ CX, $-1 - JNE LBB23_18 + JNE LBB24_18 -LBB23_20: +LBB24_20: BSFQ BX, CX ADDQ DX, CX - JMP LBB23_18 + JMP LBB24_18 -LBB23_21: +LBB24_21: ADDQ -56(BP), DX CMPQ CX, $32 - JB LBB23_33 + JB LBB24_33 -LBB23_22: +LBB24_22: LONG $0x026ffac5 // vmovdqu (%rdx), %xmm0 LONG $0x4a6ffac5; BYTE $0x10 // vmovdqu $16(%rdx), %xmm1 - QUAD $0xfffffd39156ffac5 // vmovdqu $-711(%rip), %xmm2 /* LCPI23_0(%rip) */ + QUAD $0xfffffd39156ffac5 // vmovdqu $-711(%rip), %xmm2 /* LCPI24_0(%rip) */ LONG $0xda74f9c5 // vpcmpeqb %xmm2, %xmm0, %xmm3 LONG $0xdbd779c5 // vpmovmskb %xmm3, %r11d LONG $0xd274f1c5 // vpcmpeqb %xmm2, %xmm1, %xmm2 LONG $0xc2d7f9c5 // vpmovmskb %xmm2, %eax - QUAD $0xfffffd31156ffac5 // vmovdqu $-719(%rip), %xmm2 /* 
LCPI23_1(%rip) */ + QUAD $0xfffffd31156ffac5 // vmovdqu $-719(%rip), %xmm2 /* LCPI24_1(%rip) */ LONG $0xda74f9c5 // vpcmpeqb %xmm2, %xmm0, %xmm3 LONG $0xc3d779c5 // vpmovmskb %xmm3, %r8d LONG $0xd274f1c5 // vpcmpeqb %xmm2, %xmm1, %xmm2 LONG $0xf2d7f9c5 // vpmovmskb %xmm2, %esi - QUAD $0xfffffd29156ffac5 // vmovdqu $-727(%rip), %xmm2 /* LCPI23_2(%rip) */ + QUAD $0xfffffd29156ffac5 // vmovdqu $-727(%rip), %xmm2 /* LCPI24_2(%rip) */ LONG $0xd864e9c5 // vpcmpgtb %xmm0, %xmm2, %xmm3 LONG $0xe476d9c5 // vpcmpeqd %xmm4, %xmm4, %xmm4 LONG $0xc464f9c5 // vpcmpgtb %xmm4, %xmm0, %xmm0 @@ -6192,49 +6490,49 @@ LBB23_22: SHLQ $16, SI SHLQ $16, BX ORQ SI, R8 - JNE LBB23_28 + JNE LBB24_28 TESTQ R13, R13 - JNE LBB23_30 + JNE LBB24_30 XORL R13, R13 ORQ R9, BX TESTQ R11, R11 - JE LBB23_31 + JE LBB24_31 -LBB23_25: +LBB24_25: SUBQ -56(BP), DX BSFQ R11, CX LEAQ 0(DX)(CX*1), R12 ADDQ $1, R12 TESTQ BX, BX - JNE LBB23_13 - JMP LBB23_14 + JNE LBB24_13 + JMP LBB24_14 -LBB23_26: +LBB24_26: MOVQ $-1, -48(BP) XORL R13, R13 MOVQ AX, DX CMPQ CX, $32 - JAE LBB23_22 - JMP LBB23_33 + JAE LBB24_22 + JMP LBB24_33 -LBB23_27: +LBB24_27: ADDQ DX, AX MOVQ -48(BP), CX CMPQ CX, $-1 LONG $0xc8440f48 // cmoveq %rax, %rcx MOVQ $-2, R12 - JMP LBB23_18 + JMP LBB24_18 -LBB23_28: +LBB24_28: CMPQ -48(BP), $-1 - JNE LBB23_30 + JNE LBB24_30 MOVQ DX, R10 SUBQ -56(BP), R10 BSFQ R8, DI ADDQ R10, DI MOVQ DI, -48(BP) -LBB23_30: +LBB24_30: MOVL R13, SI NOTL SI ANDL R8, SI @@ -6253,48 +6551,48 @@ LBB23_30: ANDL AX, R11 ORQ R9, BX TESTQ R11, R11 - JNE LBB23_25 + JNE LBB24_25 -LBB23_31: +LBB24_31: TESTQ BX, BX - JNE LBB23_47 + JNE LBB24_47 ADDQ $32, DX ADDQ $-32, CX -LBB23_33: +LBB24_33: MOVQ -56(BP), SI NOTQ SI TESTQ R13, R13 - JNE LBB23_49 + JNE LBB24_49 TESTQ CX, CX - JE LBB23_44 + JE LBB24_44 -LBB23_35: +LBB24_35: LEAQ 1(SI), R9 MOVQ -48(BP), AX -LBB23_36: +LBB24_36: MOVQ AX, -48(BP) XORL BX, BX -LBB23_37: +LBB24_37: MOVBLZX 0(DX)(BX*1), AX CMPB AX, $34 - JE LBB23_43 + JE LBB24_43 CMPB AX, $92 - JE LBB23_41 + JE LBB24_41 CMPB AX, $31 - JBE LBB23_51 + JBE LBB24_51 ADDQ $1, BX CMPQ CX, BX - JNE LBB23_37 - JMP LBB23_45 + JNE LBB24_37 + JMP LBB24_45 -LBB23_41: +LBB24_41: LEAQ -1(CX), AX CMPQ AX, BX MOVQ -48(BP), AX - JE LBB23_17 + JE LBB24_17 LEAQ 0(R9)(DX*1), R8 ADDQ BX, R8 CMPQ AX, $-1 @@ -6307,35 +6605,35 @@ LBB23_41: ADDQ $-2, CX CMPQ CX, BX MOVQ R8, CX - JNE LBB23_36 - JMP LBB23_17 + JNE LBB24_36 + JMP LBB24_17 -LBB23_43: +LBB24_43: ADDQ BX, DX ADDQ $1, DX -LBB23_44: +LBB24_44: SUBQ -56(BP), DX MOVQ DX, R12 - JMP LBB23_14 + JMP LBB24_14 -LBB23_45: +LBB24_45: CMPB AX, $34 - JNE LBB23_17 + JNE LBB24_17 ADDQ CX, DX - JMP LBB23_44 + JMP LBB24_44 -LBB23_47: +LBB24_47: MOVQ $-2, R12 CMPQ -48(BP), $-1 - JNE LBB23_48 + JNE LBB24_48 SUBQ -56(BP), DX - JMP LBB23_20 + JMP LBB24_20 -LBB23_49: +LBB24_49: TESTQ CX, CX MOVQ -48(BP), DI - JE LBB23_17 + JE LBB24_17 LEAQ 0(DX)(SI*1), AX CMPQ DI, $-1 LONG $0xf8440f48 // cmoveq %rax, %rdi @@ -6343,23 +6641,23 @@ LBB23_49: ADDQ $1, DX ADDQ $-1, CX TESTQ CX, CX - JNE LBB23_35 - JMP LBB23_44 + JNE LBB24_35 + JMP LBB24_44 -LBB23_51: +LBB24_51: MOVQ $-2, R12 CMPQ -48(BP), $-1 - JE LBB23_54 + JE LBB24_54 -LBB23_48: +LBB24_48: MOVQ -48(BP), CX - JMP LBB23_18 + JMP LBB24_18 -LBB23_54: +LBB24_54: ADDQ DX, SI LEAQ 0(BX)(SI*1), CX ADDQ $1, CX - JMP LBB23_18 + JMP LBB24_18 _utf8_validate: BYTE $0x55 // pushq %rbp @@ -6369,143 +6667,143 @@ _utf8_validate: BYTE $0x53 // pushq %rbx MOVQ $-1, AX TESTQ SI, SI - JLE LBB24_28 - LONG $0xdf058d4c; WORD $0x00ac; BYTE $0x00 // leaq $44255(%rip), %r8 /* _first(%rip) */ - LONG $0xd80d8d4c; 
WORD $0x00ad; BYTE $0x00 // leaq $44504(%rip), %r9 /* _ranges(%rip) */ - LONG $0x2f158d4c; WORD $0x0001; BYTE $0x00 // leaq $303(%rip), %r10 /* LJTI24_0(%rip) */ + JLE LBB25_28 + LONG $0x07058d4c; WORD $0x00ad; BYTE $0x00 // leaq $44295(%rip), %r8 /* _first(%rip) */ + LONG $0x000d8d4c; WORD $0x00ae; BYTE $0x00 // leaq $44544(%rip), %r9 /* _ranges(%rip) */ + LONG $0x2f158d4c; WORD $0x0001; BYTE $0x00 // leaq $303(%rip), %r10 /* LJTI25_0(%rip) */ MOVQ DI, R11 -LBB24_2: +LBB25_2: CMPB 0(R11), $0 - JS LBB24_3 + JS LBB25_3 MOVQ SI, DX MOVQ R11, CX CMPQ SI, $16 - JL LBB24_15 + JL LBB25_15 XORL BX, BX XORL DX, DX -LBB24_6: +LBB25_6: LONG $0x6f7ac1c4; WORD $0x1b04 // vmovdqu (%r11,%rbx), %xmm0 LONG $0xc8d7f9c5 // vpmovmskb %xmm0, %ecx TESTL CX, CX - JNE LBB24_7 + JNE LBB25_7 ADDQ $16, BX LEAQ 0(SI)(DX*1), CX ADDQ $-16, CX ADDQ $-16, DX ADDQ $16, CX CMPQ CX, $31 - JG LBB24_6 + JG LBB25_6 MOVQ R11, CX SUBQ DX, CX MOVQ SI, DX SUBQ BX, DX -LBB24_15: +LBB25_15: TESTQ DX, DX - JLE LBB24_28 + JLE LBB25_28 ADDQ $1, DX MOVQ CX, BX SUBQ R11, BX -LBB24_17: +LBB25_17: CMPB 0(CX), $0 - JS LBB24_8 + JS LBB25_8 ADDQ $1, CX ADDQ $-1, DX ADDQ $1, BX CMPQ DX, $1 - JG LBB24_17 - JMP LBB24_28 + JG LBB25_17 + JMP LBB25_28 -LBB24_3: +LBB25_3: XORL BX, BX -LBB24_8: +LBB25_8: CMPQ BX, $-1 - JE LBB24_28 + JE LBB25_28 -LBB24_9: +LBB25_9: SUBQ BX, SI - JLE LBB24_28 + JLE LBB25_28 LEAQ 0(R11)(BX*1), R14 MOVBLZX 0(R11)(BX*1), R11 MOVBLZX 0(R11)(R8*1), BX MOVL BX, DX ANDL $7, DX CMPQ SI, DX - JB LBB24_26 + JB LBB25_26 CMPB DX, $4 - JA LBB24_26 + JA LBB25_26 MOVL $1, R15 MOVBLZX DX, CX MOVLQSX 0(R10)(CX*4), CX ADDQ R10, CX JMP CX -LBB24_19: +LBB25_19: MOVB 3(R14), CX TESTB CX, CX - JNS LBB24_26 + JNS LBB25_26 CMPB CX, $-65 - JA LBB24_26 + JA LBB25_26 -LBB24_21: +LBB25_21: MOVB 2(R14), CX TESTB CX, CX - JNS LBB24_26 + JNS LBB25_26 CMPB CX, $-65 - JA LBB24_26 + JA LBB25_26 -LBB24_23: +LBB25_23: SHRQ $4, BX MOVB 1(R14), CX CMPB CX, 0(R9)(BX*2) - JB LBB24_26 + JB LBB25_26 CMPB 1(R9)(BX*2), CX - JB LBB24_26 + JB LBB25_26 MOVQ DX, R15 TESTB R11, R11 - JNS LBB24_26 + JNS LBB25_26 -LBB24_27: +LBB25_27: ADDQ R15, R14 MOVQ R14, R11 SUBQ R15, SI - JG LBB24_2 - JMP LBB24_28 + JG LBB25_2 + JMP LBB25_28 -LBB24_7: +LBB25_7: BSFW CX, CX MOVWLZX CX, BX SUBQ DX, BX CMPQ BX, $-1 - JNE LBB24_9 - JMP LBB24_28 + JNE LBB25_9 + JMP LBB25_28 -LBB24_26: +LBB25_26: SUBQ DI, R14 MOVQ R14, AX -LBB24_28: +LBB25_28: BYTE $0x5b // popq %rbx WORD $0x5e41 // popq %r14 WORD $0x5f41 // popq %r15 BYTE $0x5d // popq %rbp RET -// .set L24_0_set_27, LBB24_27-LJTI24_0 -// .set L24_0_set_26, LBB24_26-LJTI24_0 -// .set L24_0_set_23, LBB24_23-LJTI24_0 -// .set L24_0_set_21, LBB24_21-LJTI24_0 -// .set L24_0_set_19, LBB24_19-LJTI24_0 -LJTI24_0: - LONG $0xffffffcc // .long L24_0_set_27 - LONG $0xfffffff3 // .long L24_0_set_26 - LONG $0xffffffaf // .long L24_0_set_23 - LONG $0xffffffa2 // .long L24_0_set_21 - LONG $0xffffff95 // .long L24_0_set_19 +// .set L25_0_set_27, LBB25_27-LJTI25_0 +// .set L25_0_set_26, LBB25_26-LJTI25_0 +// .set L25_0_set_23, LBB25_23-LJTI25_0 +// .set L25_0_set_21, LBB25_21-LJTI25_0 +// .set L25_0_set_19, LBB25_19-LJTI25_0 +LJTI25_0: + LONG $0xffffffcc // .long L25_0_set_27 + LONG $0xfffffff3 // .long L25_0_set_26 + LONG $0xffffffaf // .long L25_0_set_23 + LONG $0xffffffa2 // .long L25_0_set_21 + LONG $0xffffff95 // .long L25_0_set_19 _skip_negative: BYTE $0x55 // pushq %rbp @@ -6521,44 +6819,44 @@ _skip_negative: MOVQ AX, DI LONG $0x000099e8; BYTE $0x00 // callq _do_skip_number TESTQ AX, AX - JS LBB25_1 + JS LBB26_1 ADDQ BX, AX MOVQ AX, 0(R14) ADDQ 
$-1, BX - JMP LBB25_3 + JMP LBB26_3 -LBB25_1: +LBB26_1: NOTQ AX ADDQ AX, BX MOVQ BX, 0(R14) MOVQ $-2, BX -LBB25_3: +LBB26_3: MOVQ BX, AX BYTE $0x5b // popq %rbx WORD $0x5e41 // popq %r14 BYTE $0x5d // popq %rbp RET -LCPI26_0: +LCPI27_0: QUAD $0x2f2f2f2f2f2f2f2f; QUAD $0x2f2f2f2f2f2f2f2f // .space 16, '////////////////' -LCPI26_1: +LCPI27_1: QUAD $0x3a3a3a3a3a3a3a3a; QUAD $0x3a3a3a3a3a3a3a3a // .space 16, '::::::::::::::::' -LCPI26_2: +LCPI27_2: QUAD $0x2b2b2b2b2b2b2b2b; QUAD $0x2b2b2b2b2b2b2b2b // .space 16, '++++++++++++++++' -LCPI26_3: +LCPI27_3: QUAD $0x2d2d2d2d2d2d2d2d; QUAD $0x2d2d2d2d2d2d2d2d // .space 16, '----------------' -LCPI26_4: +LCPI27_4: QUAD $0xdfdfdfdfdfdfdfdf; QUAD $0xdfdfdfdfdfdfdfdf // .space 16, '\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf' -LCPI26_5: +LCPI27_5: QUAD $0x2e2e2e2e2e2e2e2e; QUAD $0x2e2e2e2e2e2e2e2e // .space 16, '................' -LCPI26_6: +LCPI27_6: QUAD $0x4545454545454545; QUAD $0x4545454545454545 // .space 16, 'EEEEEEEEEEEEEEEE' _do_skip_number: @@ -6568,38 +6866,38 @@ _do_skip_number: WORD $0x5641 // pushq %r14 BYTE $0x53 // pushq %rbx TESTQ SI, SI - JE LBB26_1 + JE LBB27_1 CMPB 0(DI), $48 - JNE LBB26_6 + JNE LBB27_6 MOVL $1, AX CMPQ SI, $1 - JE LBB26_55 + JE LBB27_55 MOVB 1(DI), CX ADDB $-46, CX CMPB CX, $55 - JA LBB26_55 + JA LBB27_55 MOVBLZX CX, CX MOVQ $36028797027352577, DX BTQ CX, DX - JAE LBB26_55 + JAE LBB27_55 -LBB26_6: +LBB27_6: CMPQ SI, $16 - JB LBB26_7 + JB LBB27_7 MOVQ $-1, R10 XORL AX, AX - QUAD $0xffffff29056f7ac5 // vmovdqu $-215(%rip), %xmm8 /* LCPI26_0(%rip) */ - QUAD $0xffffff310d6f7ac5 // vmovdqu $-207(%rip), %xmm9 /* LCPI26_1(%rip) */ - QUAD $0xffffff39156f7ac5 // vmovdqu $-199(%rip), %xmm10 /* LCPI26_2(%rip) */ - QUAD $0xffffff411d6f7ac5 // vmovdqu $-191(%rip), %xmm11 /* LCPI26_3(%rip) */ - QUAD $0xffffff49256ffac5 // vmovdqu $-183(%rip), %xmm4 /* LCPI26_4(%rip) */ - QUAD $0xffffff512d6ffac5 // vmovdqu $-175(%rip), %xmm5 /* LCPI26_5(%rip) */ - QUAD $0xffffff59356ffac5 // vmovdqu $-167(%rip), %xmm6 /* LCPI26_6(%rip) */ + QUAD $0xffffff29056f7ac5 // vmovdqu $-215(%rip), %xmm8 /* LCPI27_0(%rip) */ + QUAD $0xffffff310d6f7ac5 // vmovdqu $-207(%rip), %xmm9 /* LCPI27_1(%rip) */ + QUAD $0xffffff39156f7ac5 // vmovdqu $-199(%rip), %xmm10 /* LCPI27_2(%rip) */ + QUAD $0xffffff411d6f7ac5 // vmovdqu $-191(%rip), %xmm11 /* LCPI27_3(%rip) */ + QUAD $0xffffff49256ffac5 // vmovdqu $-183(%rip), %xmm4 /* LCPI27_4(%rip) */ + QUAD $0xffffff512d6ffac5 // vmovdqu $-175(%rip), %xmm5 /* LCPI27_5(%rip) */ + QUAD $0xffffff59356ffac5 // vmovdqu $-167(%rip), %xmm6 /* LCPI27_6(%rip) */ MOVQ $-1, R9 MOVQ $-1, R8 MOVQ SI, R14 -LBB26_9: +LBB27_9: LONG $0x3c6ffac5; BYTE $0x07 // vmovdqu (%rdi,%rax), %xmm7 LONG $0x6441c1c4; BYTE $0xc0 // vpcmpgtb %xmm8, %xmm7, %xmm0 LONG $0xcf64b1c5 // vpcmpgtb %xmm7, %xmm9, %xmm1 @@ -6620,7 +6918,7 @@ LBB26_9: NOTL CX BSFL CX, CX CMPL CX, $16 - JE LBB26_11 + JE LBB27_11 MOVL $-1, BX SHLL CX, BX NOTL BX @@ -6629,167 +6927,167 @@ LBB26_9: ANDL R11, BX MOVL BX, R11 -LBB26_11: +LBB27_11: LEAL -1(DX), BX ANDL DX, BX - JNE LBB26_12 + JNE LBB27_12 LEAL -1(R15), BX ANDL R15, BX - JNE LBB26_12 + JNE LBB27_12 LEAL -1(R11), BX ANDL R11, BX - JNE LBB26_12 + JNE LBB27_12 TESTL DX, DX - JE LBB26_19 + JE LBB27_19 BSFL DX, DX CMPQ R8, $-1 - JNE LBB26_56 + JNE LBB27_56 ADDQ AX, DX MOVQ DX, R8 -LBB26_19: +LBB27_19: TESTL R15, R15 - JE LBB26_22 + JE LBB27_22 BSFL R15, DX CMPQ R9, $-1 - JNE LBB26_56 + JNE LBB27_56 ADDQ AX, DX MOVQ DX, R9 -LBB26_22: +LBB27_22: TESTL R11, R11 - JE LBB26_25 + JE LBB27_25 BSFL R11, DX CMPQ R10, 
$-1 - JNE LBB26_56 + JNE LBB27_56 ADDQ AX, DX MOVQ DX, R10 -LBB26_25: +LBB27_25: CMPL CX, $16 - JNE LBB26_57 + JNE LBB27_57 ADDQ $-16, R14 ADDQ $16, AX CMPQ R14, $15 - JA LBB26_9 + JA LBB27_9 LEAQ 0(DI)(AX*1), CX MOVQ CX, R11 CMPQ AX, SI - JE LBB26_41 + JE LBB27_41 -LBB26_28: +LBB27_28: LEAQ 0(CX)(R14*1), R11 MOVQ CX, SI SUBQ DI, SI XORL AX, AX - LONG $0x3b3d8d4c; WORD $0x0001; BYTE $0x00 // leaq $315(%rip), %r15 /* LJTI26_0(%rip) */ - JMP LBB26_29 + LONG $0x3b3d8d4c; WORD $0x0001; BYTE $0x00 // leaq $315(%rip), %r15 /* LJTI27_0(%rip) */ + JMP LBB27_29 -LBB26_31: +LBB27_31: CMPL DX, $101 - JNE LBB26_40 + JNE LBB27_40 -LBB26_32: +LBB27_32: CMPQ R9, $-1 - JNE LBB26_58 + JNE LBB27_58 LEAQ 0(SI)(AX*1), R9 -LBB26_39: +LBB27_39: ADDQ $1, AX CMPQ R14, AX - JE LBB26_41 + JE LBB27_41 -LBB26_29: +LBB27_29: MOVBLSX 0(CX)(AX*1), DX LEAL -48(DX), BX CMPL BX, $10 - JB LBB26_39 + JB LBB27_39 LEAL -43(DX), BX CMPL BX, $26 - JA LBB26_31 + JA LBB27_31 MOVLQSX 0(R15)(BX*4), DX ADDQ R15, DX JMP DX -LBB26_37: +LBB27_37: CMPQ R10, $-1 - JNE LBB26_58 + JNE LBB27_58 LEAQ 0(SI)(AX*1), R10 - JMP LBB26_39 + JMP LBB27_39 -LBB26_35: +LBB27_35: CMPQ R8, $-1 - JNE LBB26_58 + JNE LBB27_58 LEAQ 0(SI)(AX*1), R8 - JMP LBB26_39 + JMP LBB27_39 -LBB26_1: +LBB27_1: MOVQ $-1, AX - JMP LBB26_55 + JMP LBB27_55 -LBB26_57: +LBB27_57: MOVL CX, R11 ADDQ DI, R11 ADDQ AX, R11 -LBB26_41: +LBB27_41: MOVQ $-1, AX TESTQ R8, R8 - JNE LBB26_42 - JMP LBB26_55 + JNE LBB27_42 + JMP LBB27_55 -LBB26_40: +LBB27_40: ADDQ AX, CX MOVQ CX, R11 MOVQ $-1, AX TESTQ R8, R8 - JE LBB26_55 + JE LBB27_55 -LBB26_42: +LBB27_42: TESTQ R10, R10 - JE LBB26_55 + JE LBB27_55 TESTQ R9, R9 - JE LBB26_55 + JE LBB27_55 SUBQ DI, R11 LEAQ -1(R11), AX CMPQ R8, AX - JE LBB26_47 + JE LBB27_47 CMPQ R10, AX - JE LBB26_47 + JE LBB27_47 CMPQ R9, AX - JE LBB26_47 + JE LBB27_47 TESTQ R10, R10 - JLE LBB26_51 + JLE LBB27_51 LEAQ -1(R10), AX CMPQ R9, AX - JE LBB26_51 + JE LBB27_51 NOTQ R10 MOVQ R10, AX - JMP LBB26_55 + JMP LBB27_55 -LBB26_47: +LBB27_47: NEGQ R11 MOVQ R11, AX -LBB26_55: +LBB27_55: BYTE $0x5b // popq %rbx WORD $0x5e41 // popq %r14 WORD $0x5f41 // popq %r15 BYTE $0x5d // popq %rbp RET -LBB26_51: +LBB27_51: MOVQ R8, AX ORQ R9, AX SETPL AX - JS LBB26_54 + JS LBB27_54 CMPQ R8, R9 - JL LBB26_54 + JL LBB27_54 NOTQ R8 MOVQ R8, AX - JMP LBB26_55 + JMP LBB27_55 -LBB26_54: +LBB27_54: LEAQ -1(R9), CX CMPQ R8, CX NOTQ R9 @@ -6797,66 +7095,66 @@ LBB26_54: TESTB AX, AX LONG $0xcb440f4d // cmoveq %r11, %r9 MOVQ R9, AX - JMP LBB26_55 + JMP LBB27_55 -LBB26_12: +LBB27_12: BSFL BX, CX - JMP LBB26_13 + JMP LBB27_13 -LBB26_58: +LBB27_58: SUBQ CX, DI NOTQ AX ADDQ DI, AX - JMP LBB26_55 + JMP LBB27_55 -LBB26_56: +LBB27_56: MOVL DX, CX -LBB26_13: +LBB27_13: NOTQ AX SUBQ CX, AX - JMP LBB26_55 + JMP LBB27_55 -LBB26_7: +LBB27_7: MOVQ $-1, R8 MOVQ DI, CX MOVQ SI, R14 MOVQ $-1, R9 MOVQ $-1, R10 - JMP LBB26_28 - -// .set L26_0_set_37, LBB26_37-LJTI26_0 -// .set L26_0_set_40, LBB26_40-LJTI26_0 -// .set L26_0_set_35, LBB26_35-LJTI26_0 -// .set L26_0_set_32, LBB26_32-LJTI26_0 -LJTI26_0: - LONG $0xffffff00 // .long L26_0_set_37 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff00 // .long L26_0_set_37 - LONG $0xffffff10 // .long L26_0_set_35 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long 
L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xffffff40 // .long L26_0_set_40 - LONG $0xfffffecc // .long L26_0_set_32 + JMP LBB27_28 + +// .set L27_0_set_37, LBB27_37-LJTI27_0 +// .set L27_0_set_40, LBB27_40-LJTI27_0 +// .set L27_0_set_35, LBB27_35-LJTI27_0 +// .set L27_0_set_32, LBB27_32-LJTI27_0 +LJTI27_0: + LONG $0xffffff00 // .long L27_0_set_37 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff00 // .long L27_0_set_37 + LONG $0xffffff10 // .long L27_0_set_35 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xffffff40 // .long L27_0_set_40 + LONG $0xfffffecc // .long L27_0_set_32 _skip_positive: BYTE $0x55 // pushq %rbp @@ -6908,22 +7206,22 @@ _skip_number: SETEQ AX ADDQ AX, BX SUBQ AX, SI - JE LBB28_6 + JE LBB29_6 CMPQ R15, SI - JAE LBB28_3 + JAE LBB29_3 MOVB 0(BX), AX ADDB $-48, AX CMPB AX, $9 - JA LBB28_8 + JA LBB29_8 -LBB28_3: +LBB29_3: MOVQ BX, DI LONG $0xfffc01e8; BYTE $0xff // callq _do_skip_number TESTQ AX, AX - JS LBB28_7 + JS LBB29_7 ADDQ AX, BX -LBB28_5: +LBB29_5: SUBQ R12, BX MOVQ BX, 0(R14) MOVQ R15, AX @@ -6934,17 +7232,17 @@ LBB28_5: BYTE $0x5d // popq %rbp RET -LBB28_6: +LBB29_6: MOVQ $-1, R15 - JMP LBB28_5 + JMP LBB29_5 -LBB28_7: +LBB29_7: NOTQ AX ADDQ AX, BX -LBB28_8: +LBB29_8: MOVQ $-2, R15 - JMP LBB28_5 + JMP LBB29_5 _validate_one: BYTE $0x55 // pushq %rbp @@ -6962,60 +7260,60 @@ _find_non_ascii: BYTE $0x55 // pushq %rbp WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp CMPQ SI, $16 - JL LBB30_1 + JL LBB31_1 XORL AX, AX XORL DX, DX -LBB30_9: +LBB31_9: LONG $0x046ffac5; BYTE $0x07 // vmovdqu (%rdi,%rax), %xmm0 LONG $0xc8d7f9c5 // vpmovmskb %xmm0, %ecx TESTL CX, CX - JNE LBB30_10 + JNE LBB31_10 ADDQ $16, AX LEAQ 0(SI)(DX*1), CX ADDQ $-16, CX ADDQ $-16, DX ADDQ $16, CX CMPQ CX, $31 - JG LBB30_9 + JG LBB31_9 MOVQ DI, CX SUBQ DX, CX SUBQ AX, SI MOVQ $-1, AX TESTQ SI, SI - JG LBB30_5 - JMP LBB30_12 + JG LBB31_5 + JMP LBB31_12 -LBB30_1: +LBB31_1: MOVQ DI, CX MOVQ $-1, AX TESTQ SI, SI - JLE LBB30_12 + JLE LBB31_12 -LBB30_5: +LBB31_5: ADDQ $1, SI MOVQ CX, DX SUBQ DI, DX -LBB30_6: +LBB31_6: CMPB 0(CX), $0 - JS LBB30_7 + JS LBB31_7 ADDQ $1, CX ADDQ $-1, SI ADDQ $1, DX CMPQ SI, $1 - JG LBB30_6 + JG LBB31_6 -LBB30_12: +LBB31_12: BYTE $0x5d // popq %rbp RET -LBB30_7: +LBB31_7: MOVQ DX, AX BYTE $0x5d // popq %rbp RET 
-LBB30_10: +LBB31_10: BSFW CX, AX MOVWLZX AX, AX SUBQ DX, AX @@ -7031,7 +7329,7 @@ _print_mantissa: ADDQ SI, R14 MOVQ DI, AX SHRQ $32, AX - JE LBB31_2 + JE LBB32_2 MOVQ $-6067343680855748867, DX MOVQ DI, AX MULQ DX @@ -7063,7 +7361,7 @@ _print_mantissa: LONG $0x64fa6b41 // imull $100, %r10d, %edi SUBL DI, AX MOVWLZX AX, R11 - LONG $0x753d8d48; WORD $0x0059; BYTE $0x00 // leaq $22901(%rip), %rdi /* _Digits(%rip) */ + LONG $0x8c3d8d48; WORD $0x0059; BYTE $0x00 // leaq $22924(%rip), %rdi /* _Digits(%rip) */ MOVWLZX 0(DI)(R8*2), AX MOVW AX, -2(R14) MOVWLZX 0(DI)(R9*2), AX @@ -7075,13 +7373,13 @@ _print_mantissa: ADDQ $-8, R14 MOVQ DX, DI -LBB31_2: +LBB32_2: CMPL DI, $10000 - JB LBB31_3 + JB LBB32_3 MOVL $3518437209, R8 - LONG $0x2d0d8d4c; WORD $0x0059; BYTE $0x00 // leaq $22829(%rip), %r9 /* _Digits(%rip) */ + LONG $0x440d8d4c; WORD $0x0059; BYTE $0x00 // leaq $22852(%rip), %r9 /* _Digits(%rip) */ -LBB31_5: +LBB32_5: MOVL DI, AX IMULQ R8, AX SHRQ $45, AX @@ -7098,11 +7396,11 @@ LBB31_5: ADDQ $-4, R14 CMPL DI, $99999999 MOVL AX, DI - JA LBB31_5 + JA LBB32_5 CMPL AX, $100 - JB LBB31_8 + JB LBB32_8 -LBB31_7: +LBB32_7: MOVWLZX AX, CX SHRL $2, CX LONG $0x147bc969; WORD $0x0000 // imull $5243, %ecx, %ecx @@ -7110,17 +7408,17 @@ LBB31_7: WORD $0xd16b; BYTE $0x64 // imull $100, %ecx, %edx SUBL DX, AX MOVWLZX AX, AX - LONG $0xc6158d48; WORD $0x0058; BYTE $0x00 // leaq $22726(%rip), %rdx /* _Digits(%rip) */ + LONG $0xdd158d48; WORD $0x0058; BYTE $0x00 // leaq $22749(%rip), %rdx /* _Digits(%rip) */ MOVWLZX 0(DX)(AX*2), AX MOVW AX, -2(R14) ADDQ $-2, R14 MOVL CX, AX -LBB31_8: +LBB32_8: CMPL AX, $10 - JB LBB31_10 + JB LBB32_10 MOVL AX, AX - LONG $0xa90d8d48; WORD $0x0058; BYTE $0x00 // leaq $22697(%rip), %rcx /* _Digits(%rip) */ + LONG $0xc00d8d48; WORD $0x0058; BYTE $0x00 // leaq $22720(%rip), %rcx /* _Digits(%rip) */ MOVWLZX 0(CX)(AX*2), AX MOVW AX, -2(R14) BYTE $0x5b // popq %rbx @@ -7128,13 +7426,13 @@ LBB31_8: BYTE $0x5d // popq %rbp RET -LBB31_3: +LBB32_3: MOVL DI, AX CMPL AX, $100 - JAE LBB31_7 - JMP LBB31_8 + JAE LBB32_7 + JMP LBB32_8 -LBB31_10: +LBB32_10: ADDB $48, AX MOVB AX, 0(SI) BYTE $0x5b // popq %rbx @@ -7142,6 +7440,14 @@ LBB31_10: BYTE $0x5d // popq %rbp RET +_write_syscall: + MOVQ SI, DX + MOVQ DI, SI + MOVQ $1, DI + MOVQ $33554436, AX + SYSCALL + RET + _left_shift: BYTE $0x55 // pushq %rbp WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp @@ -7150,44 +7456,44 @@ _left_shift: BYTE $0x53 // pushq %rbx MOVL SI, CX IMUL3Q $104, CX, R14 - LONG $0xcb158d48; WORD $0x008a; BYTE $0x00 // leaq $35531(%rip), %rdx /* _LSHIFT_TAB(%rip) */ + LONG $0xdc158d48; WORD $0x008a; BYTE $0x00 // leaq $35548(%rip), %rdx /* _LSHIFT_TAB(%rip) */ MOVL 0(R14)(DX*1), R8 MOVQ 0(DI), R11 MOVLQSX 16(DI), R9 MOVL R9, R10 TESTQ R9, R9 - JE LBB32_1 + JE LBB34_1 LEAQ 0(R14)(DX*1), SI ADDQ $4, SI XORL BX, BX -LBB32_3: +LBB34_3: MOVBLZX 0(SI)(BX*1), AX TESTB AX, AX - JE LBB32_10 + JE LBB34_10 CMPB 0(R11)(BX*1), AX - JNE LBB32_5 + JNE LBB34_5 ADDQ $1, BX CMPQ R9, BX - JNE LBB32_3 + JNE LBB34_3 MOVL R9, SI ADDQ R14, DX CMPB 4(SI)(DX*1), $0 - JNE LBB32_9 - JMP LBB32_10 + JNE LBB34_9 + JMP LBB34_10 -LBB32_1: +LBB34_1: XORL SI, SI ADDQ R14, DX CMPB 4(SI)(DX*1), $0 - JE LBB32_10 + JE LBB34_10 -LBB32_9: +LBB34_9: ADDL $-1, R8 -LBB32_10: +LBB34_10: TESTL R10, R10 - JLE LBB32_25 + JLE LBB34_25 LEAL 0(R8)(R10*1), AX MOVLQSX AX, R15 ADDL $-1, R9 @@ -7195,7 +7501,7 @@ LBB32_10: XORL DX, DX MOVQ $-3689348814741910323, R14 -LBB32_12: +LBB34_12: MOVL R9, AX MOVBQSX 0(R11)(AX*1), SI ADDQ $-48, SI @@ -7209,90 +7515,90 @@ LBB32_12: MOVQ SI, AX SUBQ 
BX, AX CMPQ 8(DI), R15 - JBE LBB32_18 + JBE LBB34_18 ADDB $48, AX MOVB AX, 0(R11)(R15*1) - JMP LBB32_20 + JMP LBB34_20 -LBB32_18: +LBB34_18: TESTQ AX, AX - JE LBB32_20 + JE LBB34_20 MOVL $1, 28(DI) -LBB32_20: +LBB34_20: CMPQ R10, $2 - JL LBB32_14 + JL LBB34_14 ADDQ $-1, R10 MOVQ 0(DI), R11 ADDL $-1, R9 ADDQ $-1, R15 - JMP LBB32_12 + JMP LBB34_12 -LBB32_14: +LBB34_14: CMPQ SI, $10 - JAE LBB32_15 + JAE LBB34_15 -LBB32_25: +LBB34_25: MOVLQSX 16(DI), CX MOVLQSX R8, AX ADDQ CX, AX MOVL AX, 16(DI) MOVQ 8(DI), CX CMPQ CX, AX - JA LBB32_27 + JA LBB34_27 MOVL CX, 16(DI) MOVL CX, AX -LBB32_27: +LBB34_27: ADDL R8, 20(DI) TESTL AX, AX - JLE LBB32_31 + JLE LBB34_31 MOVQ 0(DI), CX MOVL AX, DX ADDQ $1, DX ADDL $-1, AX -LBB32_29: +LBB34_29: MOVL AX, SI CMPB 0(CX)(SI*1), $48 - JNE LBB32_33 + JNE LBB34_33 MOVL AX, 16(DI) ADDQ $-1, DX ADDL $-1, AX CMPQ DX, $1 - JG LBB32_29 - JMP LBB32_32 + JG LBB34_29 + JMP LBB34_32 -LBB32_31: - JNE LBB32_33 +LBB34_31: + JNE LBB34_33 -LBB32_32: +LBB34_32: MOVL $0, 20(DI) -LBB32_33: +LBB34_33: BYTE $0x5b // popq %rbx WORD $0x5e41 // popq %r14 WORD $0x5f41 // popq %r15 BYTE $0x5d // popq %rbp RET -LBB32_15: +LBB34_15: ADDL R8, R9 MOVLQSX R9, SI ADDQ $-1, SI - JMP LBB32_16 + JMP LBB34_16 -LBB32_17: +LBB34_17: ADDB $48, AX MOVQ 0(DI), BX MOVB AX, 0(BX)(SI*1) -LBB32_24: +LBB34_24: ADDQ $-1, SI CMPQ CX, $9 - JBE LBB32_25 + JBE LBB34_25 -LBB32_16: +LBB34_16: MOVQ DX, CX MOVQ DX, AX MULQ R14 @@ -7302,15 +7608,15 @@ LBB32_16: MOVQ CX, AX SUBQ BX, AX CMPQ 8(DI), SI - JA LBB32_17 + JA LBB34_17 TESTQ AX, AX - JE LBB32_24 + JE LBB34_24 MOVL $1, 28(DI) - JMP LBB32_24 + JMP LBB34_24 -LBB32_5: - JL LBB32_9 - JMP LBB32_10 +LBB34_5: + JL LBB34_9 + JMP LBB34_10 _right_shift: BYTE $0x55 // pushq %rbp @@ -7324,9 +7630,9 @@ _right_shift: LONG $0xd84f0f45 // cmovgl %r8d, %r11d XORL AX, AX -LBB33_1: +LBB35_1: CMPQ R11, DX - JE LBB33_2 + JE LBB35_2 LEAQ 0(AX)(AX*4), AX MOVQ 0(DI), SI MOVBQSX 0(SI)(DX*1), SI @@ -7336,10 +7642,10 @@ LBB33_1: MOVQ AX, SI SHRQ CX, SI TESTQ SI, SI - JE LBB33_1 + JE LBB35_1 MOVL DX, R11 -LBB33_7: +LBB35_7: MOVL 20(DI), DX SUBL R11, DX ADDL $1, DX @@ -7349,12 +7655,12 @@ LBB33_7: NOTQ R9 XORL R10, R10 CMPL R11, R8 - JGE LBB33_10 + JGE LBB35_10 MOVLQSX R11, R8 MOVQ 0(DI), SI XORL R10, R10 -LBB33_9: +LBB35_9: MOVQ AX, DX SHRQ CX, DX ANDQ R9, AX @@ -7370,87 +7676,87 @@ LBB33_9: ADDQ $-48, AX MOVLQSX 16(DI), DX CMPQ BX, DX - JL LBB33_9 - JMP LBB33_10 + JL LBB35_9 + JMP LBB35_10 -LBB33_12: +LBB35_12: ADDB $48, SI MOVQ 0(DI), BX MOVB SI, 0(BX)(DX*1) ADDL $1, DX MOVL DX, R10 -LBB33_15: +LBB35_15: ADDQ AX, AX LEAQ 0(AX)(AX*4), AX -LBB33_10: +LBB35_10: TESTQ AX, AX - JE LBB33_16 + JE LBB35_16 MOVQ AX, SI SHRQ CX, SI ANDQ R9, AX MOVLQSX R10, DX CMPQ 8(DI), DX - JA LBB33_12 + JA LBB35_12 TESTQ SI, SI - JE LBB33_15 + JE LBB35_15 MOVL $1, 28(DI) - JMP LBB33_15 + JMP LBB35_15 -LBB33_16: +LBB35_16: MOVL R10, 16(DI) TESTL R10, R10 - JLE LBB33_20 + JLE LBB35_20 MOVQ 0(DI), AX MOVL R10, CX ADDQ $1, CX ADDL $-1, R10 -LBB33_18: +LBB35_18: MOVL R10, DX CMPB 0(AX)(DX*1), $48 - JNE LBB33_22 + JNE LBB35_22 MOVL R10, 16(DI) ADDQ $-1, CX ADDL $-1, R10 CMPQ CX, $1 - JG LBB33_18 - JMP LBB33_21 + JG LBB35_18 + JMP LBB35_21 -LBB33_2: +LBB35_2: TESTQ AX, AX - JE LBB33_23 + JE LBB35_23 MOVQ AX, DX SHRQ CX, DX TESTQ DX, DX - JNE LBB33_7 + JNE LBB35_7 -LBB33_4: +LBB35_4: ADDQ AX, AX LEAQ 0(AX)(AX*4), AX ADDL $1, R11 MOVQ AX, DX SHRQ CX, DX TESTQ DX, DX - JE LBB33_4 - JMP LBB33_7 + JE LBB35_4 + JMP LBB35_7 -LBB33_20: - JE LBB33_21 +LBB35_20: + JE LBB35_21 -LBB33_22: +LBB35_22: BYTE $0x5b // popq %rbx BYTE 
$0x5d // popq %rbp RET -LBB33_21: +LBB35_21: MOVL $0, 20(DI) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB33_23: +LBB35_23: MOVL $0, 16(DI) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp @@ -10233,6 +10539,10 @@ _VecShiftShuffles: QUAD $0x0e0d0c0b0a090807; QUAD $0xffffffffffffff0f // .ascii 16, '\x07\x08\t\n\x0b\x0c\r\x0e\x0f\xff\xff\xff\xff\xff\xff\xff' QUAD $0x0f0e0d0c0b0a0908; QUAD $0xffffffffffffffff // .ascii 16, '\x08\t\n\x0b\x0c\r\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff' +_LB_dcf8f5f1: // _printhex.tab + QUAD $0x3736353433323130; QUAD $0x6665646362613938 // .asciz 16, '0123456789abcdef' + BYTE $0x00 // .asciz 1, '\x00' + __SingleQuoteTab: QUAD $0x0000000000000006 // .quad 6 QUAD $0x000030303030755c // .asciz 8, '\\u0000\x00\x00' @@ -11677,7 +11987,7 @@ _html_escape: MOVQ nb+8(FP), SI MOVQ dp+16(FP), DX MOVQ dn+24(FP), CX - CALL ·__native_entry__+8833(SB) // _html_escape + CALL ·__native_entry__+9809(SB) // _html_escape MOVQ AX, ret+32(FP) RET @@ -11761,7 +12071,7 @@ _quote: MOVQ dp+16(FP), DX MOVQ dn+24(FP), CX MOVQ flags+32(FP), R8 - CALL ·__native_entry__+4498(SB) // _quote + CALL ·__native_entry__+5483(SB) // _quote MOVQ AX, ret+40(FP) RET @@ -11782,7 +12092,7 @@ _skip_array: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ m+16(FP), DX - CALL ·__native_entry__+18066(SB) // _skip_array + CALL ·__native_entry__+19042(SB) // _skip_array MOVQ AX, ret+24(FP) RET @@ -11802,7 +12112,7 @@ _entry: _skip_number: MOVQ s+0(FP), DI MOVQ p+8(FP), SI - CALL ·__native_entry__+21017(SB) // _skip_number + CALL ·__native_entry__+21993(SB) // _skip_number MOVQ AX, ret+16(FP) RET @@ -11823,7 +12133,7 @@ _skip_object: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ m+16(FP), DX - CALL ·__native_entry__+18103(SB) // _skip_object + CALL ·__native_entry__+19079(SB) // _skip_object MOVQ AX, ret+24(FP) RET @@ -11844,7 +12154,7 @@ _skip_one: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ m+16(FP), DX - CALL ·__native_entry__+16219(SB) // _skip_one + CALL ·__native_entry__+17195(SB) // _skip_one MOVQ AX, ret+24(FP) RET @@ -11887,7 +12197,7 @@ _unquote: MOVQ dp+16(FP), DX MOVQ ep+24(FP), CX MOVQ flags+32(FP), R8 - CALL ·__native_entry__+5808(SB) // _unquote + CALL ·__native_entry__+6784(SB) // _unquote MOVQ AX, ret+40(FP) RET @@ -11908,7 +12218,7 @@ _validate_one: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ m+16(FP), DX - CALL ·__native_entry__+21134(SB) // _validate_one + CALL ·__native_entry__+22110(SB) // _validate_one MOVQ AX, ret+24(FP) RET @@ -11931,7 +12241,7 @@ _value: MOVQ p+16(FP), DX MOVQ v+24(FP), CX MOVQ allow_control+32(FP), R8 - CALL ·__native_entry__+11422(SB) // _value + CALL ·__native_entry__+12398(SB) // _value MOVQ AX, ret+40(FP) RET @@ -11952,7 +12262,7 @@ _vnumber: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ v+16(FP), DX - LEAQ ·__native_entry__+14332(SB), AX // _vnumber + LEAQ ·__native_entry__+15308(SB), AX // _vnumber JMP AX _stack_grow: @@ -11972,7 +12282,7 @@ _vsigned: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ v+16(FP), DX - LEAQ ·__native_entry__+15637(SB), AX // _vsigned + LEAQ ·__native_entry__+16613(SB), AX // _vsigned JMP AX _stack_grow: @@ -11992,7 +12302,7 @@ _vstring: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ v+16(FP), DX - LEAQ ·__native_entry__+13314(SB), AX // _vstring + LEAQ ·__native_entry__+14290(SB), AX // _vstring JMP AX _stack_grow: @@ -12012,7 +12322,7 @@ _vunsigned: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ v+16(FP), DX - LEAQ ·__native_entry__+15917(SB), AX // _vunsigned + LEAQ ·__native_entry__+16893(SB), AX // _vunsigned JMP AX _stack_grow: diff --git a/internal/native/avx/native_subr_amd64.go 
b/internal/native/avx/native_subr_amd64.go index 77d3f7c72..f1141f147 100644 --- a/internal/native/avx/native_subr_amd64.go +++ b/internal/native/avx/native_subr_amd64.go @@ -10,23 +10,23 @@ func __native_entry__() uintptr var ( _subr__f64toa = __native_entry__() + 570 - _subr__html_escape = __native_entry__() + 8833 + _subr__html_escape = __native_entry__() + 9809 _subr__i64toa = __native_entry__() + 3205 _subr__lspace = __native_entry__() + 251 _subr__lzero = __native_entry__() + 13 - _subr__quote = __native_entry__() + 4498 - _subr__skip_array = __native_entry__() + 18066 - _subr__skip_number = __native_entry__() + 21017 - _subr__skip_object = __native_entry__() + 18103 - _subr__skip_one = __native_entry__() + 16219 + _subr__quote = __native_entry__() + 5483 + _subr__skip_array = __native_entry__() + 19042 + _subr__skip_number = __native_entry__() + 21993 + _subr__skip_object = __native_entry__() + 19079 + _subr__skip_one = __native_entry__() + 17195 _subr__u64toa = __native_entry__() + 3300 - _subr__unquote = __native_entry__() + 5808 - _subr__validate_one = __native_entry__() + 21134 - _subr__value = __native_entry__() + 11422 - _subr__vnumber = __native_entry__() + 14332 - _subr__vsigned = __native_entry__() + 15637 - _subr__vstring = __native_entry__() + 13314 - _subr__vunsigned = __native_entry__() + 15917 + _subr__unquote = __native_entry__() + 6784 + _subr__validate_one = __native_entry__() + 22110 + _subr__value = __native_entry__() + 12398 + _subr__vnumber = __native_entry__() + 15308 + _subr__vsigned = __native_entry__() + 16613 + _subr__vstring = __native_entry__() + 14290 + _subr__vunsigned = __native_entry__() + 16893 ) const ( diff --git a/internal/native/avx2/native_amd64.s b/internal/native/avx2/native_amd64.s index abe50c7b0..2169ee3ee 100644 --- a/internal/native/avx2/native_amd64.s +++ b/internal/native/avx2/native_amd64.s @@ -325,7 +325,7 @@ LBB2_8: LONG $0x4ff56941; WORD $0x1293; BYTE $0x00 // imull $1217359, %r13d, %esi MOVQ R13, AX SHLQ $4, AX - LONG $0x650d8d48; WORD $0x008d; BYTE $0x00 // leaq $36197(%rip), %rcx /* _DOUBLE_POW5_INV_SPLIT(%rip) */ + LONG $0x7a0d8d48; WORD $0x0091; BYTE $0x00 // leaq $37242(%rip), %rcx /* _DOUBLE_POW5_INV_SPLIT(%rip) */ MOVQ R10, R12 ORQ $2, R12 MOVQ 0(AX)(CX*1), R11 @@ -411,7 +411,7 @@ LBB2_22: SHRL $19, SI MOVLQSX AX, DI SHLQ $4, DI - LONG $0x981d8d4c; WORD $0x00a1; BYTE $0x00 // leaq $41368(%rip), %r11 /* _DOUBLE_POW5_SPLIT(%rip) */ + LONG $0xad1d8d4c; WORD $0x00a5; BYTE $0x00 // leaq $42413(%rip), %r11 /* _DOUBLE_POW5_SPLIT(%rip) */ MOVQ R10, CX ORQ $2, CX MOVQ 0(DI)(R11*1), R9 @@ -810,7 +810,7 @@ LBB2_67: LEAQ 1(R12), BX MOVQ BX, SI MOVL R15, DX - LONG $0x0056ebe8; BYTE $0x00 // callq _print_mantissa + LONG $0x005ae9e8; BYTE $0x00 // callq _print_mantissa MOVB 1(R12), AX MOVB AX, 0(R12) MOVL $1, AX @@ -839,7 +839,7 @@ LBB2_74: LEAL 0(CX)(CX*1), AX LEAL 0(AX)(AX*4), AX SUBL AX, R14 - LONG $0xa8058d48; WORD $0x00b0; BYTE $0x00 // leaq $45224(%rip), %rax /* _Digits(%rip) */ + LONG $0xbd058d48; WORD $0x00b4; BYTE $0x00 // leaq $46269(%rip), %rax /* _Digits(%rip) */ MOVWLZX 0(AX)(CX*2), AX MOVL BX, CX MOVW AX, 0(R12)(CX*1) @@ -874,7 +874,7 @@ LBB2_75: CMPL R14, $10 JL LBB2_77 MOVLQSX R14, AX - LONG $0x3f0d8d48; WORD $0x00b0; BYTE $0x00 // leaq $45119(%rip), %rcx /* _Digits(%rip) */ + LONG $0x540d8d48; WORD $0x00b4; BYTE $0x00 // leaq $46164(%rip), %rcx /* _Digits(%rip) */ MOVWLZX 0(CX)(AX*2), AX MOVL BX, CX MOVW AX, 0(R12)(CX*1) @@ -893,7 +893,7 @@ LBB2_80: MOVL BX, SI ADDQ -56(BP), SI MOVL R15, DX - LONG $0x0055eae8; BYTE $0x00 
// callq _print_mantissa + LONG $0x0059e8e8; BYTE $0x00 // callq _print_mantissa TESTL R12, R12 JE LBB2_81 LEAL 0(R12)(BX*1), AX @@ -974,7 +974,7 @@ LBB2_85: ADDQ BX, R12 MOVQ R12, SI MOVL R15, DX - LONG $0x00550ee8; BYTE $0x00 // callq _print_mantissa + LONG $0x00590ce8; BYTE $0x00 // callq _print_mantissa ADDL BX, R15 MOVL R15, BX MOVL -44(BP), R9 @@ -1128,7 +1128,7 @@ _u64toa: ADDQ AX, AX CMPL SI, $1000 JB LBB4_3 - LONG $0x2a0d8d48; WORD $0x00ad; BYTE $0x00 // leaq $44330(%rip), %rcx /* _Digits(%rip) */ + LONG $0x3f0d8d48; WORD $0x00b1; BYTE $0x00 // leaq $45375(%rip), %rcx /* _Digits(%rip) */ MOVB 0(DX)(CX*1), CX MOVB CX, 0(DI) MOVL $1, CX @@ -1142,14 +1142,14 @@ LBB4_3: LBB4_4: MOVWLZX DX, DX ORQ $1, DX - LONG $0x09358d48; WORD $0x00ad; BYTE $0x00 // leaq $44297(%rip), %rsi /* _Digits(%rip) */ + LONG $0x1e358d48; WORD $0x00b1; BYTE $0x00 // leaq $45342(%rip), %rsi /* _Digits(%rip) */ MOVB 0(DX)(SI*1), DX MOVL CX, SI ADDL $1, CX MOVB DX, 0(DI)(SI*1) LBB4_6: - LONG $0xf7158d48; WORD $0x00ac; BYTE $0x00 // leaq $44279(%rip), %rdx /* _Digits(%rip) */ + LONG $0x0c158d48; WORD $0x00b1; BYTE $0x00 // leaq $45324(%rip), %rdx /* _Digits(%rip) */ MOVB 0(AX)(DX*1), DX MOVL CX, SI ADDL $1, CX @@ -1158,7 +1158,7 @@ LBB4_6: LBB4_7: MOVWLZX AX, AX ORQ $1, AX - LONG $0xde158d48; WORD $0x00ac; BYTE $0x00 // leaq $44254(%rip), %rdx /* _Digits(%rip) */ + LONG $0xf3158d48; WORD $0x00b0; BYTE $0x00 // leaq $45299(%rip), %rdx /* _Digits(%rip) */ MOVB 0(AX)(DX*1), AX MOVL CX, DX ADDL $1, CX @@ -1205,7 +1205,7 @@ LBB4_8: ADDQ R11, R11 CMPL SI, $10000000 JB LBB4_11 - LONG $0x46058d48; WORD $0x00ac; BYTE $0x00 // leaq $44102(%rip), %rax /* _Digits(%rip) */ + LONG $0x5b058d48; WORD $0x00b0; BYTE $0x00 // leaq $45147(%rip), %rax /* _Digits(%rip) */ MOVB 0(R10)(AX*1), AX MOVB AX, 0(DI) MOVL $1, CX @@ -1219,14 +1219,14 @@ LBB4_11: LBB4_12: MOVL R10, AX ORQ $1, AX - LONG $0x21358d48; WORD $0x00ac; BYTE $0x00 // leaq $44065(%rip), %rsi /* _Digits(%rip) */ + LONG $0x36358d48; WORD $0x00b0; BYTE $0x00 // leaq $45110(%rip), %rsi /* _Digits(%rip) */ MOVB 0(AX)(SI*1), AX MOVL CX, SI ADDL $1, CX MOVB AX, 0(DI)(SI*1) LBB4_14: - LONG $0x0f058d48; WORD $0x00ac; BYTE $0x00 // leaq $44047(%rip), %rax /* _Digits(%rip) */ + LONG $0x24058d48; WORD $0x00b0; BYTE $0x00 // leaq $45092(%rip), %rax /* _Digits(%rip) */ MOVB 0(R9)(AX*1), AX MOVL CX, SI ADDL $1, CX @@ -1235,7 +1235,7 @@ LBB4_14: LBB4_15: MOVWLZX R9, AX ORQ $1, AX - LONG $0xf4358d48; WORD $0x00ab; BYTE $0x00 // leaq $44020(%rip), %rsi /* _Digits(%rip) */ + LONG $0x09358d48; WORD $0x00b0; BYTE $0x00 // leaq $45065(%rip), %rsi /* _Digits(%rip) */ MOVB 0(AX)(SI*1), AX MOVL CX, DX MOVB AX, 0(DI)(DX*1) @@ -1317,7 +1317,7 @@ LBB4_16: MOVL $16, CX SUBL AX, CX SHLQ $4, AX - LONG $0x6a158d48; WORD $0x00ab; BYTE $0x00 // leaq $43882(%rip), %rdx /* _VecShiftShuffles(%rip) */ + LONG $0x7f158d48; WORD $0x00af; BYTE $0x00 // leaq $44927(%rip), %rdx /* _VecShiftShuffles(%rip) */ LONG $0x0071e2c4; WORD $0x1004 // vpshufb (%rax,%rdx), %xmm1, %xmm0 LONG $0x077ffac5 // vmovdqu %xmm0, (%rdi) MOVL CX, AX @@ -1343,7 +1343,7 @@ LBB4_20: CMPL DX, $99 JA LBB4_22 MOVL DX, AX - LONG $0x4d0d8d48; WORD $0x00aa; BYTE $0x00 // leaq $43597(%rip), %rcx /* _Digits(%rip) */ + LONG $0x620d8d48; WORD $0x00ae; BYTE $0x00 // leaq $44642(%rip), %rcx /* _Digits(%rip) */ MOVWLZX 0(CX)(AX*2), AX MOVW AX, 0(DI) MOVL $2, CX @@ -1366,7 +1366,7 @@ LBB4_22: WORD $0xc96b; BYTE $0x64 // imull $100, %ecx, %ecx SUBL CX, AX MOVWLZX AX, AX - LONG $0x040d8d48; WORD $0x00aa; BYTE $0x00 // leaq $43524(%rip), %rcx /* 
_Digits(%rip) */ + LONG $0x190d8d48; WORD $0x00ae; BYTE $0x00 // leaq $44569(%rip), %rcx /* _Digits(%rip) */ MOVWLZX 0(CX)(AX*2), AX MOVW AX, 1(DI) MOVL $3, CX @@ -1376,7 +1376,7 @@ LBB4_24: WORD $0xc86b; BYTE $0x64 // imull $100, %eax, %ecx SUBL CX, DX MOVWLZX AX, AX - LONG $0xe60d8d48; WORD $0x00a9; BYTE $0x00 // leaq $43494(%rip), %rcx /* _Digits(%rip) */ + LONG $0xfb0d8d48; WORD $0x00ad; BYTE $0x00 // leaq $44539(%rip), %rcx /* _Digits(%rip) */ MOVWLZX 0(CX)(AX*2), AX MOVW AX, 0(DI) MOVWLZX DX, AX @@ -1439,24 +1439,325 @@ LBB4_25: RET LCPI5_0: + QUAD $0x0000000000000000; QUAD $0x0000000000000000 // .space 16, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + QUAD $0x0000000000000000; QUAD $0x0000000000000000 // .space 16, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + +_xprintf: + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + WORD $0x5741 // pushq %r15 + WORD $0x5641 // pushq %r14 + WORD $0x5541 // pushq %r13 + WORD $0x5441 // pushq %r12 + BYTE $0x53 // pushq %rbx + ANDQ $-32, SP + SUBQ $576, SP + MOVQ DI, R15 + MOVQ SI, 376(SP) + MOVQ DX, 384(SP) + MOVQ CX, 392(SP) + MOVQ R8, 400(SP) + MOVQ R9, 408(SP) + TESTB AX, AX + JE LBB5_52 + QUAD $0x0001a0248429f8c5; BYTE $0x00 // vmovaps %xmm0, $416(%rsp) + QUAD $0x0001b0248c29f8c5; BYTE $0x00 // vmovaps %xmm1, $432(%rsp) + QUAD $0x0001c0249429f8c5; BYTE $0x00 // vmovaps %xmm2, $448(%rsp) + QUAD $0x0001d0249c29f8c5; BYTE $0x00 // vmovaps %xmm3, $464(%rsp) + QUAD $0x0001e024a429f8c5; BYTE $0x00 // vmovaps %xmm4, $480(%rsp) + QUAD $0x0001f024ac29f8c5; BYTE $0x00 // vmovaps %xmm5, $496(%rsp) + QUAD $0x00020024b429f8c5; BYTE $0x00 // vmovaps %xmm6, $512(%rsp) + QUAD $0x00021024bc29f8c5; BYTE $0x00 // vmovaps %xmm7, $528(%rsp) + +LBB5_52: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + QUAD $0x000140248429fcc5; BYTE $0x00 // vmovaps %ymm0, $320(%rsp) + QUAD $0x000120248429fcc5; BYTE $0x00 // vmovaps %ymm0, $288(%rsp) + QUAD $0x000100248429fcc5; BYTE $0x00 // vmovaps %ymm0, $256(%rsp) + QUAD $0x0000e0248429fcc5; BYTE $0x00 // vmovaps %ymm0, $224(%rsp) + QUAD $0x0000c0248429fcc5; BYTE $0x00 // vmovaps %ymm0, $192(%rsp) + QUAD $0x0000a0248429fcc5; BYTE $0x00 // vmovaps %ymm0, $160(%rsp) + QUAD $0x000080248429fcc5; BYTE $0x00 // vmovaps %ymm0, $128(%rsp) + LONG $0x4429fcc5; WORD $0x6024 // vmovaps %ymm0, $96(%rsp) + MOVQ $206158430216, AX + MOVQ AX, 32(SP) + LEAQ 16(BP), AX + MOVQ AX, 40(SP) + LEAQ 368(SP), AX + MOVQ AX, 48(SP) + XORL AX, AX + LEAQ 96(SP), BX + LEAQ 64(SP), R14 + MOVQ $7378697629483820647, R13 + LONG $0x1b258d4c; WORD $0x00ad; BYTE $0x00 // leaq $44315(%rip), %r12 /* _printhex.tab(%rip) */ + JMP LBB5_1 + +LBB5_17: + MOVB $37, 64(SP) + MOVL $1, SI + MOVQ R14, DI + +LBB5_25: + WORD $0xf8c5; BYTE $0x77 // vzeroupper + LONG $0x00530ce8; BYTE $0x00 // callq _write_syscall + XORL AX, AX + +LBB5_1: + MOVB 0(R15), CX + CMPB CX, $37 + JE LBB5_4 + TESTB CX, CX + JE LBB5_44 + ADDQ $1, R15 + MOVB CX, 96(SP)(AX*1) + ADDQ $1, AX + JMP LBB5_1 + +LBB5_4: + MOVB $0, 96(SP)(AX*1) + CMPB 96(SP), $0 + JE LBB5_5 + XORL AX, AX + +LBB5_7: + LEAQ 1(AX), SI + CMPB 97(SP)(AX*1), $0 + MOVQ SI, AX + JNE LBB5_7 + JMP LBB5_8 + +LBB5_5: + XORL SI, SI + +LBB5_8: + MOVQ BX, DI + WORD $0xf8c5; BYTE $0x77 // vzeroupper + LONG $0x0052c1e8; BYTE $0x00 // callq _write_syscall + MOVBLSX 1(R15), CX + ADDQ $2, R15 + CMPL CX, $114 + JG LBB5_13 + CMPL CX, $37 + JE LBB5_17 + MOVL $0, AX + CMPL CX, $100 + JNE LBB5_1 + MOVL 32(SP), CX + CMPQ CX, $40 + JA LBB5_26 + MOVQ CX, AX + ADDQ 48(SP), AX + ADDL $8, CX + 
MOVL CX, 32(SP) + JMP LBB5_27 + +LBB5_13: + CMPL CX, $115 + JE LBB5_18 + MOVL $0, AX + CMPL CX, $120 + JNE LBB5_1 + MOVL 32(SP), CX + CMPQ CX, $40 + JA LBB5_36 + MOVQ CX, AX + ADDQ 48(SP), AX + ADDL $8, CX + MOVL CX, 32(SP) + MOVQ 0(AX), CX + TESTQ CX, CX + JE LBB5_38 + +LBB5_39: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x4429fcc5; WORD $0x4024 // vmovaps %ymm0, $64(%rsp) + LEAQ 95(SP), DI + MOVQ CX, DX + +LBB5_40: + MOVQ DI, AX + MOVL CX, SI + ANDL $15, SI + MOVBLZX 0(SI)(R12*1), BX + ADDQ $-1, DI + MOVB BX, -1(AX) + SHRQ $4, DX + CMPQ CX, $15 + MOVQ DX, CX + JA LBB5_40 + XORL SI, SI + +LBB5_42: + ADDQ $1, SI + CMPB 0(AX), $0 + LEAQ 1(AX), AX + JNE LBB5_42 + JMP LBB5_43 + +LBB5_18: + MOVL 32(SP), CX + CMPQ CX, $40 + JA LBB5_20 + MOVQ CX, AX + ADDQ 48(SP), AX + ADDL $8, CX + MOVL CX, 32(SP) + MOVQ 0(AX), DI + CMPB 0(DI), $0 + JE LBB5_22 + +LBB5_23: + XORL AX, AX + +LBB5_24: + LEAQ 1(AX), SI + CMPB 1(DI)(AX*1), $0 + MOVQ SI, AX + JNE LBB5_24 + JMP LBB5_25 + +LBB5_26: + MOVQ 40(SP), AX + LEAQ 8(AX), CX + MOVQ CX, 40(SP) + +LBB5_27: + MOVQ 0(AX), R8 + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x4429fcc5; WORD $0x4024 // vmovaps %ymm0, $64(%rsp) + TESTQ R8, R8 + JE LBB5_28 + MOVQ R8, CX + NEGQ CX + LONG $0xc84c0f49 // cmovlq %r8, %rcx + LEAQ 94(SP), DI + +LBB5_30: + MOVQ CX, AX + IMULQ R13 + MOVQ DX, AX + SHRQ $63, AX + SARQ $2, DX + ADDQ AX, DX + LEAL 0(DX)(DX*1), AX + LEAL 0(AX)(AX*4), AX + MOVL CX, SI + SUBL AX, SI + ADDB $48, SI + MOVB SI, 0(DI) + ADDQ $9, CX + ADDQ $-1, DI + CMPQ CX, $18 + MOVQ DX, CX + JA LBB5_30 + TESTQ R8, R8 + JS LBB5_33 + ADDQ $1, DI + JMP LBB5_34 + +LBB5_36: + MOVQ 40(SP), AX + LEAQ 8(AX), CX + MOVQ CX, 40(SP) + MOVQ 0(AX), CX + TESTQ CX, CX + JNE LBB5_39 + +LBB5_38: + MOVB $48, 64(SP) + MOVL $1, SI + MOVQ R14, DI + +LBB5_43: + WORD $0xf8c5; BYTE $0x77 // vzeroupper + LONG $0x00511ee8; BYTE $0x00 // callq _write_syscall + XORL AX, AX + LEAQ 96(SP), BX + JMP LBB5_1 + +LBB5_20: + MOVQ 40(SP), AX + LEAQ 8(AX), CX + MOVQ CX, 40(SP) + MOVQ 0(AX), DI + CMPB 0(DI), $0 + JNE LBB5_23 + +LBB5_22: + XORL SI, SI + JMP LBB5_25 + +LBB5_28: + MOVB $48, 31(SP) + MOVL $1, SI + LEAQ 31(SP), DI + JMP LBB5_25 + +LBB5_33: + MOVB $45, 0(DI) + +LBB5_34: + XORL SI, SI + +LBB5_35: + CMPB 1(DI)(SI*1), $0 + LEAQ 1(SI), SI + JNE LBB5_35 + JMP LBB5_25 + +LBB5_44: + TESTQ AX, AX + JE LBB5_50 + MOVB $0, 96(SP)(AX*1) + CMPB 96(SP), $0 + JE LBB5_46 + XORL AX, AX + +LBB5_48: + LEAQ 1(AX), SI + CMPB 97(SP)(AX*1), $0 + MOVQ SI, AX + JNE LBB5_48 + JMP LBB5_49 + +LBB5_46: + XORL SI, SI + +LBB5_49: + LEAQ 96(SP), DI + WORD $0xf8c5; BYTE $0x77 // vzeroupper + LONG $0x005096e8; BYTE $0x00 // callq _write_syscall + +LBB5_50: + LEAQ -40(BP), SP + BYTE $0x5b // popq %rbx + WORD $0x5c41 // popq %r12 + WORD $0x5d41 // popq %r13 + WORD $0x5e41 // popq %r14 + WORD $0x5f41 // popq %r15 + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + RET + +LCPI6_0: QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 16, ' ' QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 16, ' ' -LCPI5_1: +LCPI6_1: QUAD $0x2222222222222222; QUAD $0x2222222222222222 // .space 16, '""""""""""""""""' QUAD $0x2222222222222222; QUAD $0x2222222222222222 // .space 16, '""""""""""""""""' -LCPI5_2: +LCPI6_2: QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -LCPI5_3: +LCPI6_3: QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 
16, ' ' -LCPI5_4: +LCPI6_4: QUAD $0x2222222222222222; QUAD $0x2222222222222222 // .space 16, '""""""""""""""""' -LCPI5_5: +LCPI6_5: QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' _quote: @@ -1468,584 +1769,602 @@ _quote: WORD $0x5441 // pushq %r12 BYTE $0x53 // pushq %rbx SUBQ $40, SP - ANDL $1, R8 - TESTQ R8, R8 - LONG $0x911d8d4c; WORD $0x00a9; BYTE $0x00 // leaq $43409(%rip), %r11 /* __SingleQuoteTab(%rip) */ - LONG $0x8a358d4c; WORD $0x00b9; BYTE $0x00 // leaq $47498(%rip), %r14 /* __DoubleQuoteTab(%rip) */ - LONG $0xf3440f4d // cmoveq %r11, %r14 - MOVQ CX, R10 - MOVQ SI, R9 - ORQ $6, R8 - IMULQ SI, R8 - CMPQ R8, CX - JBE LBB5_1 - MOVQ DX, R12 - MOVQ DI, R8 - TESTQ R9, R9 - JE LBB5_118 - MOVQ 0(R10), R11 - QUAD $0xffffff150d6f7ec5 // vmovdqu $-235(%rip), %ymm9 /* LCPI5_0(%rip) */ - QUAD $0xffffff2d156f7ec5 // vmovdqu $-211(%rip), %ymm10 /* LCPI5_1(%rip) */ - QUAD $0xffffff451d6f7ec5 // vmovdqu $-187(%rip), %ymm11 /* LCPI5_2(%rip) */ + MOVQ CX, R13 + MOVQ SI, R10 + MOVQ 0(CX), SI + TESTB $1, R8 + LONG $0xd8058d4c; WORD $0x00a9; BYTE $0x00 // leaq $43480(%rip), %r8 /* __SingleQuoteTab(%rip) */ + LONG $0xd13d8d4c; WORD $0x00b9; BYTE $0x00 // leaq $47569(%rip), %r15 /* __DoubleQuoteTab(%rip) */ + LONG $0xf8440f4d // cmoveq %r8, %r15 + LEAQ 0(R10*8), AX + CMPQ SI, AX + JGE LBB6_96 + MOVQ DX, R11 + MOVQ DI, R12 + TESTQ R10, R10 + JE LBB6_118 + MOVQ SI, R14 + QUAD $0xffffff150d6f7ec5 // vmovdqu $-235(%rip), %ymm9 /* LCPI6_0(%rip) */ + QUAD $0xffffff2d156f7ec5 // vmovdqu $-211(%rip), %ymm10 /* LCPI6_1(%rip) */ + QUAD $0xffffff451d6f7ec5 // vmovdqu $-187(%rip), %ymm11 /* LCPI6_2(%rip) */ LONG $0x763d41c4; BYTE $0xc0 // vpcmpeqd %ymm8, %ymm8, %ymm8 MOVQ DI, AX MOVQ DX, -80(BP) - MOVQ DX, R12 - MOVQ R14, -48(BP) + MOVQ DX, R11 + MOVQ R15, -64(BP) -LBB5_12: - MOVQ AX, R8 - CMPQ R9, $32 +LBB6_3: + CMPQ R10, $32 SETGE CX - MOVQ R11, R14 - MOVQ R12, SI - MOVQ R9, R15 - MOVQ AX, R13 - JL LBB5_32 - CMPQ R11, $32 - JL LBB5_32 - XORL SI, SI - MOVQ R9, DX - MOVQ R11, BX + MOVQ R14, -56(BP) + MOVQ AX, -48(BP) + MOVQ R13, DX + JL LBB6_15 + MOVQ R14, R13 + MOVQ AX, R9 + MOVQ R11, R12 + MOVQ R10, R15 + CMPQ R14, $32 + JL LBB6_10 + XORL R12, R12 + MOVQ R10, AX + MOVQ -56(BP), BX -LBB5_15: - LONG $0x6f7ec1c4; WORD $0x3004 // vmovdqu (%r8,%rsi), %ymm0 +LBB6_6: + MOVQ -48(BP), CX + LONG $0x6f7ea1c4; WORD $0x2104 // vmovdqu (%rcx,%r12), %ymm0 LONG $0xc864b5c5 // vpcmpgtb %ymm0, %ymm9, %ymm1 LONG $0xd074adc5 // vpcmpeqb %ymm0, %ymm10, %ymm2 LONG $0xd874a5c5 // vpcmpeqb %ymm0, %ymm11, %ymm3 LONG $0xd2ebe5c5 // vpor %ymm2, %ymm3, %ymm2 - LONG $0x7f7ec1c4; WORD $0x3404 // vmovdqu %ymm0, (%r12,%rsi) + LONG $0x7f7e81c4; WORD $0x2304 // vmovdqu %ymm0, (%r11,%r12) LONG $0x647dc1c4; BYTE $0xc0 // vpcmpgtb %ymm8, %ymm0, %ymm0 LONG $0xc0dbf5c5 // vpand %ymm0, %ymm1, %ymm0 LONG $0xc0ebedc5 // vpor %ymm0, %ymm2, %ymm0 - LONG $0xc0d7fdc5 // vpmovmskb %ymm0, %eax - TESTL AX, AX - JNE LBB5_16 - LEAQ -32(DX), R15 - LEAQ -32(BX), R14 - ADDQ $32, SI - CMPQ DX, $64 + LONG $0xc8d7fdc5 // vpmovmskb %ymm0, %ecx + TESTL CX, CX + JNE LBB6_14 + LEAQ -32(AX), R15 + LEAQ -32(BX), R13 + ADDQ $32, R12 + CMPQ AX, $64 SETGE CX - JL LBB5_31 - MOVQ R15, DX + JL LBB6_9 + MOVQ R15, AX CMPQ BX, $63 - MOVQ R14, BX - JG LBB5_15 + MOVQ R13, BX + JG LBB6_6 -LBB5_31: - LEAQ 0(R8)(SI*1), R13 - ADDQ R12, SI +LBB6_9: + MOVQ -48(BP), AX + LEAQ 0(AX)(R12*1), R9 + ADDQ R11, R12 -LBB5_32: +LBB6_10: TESTB CX, CX - MOVQ R8, -64(BP) - JE LBB5_58 - LONG $0x6f7ec1c4; WORD $0x0045 // vmovdqu (%r13), %ymm0 - LONG 
$0xc864b5c5 // vpcmpgtb %ymm0, %ymm9, %ymm1 - LONG $0xd074adc5 // vpcmpeqb %ymm0, %ymm10, %ymm2 - LONG $0xd874a5c5 // vpcmpeqb %ymm0, %ymm11, %ymm3 - LONG $0xd2ebe5c5 // vpor %ymm2, %ymm3, %ymm2 - LONG $0x647dc1c4; BYTE $0xc0 // vpcmpgtb %ymm8, %ymm0, %ymm0 - LONG $0xc0dbf5c5 // vpand %ymm0, %ymm1, %ymm0 - LONG $0xc0ebedc5 // vpor %ymm0, %ymm2, %ymm0 - LONG $0xc0d7fdc5 // vpmovmskb %ymm0, %eax - MOVQ $4294967296, CX - ORQ CX, AX - BSFQ AX, CX - LONG $0x6f7ac1c4; WORD $0x0045 // vmovdqu (%r13), %xmm0 - LONG $0x16f9e3c4; WORD $0x01c0 // vpextrq $1, %xmm0, %rax - LONG $0x7ef9e1c4; BYTE $0xc2 // vmovq %xmm0, %rdx - CMPQ CX, R14 - JLE LBB5_34 - MOVQ R9, BX - CMPQ R14, $16 - JB LBB5_47 - MOVQ DX, 0(SI) - MOVQ AX, 8(SI) - LEAQ 16(R13), R9 - ADDQ $16, SI - LEAQ -16(R14), CX - JMP LBB5_49 + JE LBB6_16 + +LBB6_11: + LONG $0x6f7ec1c4; BYTE $0x01 // vmovdqu (%r9), %ymm0 + LONG $0xc864b5c5 // vpcmpgtb %ymm0, %ymm9, %ymm1 + LONG $0xd074adc5 // vpcmpeqb %ymm0, %ymm10, %ymm2 + LONG $0xd874a5c5 // vpcmpeqb %ymm0, %ymm11, %ymm3 + LONG $0xd2ebe5c5 // vpor %ymm2, %ymm3, %ymm2 + LONG $0x647dc1c4; BYTE $0xc0 // vpcmpgtb %ymm8, %ymm0, %ymm0 + LONG $0xc0dbf5c5 // vpand %ymm0, %ymm1, %ymm0 + LONG $0xc0ebedc5 // vpor %ymm0, %ymm2, %ymm0 + LONG $0xc0d7fdc5 // vpmovmskb %ymm0, %eax + MOVQ $4294967296, CX + ORQ CX, AX + BSFQ AX, R8 + LONG $0x6f7ac1c4; BYTE $0x01 // vmovdqu (%r9), %xmm0 + LONG $0x16f9e3c4; WORD $0x01c0 // vpextrq $1, %xmm0, %rax + LONG $0x7ef9e1c4; BYTE $0xc1 // vmovq %xmm0, %rcx + CMPQ R8, R13 + JLE LBB6_22 + CMPQ R13, $16 + JB LBB6_37 + MOVQ CX, 0(R12) + MOVQ AX, 8(R12) + LEAQ 16(R9), BX + ADDQ $16, R12 + LEAQ -16(R13), CX + CMPQ CX, $8 + JAE LBB6_52 + +LBB6_39: + CMPQ CX, $4 + JB LBB6_40 + +LBB6_53: + MOVL 0(BX), AX + MOVL AX, 0(R12) + ADDQ $4, BX + ADDQ $4, R12 + ADDQ $-4, CX + CMPQ CX, $2 + JAE LBB6_54 + +LBB6_41: + TESTQ CX, CX + JE LBB6_43 -LBB5_58: +LBB6_42: + MOVB 0(BX), AX + MOVB AX, 0(R12) + +LBB6_43: + ADDQ R9, R13 + NOTQ R13 + MOVQ -48(BP), R12 + ADDQ R12, R13 + MOVQ R13, R9 + MOVQ -64(BP), R15 + JMP LBB6_44 + +LBB6_14: + BSFL CX, R9 + ADDQ R12, R9 + MOVQ -64(BP), R15 + MOVQ -48(BP), R12 + +LBB6_44: + MOVQ -56(BP), R14 + MOVQ DX, R13 + JMP LBB6_79 + +LBB6_15: + MOVQ R14, R13 + MOVQ AX, R9 + MOVQ R11, R12 + MOVQ R10, R15 + TESTB CX, CX + JNE LBB6_11 + +LBB6_16: WORD $0xf8c5; BYTE $0x77 // vzeroupper CMPQ R15, $16 - SETGE CX - MOVQ R10, -56(BP) - MOVQ R9, -72(BP) - JL LBB5_59 - CMPQ R14, $16 - JL LBB5_59 - SUBQ R13, R8 - QUAD $0xfffffdc30d6f7ec5 // vmovdqu $-573(%rip), %ymm9 /* LCPI5_0(%rip) */ - QUAD $0xfffffddb156f7ec5 // vmovdqu $-549(%rip), %ymm10 /* LCPI5_1(%rip) */ - QUAD $0xfffffdf31d6f7ec5 // vmovdqu $-525(%rip), %ymm11 /* LCPI5_2(%rip) */ + SETGE R8 + MOVQ DX, -72(BP) + JL LBB6_24 + CMPQ R13, $16 + JL LBB6_24 + MOVQ R10, BX + MOVQ -48(BP), R10 + SUBQ R9, R10 + QUAD $0xfffffd310d6f7ec5 // vmovdqu $-719(%rip), %ymm9 /* LCPI6_0(%rip) */ + QUAD $0xfffffd49156f7ec5 // vmovdqu $-695(%rip), %ymm10 /* LCPI6_1(%rip) */ + QUAD $0xfffffd611d6f7ec5 // vmovdqu $-671(%rip), %ymm11 /* LCPI6_2(%rip) */ LONG $0x763d41c4; BYTE $0xc0 // vpcmpeqd %ymm8, %ymm8, %ymm8 - QUAD $0xfffffe063d6ffac5 // vmovdqu $-506(%rip), %xmm7 /* LCPI5_3(%rip) */ - QUAD $0xfffffe0e256ffac5 // vmovdqu $-498(%rip), %xmm4 /* LCPI5_4(%rip) */ - QUAD $0xfffffe162d6ffac5 // vmovdqu $-490(%rip), %xmm5 /* LCPI5_5(%rip) */ + QUAD $0xfffffd743d6ffac5 // vmovdqu $-652(%rip), %xmm7 /* LCPI6_3(%rip) */ + QUAD $0xfffffd7c256ffac5 // vmovdqu $-644(%rip), %xmm4 /* LCPI6_4(%rip) */ + QUAD $0xfffffd842d6ffac5 // vmovdqu $-636(%rip), %xmm5 /* 
LCPI6_5(%rip) */ LONG $0xf676c9c5 // vpcmpeqd %xmm6, %xmm6, %xmm6 -LBB5_62: - LONG $0x6f7ac1c4; WORD $0x0045 // vmovdqu (%r13), %xmm0 +LBB6_19: + LONG $0x6f7ac1c4; BYTE $0x01 // vmovdqu (%r9), %xmm0 LONG $0xc864c1c5 // vpcmpgtb %xmm0, %xmm7, %xmm1 LONG $0xd474f9c5 // vpcmpeqb %xmm4, %xmm0, %xmm2 LONG $0xdd74f9c5 // vpcmpeqb %xmm5, %xmm0, %xmm3 LONG $0xd2ebe1c5 // vpor %xmm2, %xmm3, %xmm2 - LONG $0x067ffac5 // vmovdqu %xmm0, (%rsi) + LONG $0x7f7ac1c4; WORD $0x2404 // vmovdqu %xmm0, (%r12) LONG $0xc664f9c5 // vpcmpgtb %xmm6, %xmm0, %xmm0 LONG $0xc0dbf1c5 // vpand %xmm0, %xmm1, %xmm0 LONG $0xc0ebe9c5 // vpor %xmm0, %xmm2, %xmm0 LONG $0xc0d7f9c5 // vpmovmskb %xmm0, %eax TESTL AX, AX - JNE LBB5_63 - ADDQ $16, R13 - ADDQ $16, SI - LEAQ -16(R15), R9 - LEAQ -16(R14), R10 + JNE LBB6_36 + ADDQ $16, R9 + ADDQ $16, R12 + LEAQ -16(R15), AX + LEAQ -16(R13), DX CMPQ R15, $32 - SETGE CX - JL LBB5_66 - ADDQ $-16, R8 - MOVQ R9, R15 - CMPQ R14, $31 - MOVQ R10, R14 - JG LBB5_62 - JMP LBB5_66 + SETGE R8 + JL LBB6_25 + ADDQ $-16, R10 + MOVQ AX, R15 + CMPQ R13, $31 + MOVQ DX, R13 + JG LBB6_19 + JMP LBB6_25 + +LBB6_22: + CMPL R8, $16 + JB LBB6_45 + MOVQ CX, 0(R12) + MOVQ AX, 8(R12) + LEAQ 16(R9), CX + ADDQ $16, R12 + LEAQ -16(R8), BX + MOVQ -64(BP), R15 + MOVQ DX, R13 + CMPQ BX, $8 + JAE LBB6_55 -LBB5_16: - BSFL AX, R13 - ADDQ SI, R13 - MOVQ -48(BP), R14 - MOVQ $12884901889, R15 - JMP LBB5_101 +LBB6_47: + CMPQ BX, $4 + JB LBB6_48 -LBB5_34: - MOVQ R9, R8 - CMPL CX, $16 - JB LBB5_35 - MOVQ DX, 0(SI) - MOVQ AX, 8(SI) - LEAQ 16(R13), R9 - ADDQ $16, SI - LEAQ -16(CX), BX - JMP LBB5_37 - -LBB5_59: - MOVQ R14, R10 - MOVQ R15, R9 - QUAD $0xfffffce50d6f7ec5 // vmovdqu $-795(%rip), %ymm9 /* LCPI5_0(%rip) */ - QUAD $0xfffffcfd156f7ec5 // vmovdqu $-771(%rip), %ymm10 /* LCPI5_1(%rip) */ - QUAD $0xfffffd151d6f7ec5 // vmovdqu $-747(%rip), %ymm11 /* LCPI5_2(%rip) */ +LBB6_56: + MOVL 0(CX), AX + MOVL AX, 0(R12) + ADDQ $4, CX + ADDQ $4, R12 + ADDQ $-4, BX + CMPQ BX, $2 + JAE LBB6_57 + +LBB6_49: + TESTQ BX, BX + JE LBB6_51 + +LBB6_50: + MOVB 0(CX), AX + MOVB AX, 0(R12) + +LBB6_51: + MOVQ -48(BP), R12 + SUBQ R12, R9 + ADDQ R8, R9 + JMP LBB6_78 + +LBB6_24: + MOVQ R10, BX + MOVQ R13, DX + MOVQ R15, AX + QUAD $0xfffffc190d6f7ec5 // vmovdqu $-999(%rip), %ymm9 /* LCPI6_0(%rip) */ + QUAD $0xfffffc31156f7ec5 // vmovdqu $-975(%rip), %ymm10 /* LCPI6_1(%rip) */ + QUAD $0xfffffc491d6f7ec5 // vmovdqu $-951(%rip), %ymm11 /* LCPI6_2(%rip) */ LONG $0x763d41c4; BYTE $0xc0 // vpcmpeqd %ymm8, %ymm8, %ymm8 - QUAD $0xfffffd283d6ffac5 // vmovdqu $-728(%rip), %xmm7 /* LCPI5_3(%rip) */ - QUAD $0xfffffd30256ffac5 // vmovdqu $-720(%rip), %xmm4 /* LCPI5_4(%rip) */ - QUAD $0xfffffd382d6ffac5 // vmovdqu $-712(%rip), %xmm5 /* LCPI5_5(%rip) */ + QUAD $0xfffffc5c3d6ffac5 // vmovdqu $-932(%rip), %xmm7 /* LCPI6_3(%rip) */ + QUAD $0xfffffc64256ffac5 // vmovdqu $-924(%rip), %xmm4 /* LCPI6_4(%rip) */ + QUAD $0xfffffc6c2d6ffac5 // vmovdqu $-916(%rip), %xmm5 /* LCPI6_5(%rip) */ LONG $0xf676c9c5 // vpcmpeqd %xmm6, %xmm6, %xmm6 -LBB5_66: - TESTB CX, CX - JE LBB5_67 - LONG $0x6f7ac1c4; WORD $0x0045 // vmovdqu (%r13), %xmm0 - LONG $0xc864c1c5 // vpcmpgtb %xmm0, %xmm7, %xmm1 - LONG $0xd474f9c5 // vpcmpeqb %xmm4, %xmm0, %xmm2 - LONG $0xdd74f9c5 // vpcmpeqb %xmm5, %xmm0, %xmm3 - LONG $0xd2ebe1c5 // vpor %xmm2, %xmm3, %xmm2 - LONG $0xde64f9c5 // vpcmpgtb %xmm6, %xmm0, %xmm3 - LONG $0xcbdbf1c5 // vpand %xmm3, %xmm1, %xmm1 - LONG $0xc9ebe9c5 // vpor %xmm1, %xmm2, %xmm1 - LONG $0xc1d7f9c5 // vpmovmskb %xmm1, %eax +LBB6_25: + TESTB R8, R8 + JE LBB6_29 + LONG $0x6f7ac1c4; BYTE $0x01 
// vmovdqu (%r9), %xmm0 + LONG $0xc864c1c5 // vpcmpgtb %xmm0, %xmm7, %xmm1 + LONG $0xd474f9c5 // vpcmpeqb %xmm4, %xmm0, %xmm2 + LONG $0xdd74f9c5 // vpcmpeqb %xmm5, %xmm0, %xmm3 + LONG $0xd2ebe1c5 // vpor %xmm2, %xmm3, %xmm2 + LONG $0xde64f9c5 // vpcmpgtb %xmm6, %xmm0, %xmm3 + LONG $0xcbdbf1c5 // vpand %xmm3, %xmm1, %xmm1 + LONG $0xc9ebe9c5 // vpor %xmm1, %xmm2, %xmm1 + LONG $0xc1d7f9c5 // vpmovmskb %xmm1, %eax ORL $65536, AX BSFL AX, R8 - LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq %xmm0, %rax - CMPQ R10, R8 - MOVQ $12884901889, R15 - JGE LBB5_74 - CMPQ R10, $8 - MOVQ -72(BP), R9 - MOVQ -48(BP), R14 - JB LBB5_85 - MOVQ AX, 0(SI) - LEAQ 8(R13), R8 - ADDQ $8, SI - LEAQ -8(R10), CX - JMP LBB5_87 - -LBB5_67: - TESTQ R9, R9 - LONG $0x79358d4c; WORD $0x00a6; BYTE $0x00 // leaq $42617(%rip), %r14 /* __SingleQuoteTab(%rip) */ - MOVQ $12884901889, R15 - JLE LBB5_68 - TESTQ R10, R10 - JLE LBB5_68 - XORL DX, DX - XORL CX, CX - MOVQ -64(BP), R8 + LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq %xmm0, %rax + CMPQ DX, R8 + MOVQ -64(BP), R15 + JGE LBB6_58 + CMPQ DX, $8 + MOVQ -72(BP), R13 + JB LBB6_66 + MOVQ AX, 0(R12) + LEAQ 8(R9), R8 + ADDQ $8, R12 + LEAQ -8(DX), CX + JMP LBB6_67 -LBB5_71: - MOVBLZX 0(R13)(DX*1), BX - MOVQ BX, AX - SHLQ $4, AX - CMPQ 0(AX)(R14*1), $0 - JNE LBB5_72 - LEAQ 0(R9)(CX*1), R8 - MOVB BX, 0(SI)(DX*1) - LEAQ -1(CX), AX +LBB6_29: + TESTQ AX, AX + MOVQ -64(BP), R15 + JLE LBB6_60 + TESTQ DX, DX + JLE LBB6_60 + MOVQ AX, R14 + XORL AX, AX + XORL SI, SI + MOVQ BX, R10 + +LBB6_32: + MOVQ DX, R13 + MOVBLZX 0(R9)(AX*1), CX + MOVQ CX, BX + SHLQ $4, BX + LONG $0xd7158d48; WORD $0x00a5; BYTE $0x00 // leaq $42455(%rip), %rdx /* __SingleQuoteTab(%rip) */ + CMPQ 0(BX)(DX*1), $0 + JNE LBB6_63 + LEAQ 0(R14)(SI*1), R8 + MOVB CX, 0(R12)(AX*1) + LEAQ -1(SI), BX CMPQ R8, $2 - MOVQ -64(BP), R8 - JL LBB5_96 - ADDQ R10, CX - ADDQ $1, DX - CMPQ CX, $1 - MOVQ AX, CX - JG LBB5_71 + JL LBB6_35 + MOVQ R13, DX + ADDQ R13, SI + ADDQ $1, AX + CMPQ SI, $1 + MOVQ BX, SI + JG LBB6_32 -LBB5_96: - SUBQ AX, R13 - ADDQ AX, R9 - MOVQ -56(BP), R10 - MOVQ -48(BP), R14 - JMP LBB5_97 +LBB6_35: + SUBQ BX, R9 + MOVQ R14, AX + ADDQ BX, AX + JMP LBB6_61 -LBB5_63: +LBB6_36: BSFW AX, AX - MOVWLZX AX, R13 - SUBQ R8, R13 - MOVQ -56(BP), R10 - MOVQ -72(BP), R9 - MOVQ -48(BP), R14 - MOVQ -64(BP), R8 - MOVQ $12884901889, R15 - JMP LBB5_101 - -LBB5_47: - MOVQ R13, R9 - MOVQ R14, CX - -LBB5_49: - MOVQ $12884901889, R15 + MOVWLZX AX, R9 + SUBQ R10, R9 + MOVQ -72(BP), R13 + MOVQ BX, R10 + MOVQ -64(BP), R15 + MOVQ -48(BP), R12 + JMP LBB6_78 + +LBB6_37: + MOVQ R9, BX + MOVQ R13, CX CMPQ CX, $8 - JAE LBB5_50 - CMPQ CX, $4 - JAE LBB5_52 - -LBB5_53: - CMPQ CX, $2 - JAE LBB5_54 - -LBB5_55: - TESTQ CX, CX - JE LBB5_57 + JB LBB6_39 -LBB5_56: - MOVB 0(R9), AX - MOVB AX, 0(SI) - -LBB5_57: - ADDQ R13, R14 - NOTQ R14 - ADDQ R8, R14 - MOVQ R14, R13 - MOVQ BX, R9 - MOVQ -48(BP), R14 - JMP LBB5_101 - -LBB5_35: - MOVQ R13, R9 - MOVQ CX, BX - -LBB5_37: - MOVQ -48(BP), R14 - MOVQ $12884901889, R15 - CMPQ BX, $8 - JAE LBB5_38 - CMPQ BX, $4 - JAE LBB5_40 - -LBB5_41: - CMPQ BX, $2 - JAE LBB5_42 - -LBB5_43: - TESTQ BX, BX - JE LBB5_45 - -LBB5_44: - MOVB 0(R9), AX - MOVB AX, 0(SI) - -LBB5_45: - MOVQ -64(BP), AX - SUBQ AX, R13 - ADDQ CX, R13 - MOVQ R8, R9 - MOVQ AX, R8 - JMP LBB5_101 - -LBB5_50: - MOVQ 0(R9), AX - MOVQ AX, 0(SI) - ADDQ $8, R9 - ADDQ $8, SI +LBB6_52: + MOVQ 0(BX), AX + MOVQ AX, 0(R12) + ADDQ $8, BX + ADDQ $8, R12 ADDQ $-8, CX CMPQ CX, $4 - JB LBB5_53 + JAE LBB6_53 -LBB5_52: - MOVL 0(R9), AX - MOVL AX, 0(SI) - ADDQ $4, R9 - ADDQ $4, SI - ADDQ $-4, CX +LBB6_40: 
CMPQ CX, $2 - JB LBB5_55 + JB LBB6_41 -LBB5_54: - MOVWLZX 0(R9), AX - MOVW AX, 0(SI) - ADDQ $2, R9 - ADDQ $2, SI +LBB6_54: + MOVWLZX 0(BX), AX + MOVW AX, 0(R12) + ADDQ $2, BX + ADDQ $2, R12 ADDQ $-2, CX TESTQ CX, CX - JNE LBB5_56 - JMP LBB5_57 + JNE LBB6_42 + JMP LBB6_43 -LBB5_38: - MOVQ 0(R9), AX - MOVQ AX, 0(SI) - ADDQ $8, R9 - ADDQ $8, SI +LBB6_45: + MOVQ R9, CX + MOVQ R8, BX + MOVQ -64(BP), R15 + MOVQ DX, R13 + CMPQ BX, $8 + JB LBB6_47 + +LBB6_55: + MOVQ 0(CX), AX + MOVQ AX, 0(R12) + ADDQ $8, CX + ADDQ $8, R12 ADDQ $-8, BX CMPQ BX, $4 - JB LBB5_41 + JAE LBB6_56 -LBB5_40: - MOVL 0(R9), AX - MOVL AX, 0(SI) - ADDQ $4, R9 - ADDQ $4, SI - ADDQ $-4, BX +LBB6_48: CMPQ BX, $2 - JB LBB5_43 + JB LBB6_49 -LBB5_42: - MOVWLZX 0(R9), AX - MOVW AX, 0(SI) - ADDQ $2, R9 - ADDQ $2, SI +LBB6_57: + MOVWLZX 0(CX), AX + MOVW AX, 0(R12) + ADDQ $2, CX + ADDQ $2, R12 ADDQ $-2, BX TESTQ BX, BX - JNE LBB5_44 - JMP LBB5_45 + JNE LBB6_50 + JMP LBB6_51 -LBB5_74: +LBB6_58: CMPL R8, $8 - MOVQ -48(BP), R14 - JB LBB5_75 - MOVQ AX, 0(SI) - LEAQ 8(R13), R9 - ADDQ $8, SI - LEAQ -8(R8), BX - JMP LBB5_77 + MOVQ -72(BP), R13 + JB LBB6_72 + MOVQ AX, 0(R12) + LEAQ 8(R9), R10 + ADDQ $8, R12 + LEAQ -8(R8), CX + JMP LBB6_73 -LBB5_68: - MOVQ -56(BP), R10 - MOVQ -48(BP), R14 - MOVQ -64(BP), R8 +LBB6_60: + MOVQ BX, R10 -LBB5_97: - TESTQ R9, R9 - JE LBB5_98 - NOTQ R13 - ADDQ R8, R13 - JMP LBB5_100 - -LBB5_72: - SUBQ R8, R13 - SUBQ CX, R13 - MOVQ -56(BP), R10 - MOVQ -72(BP), R9 - MOVQ -48(BP), R14 - JMP LBB5_101 +LBB6_61: + MOVQ -48(BP), R12 + MOVQ -56(BP), R14 + TESTQ AX, AX + JE LBB6_64 + NOTQ R9 + ADDQ R12, R9 + JMP LBB6_65 + +LBB6_63: + MOVQ -48(BP), R12 + SUBQ R12, R9 + SUBQ SI, R9 + MOVQ -72(BP), R13 + JMP LBB6_78 -LBB5_98: - SUBQ R8, R13 - JMP LBB5_100 +LBB6_64: + SUBQ R12, R9 -LBB5_85: - MOVQ R13, R8 - MOVQ R10, CX +LBB6_65: + MOVQ -72(BP), R13 + JMP LBB6_79 + +LBB6_66: + MOVQ R9, R8 + MOVQ DX, CX -LBB5_87: +LBB6_67: + MOVQ BX, R10 CMPQ CX, $4 - JAE LBB5_88 + JB LBB6_68 + MOVL 0(R8), AX + MOVL AX, 0(R12) + ADDQ $4, R8 + ADDQ $4, R12 + ADDQ $-4, CX CMPQ CX, $2 - JAE LBB5_90 + JAE LBB6_93 -LBB5_91: +LBB6_69: TESTQ CX, CX - JE LBB5_93 + JE LBB6_71 -LBB5_92: +LBB6_70: MOVB 0(R8), AX - MOVB AX, 0(SI) + MOVB AX, 0(R12) -LBB5_93: - ADDQ R13, R10 - NOTQ R10 - MOVQ -64(BP), R8 - ADDQ R8, R10 - MOVQ R10, R13 - MOVQ -56(BP), R10 - JMP LBB5_101 +LBB6_71: + ADDQ R9, DX + NOTQ DX + MOVQ -48(BP), R12 + ADDQ R12, DX + MOVQ DX, R9 + JMP LBB6_78 -LBB5_75: - MOVQ R13, R9 - MOVQ R8, BX +LBB6_68: + CMPQ CX, $2 + JB LBB6_69 -LBB5_77: - MOVQ -56(BP), R10 - CMPQ BX, $4 - JAE LBB5_78 - CMPQ BX, $2 - JAE LBB5_80 +LBB6_93: + MOVWLZX 0(R8), AX + MOVW AX, 0(R12) + ADDQ $2, R8 + ADDQ $2, R12 + ADDQ $-2, CX + TESTQ CX, CX + JNE LBB6_70 + JMP LBB6_71 -LBB5_81: - TESTQ BX, BX - JE LBB5_83 +LBB6_72: + MOVQ R9, R10 + MOVQ R8, CX -LBB5_82: - MOVB 0(R9), AX - MOVB AX, 0(SI) +LBB6_73: + CMPQ CX, $4 + JB LBB6_74 + MOVL 0(R10), AX + MOVL AX, 0(R12) + ADDQ $4, R10 + ADDQ $4, R12 + ADDQ $-4, CX + CMPQ CX, $2 + JAE LBB6_95 -LBB5_83: - MOVQ -64(BP), AX - SUBQ AX, R13 - ADDQ R8, R13 - MOVQ AX, R8 +LBB6_75: + TESTQ CX, CX + JE LBB6_77 -LBB5_100: - MOVQ -72(BP), R9 +LBB6_76: + MOVB 0(R10), AX + MOVB AX, 0(R12) -LBB5_101: - TESTQ R13, R13 - JS LBB5_102 - ADDQ R13, R12 - CMPQ R9, R13 - JE LBB5_117 - SUBQ R13, R11 - JMP LBB5_105 - -LBB5_115: - ADDQ SI, R12 - ADDQ $1, R13 - CMPQ R9, R13 - JE LBB5_117 - -LBB5_105: - MOVBLZX 0(R8)(R13*1), DX - SHLQ $4, DX - MOVQ 0(R14)(DX*1), AX - TESTL AX, AX - JE LBB5_116 - MOVLQSX AX, SI - SUBQ SI, R11 - JL LBB5_107 - SHLQ $32, AX - LEAQ 
0(R14)(DX*1), CX - ADDQ $8, CX - CMPQ AX, R15 - JL LBB5_109 - MOVL 0(CX), AX - MOVL AX, 0(R12) - LEAQ 0(R14)(DX*1), CX - ADDQ $12, CX - LEAQ 4(R12), BX - LEAQ -4(SI), DX - CMPQ DX, $2 - JGE LBB5_112 - JMP LBB5_113 - -LBB5_109: - MOVQ R12, BX - MOVQ SI, DX - CMPQ DX, $2 - JL LBB5_113 +LBB6_77: + MOVQ -48(BP), R12 + SUBQ R12, R9 + ADDQ R8, R9 + MOVQ BX, R10 -LBB5_112: - MOVWLZX 0(CX), AX - MOVW AX, 0(BX) - ADDQ $2, CX - ADDQ $2, BX - ADDQ $-2, DX +LBB6_78: + MOVQ -56(BP), R14 -LBB5_113: - TESTQ DX, DX - JLE LBB5_115 - MOVBLZX 0(CX), AX - MOVB AX, 0(BX) - JMP LBB5_115 - -LBB5_116: - LEAQ 0(R8)(R13*1), AX - SUBQ R13, R9 - JNE LBB5_12 - JMP LBB5_117 - -LBB5_88: - MOVL 0(R8), AX - MOVL AX, 0(SI) - ADDQ $4, R8 - ADDQ $4, SI - ADDQ $-4, CX - CMPQ CX, $2 - JB LBB5_91 +LBB6_79: + TESTQ R9, R9 + JS LBB6_121 + ADDQ R9, R11 + CMPQ R10, R9 + JE LBB6_117 + SUBQ R9, R14 + JMP LBB6_83 -LBB5_90: - MOVWLZX 0(R8), AX - MOVW AX, 0(SI) - ADDQ $2, R8 - ADDQ $2, SI - ADDQ $-2, CX - TESTQ CX, CX - JNE LBB5_92 - JMP LBB5_93 +LBB6_82: + ADDQ CX, R11 + ADDQ $1, R9 + CMPQ R10, R9 + JE LBB6_117 -LBB5_78: - MOVL 0(R9), AX - MOVL AX, 0(SI) - ADDQ $4, R9 - ADDQ $4, SI - ADDQ $-4, BX +LBB6_83: + MOVBLZX 0(R12)(R9*1), AX + SHLQ $4, AX + MOVQ 0(R15)(AX*1), BX + TESTL BX, BX + JE LBB6_91 + MOVLQSX BX, CX + SUBQ CX, R14 + JL LBB6_119 + SHLQ $32, BX + LEAQ 0(R15)(AX*1), SI + ADDQ $8, SI + MOVQ $12884901889, DX + CMPQ BX, DX + JL LBB6_87 + MOVL 0(SI), SI + MOVL SI, 0(R11) + LEAQ 0(R15)(AX*1), SI + ADDQ $12, SI + LEAQ 4(R11), R8 + LEAQ -4(CX), BX + CMPQ BX, $2 + JGE LBB6_88 + JMP LBB6_89 + +LBB6_87: + MOVQ R11, R8 + MOVQ CX, BX CMPQ BX, $2 - JB LBB5_81 + JL LBB6_89 -LBB5_80: - MOVWLZX 0(R9), AX - MOVW AX, 0(SI) - ADDQ $2, R9 +LBB6_88: + MOVWLZX 0(SI), AX + MOVW AX, 0(R8) ADDQ $2, SI + ADDQ $2, R8 ADDQ $-2, BX + +LBB6_89: TESTQ BX, BX - JNE LBB5_82 - JMP LBB5_83 + JLE LBB6_82 + MOVBLZX 0(SI), AX + MOVB AX, 0(R8) + JMP LBB6_82 -LBB5_1: - QUAD $0xfffff8500d6f7ec5 // vmovdqu $-1968(%rip), %ymm9 /* LCPI5_0(%rip) */ - QUAD $0xfffff868156f7ec5 // vmovdqu $-1944(%rip), %ymm10 /* LCPI5_1(%rip) */ - QUAD $0xfffff8801d6f7ec5 // vmovdqu $-1920(%rip), %ymm11 /* LCPI5_2(%rip) */ +LBB6_91: + LEAQ 0(R12)(R9*1), AX + SUBQ R9, R10 + JNE LBB6_3 + JMP LBB6_117 + +LBB6_74: + CMPQ CX, $2 + JB LBB6_75 + +LBB6_95: + MOVWLZX 0(R10), AX + MOVW AX, 0(R12) + ADDQ $2, R10 + ADDQ $2, R12 + ADDQ $-2, CX + TESTQ CX, CX + JNE LBB6_76 + JMP LBB6_77 + +LBB6_96: + QUAD $0xfffff82f0d6f7ec5 // vmovdqu $-2001(%rip), %ymm9 /* LCPI6_0(%rip) */ + QUAD $0xfffff847156f7ec5 // vmovdqu $-1977(%rip), %ymm10 /* LCPI6_1(%rip) */ + QUAD $0xfffff85f1d6f7ec5 // vmovdqu $-1953(%rip), %ymm11 /* LCPI6_2(%rip) */ LONG $0x763d41c4; BYTE $0xc0 // vpcmpeqd %ymm8, %ymm8, %ymm8 MOVQ DX, SI - MOVQ R9, BX + MOVQ R10, BX -LBB5_2: +LBB6_97: CMPQ BX, $32 - JL LBB5_19 - MOVQ BX, R8 - NEGQ R8 + JL LBB6_102 + MOVQ BX, CX + NEGQ CX ADDQ $32, BX -LBB5_4: +LBB6_99: LONG $0x076ffec5 // vmovdqu (%rdi), %ymm0 LONG $0xc864b5c5 // vpcmpgtb %ymm0, %ymm9, %ymm1 LONG $0xd074adc5 // vpcmpeqb %ymm0, %ymm10, %ymm2 @@ -2057,33 +2376,33 @@ LBB5_4: LONG $0xc0ebedc5 // vpor %ymm0, %ymm2, %ymm0 LONG $0xc0d7fdc5 // vpmovmskb %ymm0, %eax TESTL AX, AX - JNE LBB5_5 + JNE LBB6_111 ADDQ $32, DI ADDQ $32, SI - ADDQ $32, R8 + ADDQ $32, CX ADDQ $-32, BX CMPQ BX, $63 - JG LBB5_4 - NEGQ R8 - MOVQ R8, BX + JG LBB6_99 + NEGQ CX + MOVQ CX, BX -LBB5_19: +LBB6_102: WORD $0xf8c5; BYTE $0x77 // vzeroupper CMPQ BX, $16 - JL LBB5_20 - MOVQ BX, R8 - NEGQ R8 + JL LBB6_107 + MOVQ BX, CX + NEGQ CX ADDQ $16, BX - QUAD $0xfffff7b90d6f7ec5 // 
vmovdqu $-2119(%rip), %ymm9 /* LCPI5_0(%rip) */ - QUAD $0xfffff7d1156f7ec5 // vmovdqu $-2095(%rip), %ymm10 /* LCPI5_1(%rip) */ - QUAD $0xfffff7e91d6f7ec5 // vmovdqu $-2071(%rip), %ymm11 /* LCPI5_2(%rip) */ + QUAD $0xfffff7980d6f7ec5 // vmovdqu $-2152(%rip), %ymm9 /* LCPI6_0(%rip) */ + QUAD $0xfffff7b0156f7ec5 // vmovdqu $-2128(%rip), %ymm10 /* LCPI6_1(%rip) */ + QUAD $0xfffff7c81d6f7ec5 // vmovdqu $-2104(%rip), %ymm11 /* LCPI6_2(%rip) */ LONG $0x763d41c4; BYTE $0xc0 // vpcmpeqd %ymm8, %ymm8, %ymm8 - QUAD $0xfffff7fc3d6ffac5 // vmovdqu $-2052(%rip), %xmm7 /* LCPI5_3(%rip) */ - QUAD $0xfffff804256ffac5 // vmovdqu $-2044(%rip), %xmm4 /* LCPI5_4(%rip) */ - QUAD $0xfffff80c2d6ffac5 // vmovdqu $-2036(%rip), %xmm5 /* LCPI5_5(%rip) */ + QUAD $0xfffff7db3d6ffac5 // vmovdqu $-2085(%rip), %xmm7 /* LCPI6_3(%rip) */ + QUAD $0xfffff7e3256ffac5 // vmovdqu $-2077(%rip), %xmm4 /* LCPI6_4(%rip) */ + QUAD $0xfffff7eb2d6ffac5 // vmovdqu $-2069(%rip), %xmm5 /* LCPI6_5(%rip) */ LONG $0xf676c9c5 // vpcmpeqd %xmm6, %xmm6, %xmm6 -LBB5_27: +LBB6_104: LONG $0x076ffac5 // vmovdqu (%rdi), %xmm0 LONG $0xc864c1c5 // vpcmpgtb %xmm0, %xmm7, %xmm1 LONG $0xd474f9c5 // vpcmpeqb %xmm4, %xmm0, %xmm2 @@ -2095,66 +2414,66 @@ LBB5_27: LONG $0xc0ebe9c5 // vpor %xmm0, %xmm2, %xmm0 LONG $0xc0d7f9c5 // vpmovmskb %xmm0, %eax TESTL AX, AX - JNE LBB5_28 + JNE LBB6_112 ADDQ $16, DI ADDQ $16, SI - ADDQ $16, R8 + ADDQ $16, CX ADDQ $-16, BX CMPQ BX, $31 - JG LBB5_27 - NEGQ R8 - MOVQ R8, BX - JMP LBB5_23 + JG LBB6_104 + NEGQ CX + MOVQ CX, BX + JMP LBB6_108 -LBB5_20: - QUAD $0xfffff7360d6f7ec5 // vmovdqu $-2250(%rip), %ymm9 /* LCPI5_0(%rip) */ - QUAD $0xfffff74e156f7ec5 // vmovdqu $-2226(%rip), %ymm10 /* LCPI5_1(%rip) */ - QUAD $0xfffff7661d6f7ec5 // vmovdqu $-2202(%rip), %ymm11 /* LCPI5_2(%rip) */ +LBB6_107: + QUAD $0xfffff7150d6f7ec5 // vmovdqu $-2283(%rip), %ymm9 /* LCPI6_0(%rip) */ + QUAD $0xfffff72d156f7ec5 // vmovdqu $-2259(%rip), %ymm10 /* LCPI6_1(%rip) */ + QUAD $0xfffff7451d6f7ec5 // vmovdqu $-2235(%rip), %ymm11 /* LCPI6_2(%rip) */ LONG $0x763d41c4; BYTE $0xc0 // vpcmpeqd %ymm8, %ymm8, %ymm8 -LBB5_23: +LBB6_108: TESTQ BX, BX - JLE LBB5_9 + JLE LBB6_116 -LBB5_24: +LBB6_109: MOVBLZX 0(DI), AX MOVQ AX, CX SHLQ $4, CX - CMPQ 0(CX)(R11*1), $0 - JNE LBB5_7 + CMPQ 0(CX)(R8*1), $0 + JNE LBB6_114 LEAQ -1(BX), CX ADDQ $1, DI MOVB AX, 0(SI) ADDQ $1, SI CMPQ BX, $1 MOVQ CX, BX - JG LBB5_24 - JMP LBB5_9 + JG LBB6_109 + JMP LBB6_116 -LBB5_5: +LBB6_111: BSFL AX, AX - JMP LBB5_6 + JMP LBB6_113 -LBB5_28: +LBB6_112: BSFW AX, AX MOVWLZX AX, AX -LBB5_6: +LBB6_113: ADDQ AX, DI - ADDQ AX, R8 - NEGQ R8 + ADDQ AX, CX + NEGQ CX ADDQ AX, SI - MOVQ R8, BX - TESTQ R8, R8 - JLE LBB5_9 + MOVQ CX, BX + TESTQ CX, CX + JLE LBB6_116 -LBB5_7: +LBB6_114: MOVBLZX 0(DI), CX SHLQ $4, CX - MOVQ 0(R14)(CX*1), AX + MOVQ 0(R15)(CX*1), AX TESTL AX, AX - JE LBB5_2 - MOVQ 8(R14)(CX*1), CX + JE LBB6_97 + MOVQ 8(R15)(CX*1), CX MOVQ CX, 0(SI) ADDQ $1, DI LEAQ -1(BX), CX @@ -2162,34 +2481,34 @@ LBB5_7: ADDQ AX, SI CMPQ BX, $1 MOVQ CX, BX - JG LBB5_7 + JG LBB6_114 -LBB5_9: +LBB6_116: SUBQ DX, SI - MOVQ SI, 0(R10) - JMP LBB5_119 + MOVQ SI, 0(R13) + JMP LBB6_120 -LBB5_117: - ADDQ R13, R8 +LBB6_117: + ADDQ R9, R12 MOVQ -80(BP), DX -LBB5_118: - SUBQ DX, R12 - MOVQ R12, 0(R10) - SUBQ DI, R8 - MOVQ R8, R9 - JMP LBB5_119 - -LBB5_107: - SUBQ -80(BP), R12 - MOVQ R12, 0(R10) - SUBQ R8, DI - NOTQ R13 - ADDQ DI, R13 - MOVQ R13, R9 +LBB6_118: + SUBQ DX, R11 + MOVQ R11, 0(R13) + SUBQ DI, R12 + MOVQ R12, R10 + JMP LBB6_120 -LBB5_119: - MOVQ R9, AX +LBB6_119: + SUBQ -80(BP), R11 + MOVQ R11, 0(R13) + 
SUBQ R12, DI + NOTQ R9 + ADDQ DI, R9 + MOVQ R9, R10 + +LBB6_120: + MOVQ R10, AX ADDQ $40, SP BYTE $0x5b // popq %rbx WORD $0x5c41 // popq %r12 @@ -2200,22 +2519,22 @@ LBB5_119: WORD $0xf8c5; BYTE $0x77 // vzeroupper RET -LBB5_102: +LBB6_121: MOVQ -80(BP), AX - ADDQ R13, AX + ADDQ R9, AX NOTQ AX - ADDQ R12, AX - MOVQ AX, 0(R10) - SUBQ R8, DI - ADDQ R13, DI - MOVQ DI, R9 - JMP LBB5_119 + ADDQ R11, AX + MOVQ AX, 0(R13) + SUBQ R12, DI + ADDQ R9, DI + MOVQ DI, R10 + JMP LBB6_120 -LCPI6_0: +LCPI7_0: QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -LCPI6_1: +LCPI7_1: QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' _unquote: @@ -2228,152 +2547,152 @@ _unquote: BYTE $0x53 // pushq %rbx SUBQ $24, SP TESTQ SI, SI - JE LBB6_125 + JE LBB7_125 MOVQ CX, -48(BP) MOVQ R8, AX MOVQ R8, -64(BP) MOVL R8, R9 ANDL $1, R9 - QUAD $0xffffff9c0d6ffec5 // vmovdqu $-100(%rip), %ymm1 /* LCPI6_0(%rip) */ - QUAD $0xffffffb4156ffac5 // vmovdqu $-76(%rip), %xmm2 /* LCPI6_1(%rip) */ + QUAD $0xffffff9c0d6ffec5 // vmovdqu $-100(%rip), %ymm1 /* LCPI7_0(%rip) */ + QUAD $0xffffffb4156ffac5 // vmovdqu $-76(%rip), %xmm2 /* LCPI7_1(%rip) */ MOVQ DI, R11 MOVQ SI, R13 MOVQ DX, R8 -LBB6_22: +LBB7_22: CMPB 0(R11), $92 - JNE LBB6_24 + JNE LBB7_24 XORL R14, R14 - JMP LBB6_41 + JMP LBB7_41 -LBB6_24: +LBB7_24: MOVQ R13, R15 MOVQ R8, AX MOVQ R11, R14 CMPQ R13, $32 - JL LBB6_29 + JL LBB7_29 XORL AX, AX MOVQ R13, BX -LBB6_26: +LBB7_26: LONG $0x6f7ec1c4; WORD $0x0304 // vmovdqu (%r11,%rax), %ymm0 LONG $0x7f7ec1c4; WORD $0x0004 // vmovdqu %ymm0, (%r8,%rax) LONG $0xc174fdc5 // vpcmpeqb %ymm1, %ymm0, %ymm0 LONG $0xc8d7fdc5 // vpmovmskb %ymm0, %ecx TESTL CX, CX - JNE LBB6_39 + JNE LBB7_39 LEAQ -32(BX), R15 ADDQ $32, AX CMPQ BX, $63 MOVQ R15, BX - JG LBB6_26 + JG LBB7_26 LEAQ 0(R11)(AX*1), R14 ADDQ R8, AX -LBB6_29: +LBB7_29: WORD $0xf8c5; BYTE $0x77 // vzeroupper CMPQ R15, $16 - JL LBB6_33 + JL LBB7_33 MOVQ R11, R12 SUBQ R14, R12 - QUAD $0xffffff1e0d6ffec5 // vmovdqu $-226(%rip), %ymm1 /* LCPI6_0(%rip) */ - QUAD $0xffffff36156ffac5 // vmovdqu $-202(%rip), %xmm2 /* LCPI6_1(%rip) */ + QUAD $0xffffff1e0d6ffec5 // vmovdqu $-226(%rip), %ymm1 /* LCPI7_0(%rip) */ + QUAD $0xffffff36156ffac5 // vmovdqu $-202(%rip), %xmm2 /* LCPI7_1(%rip) */ -LBB6_31: +LBB7_31: LONG $0x6f7ac1c4; BYTE $0x06 // vmovdqu (%r14), %xmm0 LONG $0x007ffac5 // vmovdqu %xmm0, (%rax) LONG $0xc274f9c5 // vpcmpeqb %xmm2, %xmm0, %xmm0 LONG $0xd8d7f9c5 // vpmovmskb %xmm0, %ebx TESTL BX, BX - JNE LBB6_40 + JNE LBB7_40 ADDQ $16, R14 ADDQ $16, AX LEAQ -16(R15), R10 ADDQ $-16, R12 CMPQ R15, $31 MOVQ R10, R15 - JG LBB6_31 - JMP LBB6_34 + JG LBB7_31 + JMP LBB7_34 -LBB6_33: +LBB7_33: MOVQ R15, R10 - QUAD $0xfffffedb0d6ffec5 // vmovdqu $-293(%rip), %ymm1 /* LCPI6_0(%rip) */ - QUAD $0xfffffef3156ffac5 // vmovdqu $-269(%rip), %xmm2 /* LCPI6_1(%rip) */ + QUAD $0xfffffedb0d6ffec5 // vmovdqu $-293(%rip), %ymm1 /* LCPI7_0(%rip) */ + QUAD $0xfffffef3156ffac5 // vmovdqu $-269(%rip), %xmm2 /* LCPI7_1(%rip) */ -LBB6_34: +LBB7_34: TESTQ R10, R10 - JE LBB6_126 + JE LBB7_126 XORL BX, BX -LBB6_36: +LBB7_36: MOVBLZX 0(R14)(BX*1), CX CMPB CX, $92 - JE LBB6_38 + JE LBB7_38 MOVB CX, 0(AX)(BX*1) ADDQ $1, BX CMPQ R10, BX - JNE LBB6_36 - JMP LBB6_126 + JNE LBB7_36 + JMP LBB7_126 -LBB6_38: +LBB7_38: SUBQ R11, R14 ADDQ BX, R14 CMPQ R14, $-1 - JNE LBB6_41 - JMP LBB6_126 + JNE LBB7_41 + JMP LBB7_126 -LBB6_39: +LBB7_39: 
BSFL CX, R14 ADDQ AX, R14 CMPQ R14, $-1 - JNE LBB6_41 - JMP LBB6_126 + JNE LBB7_41 + JMP LBB7_126 -LBB6_40: +LBB7_40: BSFW BX, AX MOVWLZX AX, R14 SUBQ R12, R14 CMPQ R14, $-1 - JE LBB6_126 + JE LBB7_126 -LBB6_41: +LBB7_41: LEAQ 2(R14), AX SUBQ AX, R13 - JS LBB6_164 + JS LBB7_164 ADDQ R14, R11 ADDQ $2, R11 TESTQ R9, R9 - JNE LBB6_55 + JNE LBB7_55 -LBB6_43: +LBB7_43: ADDQ R14, R8 MOVBLZX -1(R11), AX - LONG $0xca0d8d48; WORD $0x00be; BYTE $0x00 // leaq $48842(%rip), %rcx /* __UnquoteTab(%rip) */ + LONG $0xf20d8d48; WORD $0x00be; BYTE $0x00 // leaq $48882(%rip), %rcx /* __UnquoteTab(%rip) */ MOVB 0(AX)(CX*1), AX CMPB AX, $-1 - JE LBB6_46 + JE LBB7_46 TESTB AX, AX - JE LBB6_142 + JE LBB7_142 MOVB AX, 0(R8) ADDQ $1, R8 TESTQ R13, R13 - JNE LBB6_22 - JMP LBB6_141 + JNE LBB7_22 + JMP LBB7_141 -LBB6_46: +LBB7_46: CMPQ R13, $3 - JLE LBB6_164 + JLE LBB7_164 MOVL 0(R11), R14 MOVL R14, BX NOTL BX LEAL -808464432(R14), AX ANDL $-2139062144, BX TESTL AX, BX - JNE LBB6_128 + JNE LBB7_128 LEAL 421075225(R14), AX ORL R14, AX TESTL $-2139062144, AX - JNE LBB6_128 + JNE LBB7_128 MOVL R14, AX ANDL $2139062143, AX MOVL $-1061109568, CX @@ -2382,13 +2701,13 @@ LBB6_46: LEAL 1179010630(AX), DX ANDL BX, CX TESTL DX, CX - JNE LBB6_128 + JNE LBB7_128 MOVL $-522133280, CX SUBL AX, CX ADDL $960051513, AX ANDL CX, BX TESTL AX, BX - JNE LBB6_128 + JNE LBB7_128 MOVQ R15, DX BSWAPL R14 MOVL R14, AX @@ -2409,39 +2728,39 @@ LBB6_46: LEAQ 4(R11), R12 LEAQ -4(R13), CX CMPL R14, $128 - JB LBB6_63 + JB LBB7_63 TESTQ R9, R9 - JNE LBB6_65 + JNE LBB7_65 TESTB $2, -64(BP) - JE LBB6_86 + JE LBB7_86 XORL BX, BX -LBB6_2: +LBB7_2: CMPL R14, $2048 - JB LBB6_84 + JB LBB7_84 MOVL R14, AX ANDL $-2048, AX CMPL AX, $55296 - JNE LBB6_81 + JNE LBB7_81 CMPQ CX, $6 - JL LBB6_15 + JL LBB7_15 CMPL R14, $56319 - JA LBB6_15 + JA LBB7_15 CMPB 4(R11)(BX*1), $92 - JNE LBB6_15 + JNE LBB7_15 CMPB 5(R11)(BX*1), $117 - JNE LBB6_15 + JNE LBB7_15 MOVL 6(R11)(BX*1), R12 MOVL R12, R10 NOTL R10 LEAL -808464432(R12), AX ANDL $-2139062144, R10 TESTL AX, R10 - JNE LBB6_143 + JNE LBB7_143 LEAL 421075225(R12), AX ORL R12, AX TESTL $-2139062144, AX - JNE LBB6_143 + JNE LBB7_143 MOVL R12, AX ANDL $2139062143, AX MOVL $-1061109568, DX @@ -2452,13 +2771,13 @@ LBB6_2: MOVL -52(BP), DX ANDL R10, DX TESTL DX, -56(BP) - JNE LBB6_143 + JNE LBB7_143 MOVL $-522133280, DX SUBL AX, DX ADDL $960051513, AX ANDL DX, R10 TESTL AX, R10 - JNE LBB6_143 + JNE LBB7_143 BSWAPL R12 MOVL R12, AX SHRL $4, AX @@ -2477,7 +2796,7 @@ LBB6_2: ORL DX, R10 ANDL $16515072, AX CMPL AX, $14417920 - JE LBB6_18 + JE LBB7_18 MOVQ R15, DX MOVW $-16401, 0(R8) MOVB $-67, 2(R8) @@ -2486,84 +2805,84 @@ LBB6_2: ADDQ $-6, CX MOVL R10, R14 CMPL R10, $127 - JA LBB6_2 + JA LBB7_2 -LBB6_14: +LBB7_14: LEAQ 0(R11)(BX*1), R12 ADDQ $4, R12 - JMP LBB6_64 + JMP LBB7_64 -LBB6_55: +LBB7_55: TESTL R13, R13 - JE LBB6_164 + JE LBB7_164 CMPB -1(R11), $92 - JNE LBB6_156 + JNE LBB7_156 CMPB 0(R11), $92 - JNE LBB6_62 + JNE LBB7_62 CMPL R13, $1 - JLE LBB6_164 + JLE LBB7_164 MOVB 1(R11), AX CMPB AX, $34 - JE LBB6_61 + JE LBB7_61 CMPB AX, $92 - JNE LBB6_158 + JNE LBB7_158 -LBB6_61: +LBB7_61: ADDQ $1, R11 ADDQ $-1, R13 -LBB6_62: +LBB7_62: ADDQ $1, R11 ADDQ $-1, R13 - JMP LBB6_43 + JMP LBB7_43 -LBB6_63: +LBB7_63: MOVL R14, R10 -LBB6_64: +LBB7_64: MOVB R10, 0(R8) ADDQ $1, R8 MOVQ CX, R13 MOVQ R12, R11 TESTQ R13, R13 - JNE LBB6_22 - JMP LBB6_141 + JNE LBB7_22 + JMP LBB7_141 -LBB6_65: +LBB7_65: TESTB $2, -64(BP) - JE LBB6_98 + JE LBB7_98 XORL BX, BX -LBB6_67: +LBB7_67: CMPL R14, $2048 - JB LBB6_84 + JB LBB7_84 MOVL R14, AX ANDL $-2048, AX 
CMPL AX, $55296 - JNE LBB6_81 + JNE LBB7_81 TESTQ CX, CX - JLE LBB6_162 + JLE LBB7_162 CMPB 4(R11)(BX*1), $92 - JNE LBB6_107 + JNE LBB7_107 CMPQ CX, $7 - JL LBB6_105 + JL LBB7_105 CMPL R14, $56319 - JA LBB6_105 + JA LBB7_105 CMPB 5(R11)(BX*1), $92 - JNE LBB6_105 + JNE LBB7_105 CMPB 6(R11)(BX*1), $117 - JNE LBB6_105 + JNE LBB7_105 MOVL 7(R11)(BX*1), R12 MOVL R12, R10 NOTL R10 LEAL -808464432(R12), AX ANDL $-2139062144, R10 TESTL AX, R10 - JNE LBB6_159 + JNE LBB7_159 LEAL 421075225(R12), AX ORL R12, AX TESTL $-2139062144, AX - JNE LBB6_159 + JNE LBB7_159 MOVL R12, AX ANDL $2139062143, AX MOVL $-1061109568, DX @@ -2574,13 +2893,13 @@ LBB6_67: MOVL -52(BP), DX ANDL R10, DX TESTL DX, -56(BP) - JNE LBB6_159 + JNE LBB7_159 MOVL $-522133280, DX SUBL AX, DX ADDL $960051513, AX ANDL DX, R10 TESTL AX, R10 - JNE LBB6_159 + JNE LBB7_159 BSWAPL R12 MOVL R12, AX SHRL $4, AX @@ -2599,7 +2918,7 @@ LBB6_67: ORL DX, R10 ANDL $16515072, AX CMPL AX, $14417920 - JE LBB6_108 + JE LBB7_108 MOVW $-16401, 0(R8) MOVB $-67, 2(R8) ADDQ $3, R8 @@ -2608,14 +2927,14 @@ LBB6_67: MOVL R10, R14 CMPL R10, $128 MOVQ R15, DX - JAE LBB6_67 - JMP LBB6_14 + JAE LBB7_67 + JMP LBB7_14 -LBB6_81: +LBB7_81: LEAQ 0(R11)(BX*1), R12 ADDQ $4, R12 -LBB6_82: +LBB7_82: MOVL R14, AX SHRL $12, AX ORB $-32, AX @@ -2632,14 +2951,14 @@ LBB6_82: MOVQ CX, R13 MOVQ R12, R11 TESTQ R13, R13 - JNE LBB6_22 - JMP LBB6_141 + JNE LBB7_22 + JMP LBB7_141 -LBB6_84: +LBB7_84: LEAQ 0(R11)(BX*1), R12 ADDQ $4, R12 -LBB6_85: +LBB7_85: MOVL R14, AX SHRL $6, AX ORB $-64, AX @@ -2651,34 +2970,34 @@ LBB6_85: MOVQ CX, R13 MOVQ R12, R11 TESTQ R13, R13 - JNE LBB6_22 - JMP LBB6_141 + JNE LBB7_22 + JMP LBB7_141 -LBB6_86: +LBB7_86: CMPL R14, $2048 - JB LBB6_85 + JB LBB7_85 ANDL $16252928, R10 CMPL R10, $14155776 - JNE LBB6_82 + JNE LBB7_82 CMPQ R13, $10 - JL LBB6_101 + JL LBB7_101 CMPL R14, $56319 - JA LBB6_101 + JA LBB7_101 CMPB 0(R12), $92 - JNE LBB6_101 + JNE LBB7_101 CMPB 5(R11), $117 - JNE LBB6_101 + JNE LBB7_101 MOVL 6(R11), R10 MOVL R10, BX NOTL BX LEAL -808464432(R10), AX ANDL $-2139062144, BX TESTL AX, BX - JNE LBB6_144 + JNE LBB7_144 LEAL 421075225(R10), AX ORL R10, AX TESTL $-2139062144, AX - JNE LBB6_144 + JNE LBB7_144 MOVL R10, AX ANDL $2139062143, AX MOVL $-1061109568, CX @@ -2686,13 +3005,13 @@ LBB6_86: LEAL 1179010630(AX), DX ANDL BX, CX TESTL DX, CX - JNE LBB6_144 + JNE LBB7_144 MOVL $-522133280, CX SUBL AX, CX ADDL $960051513, AX ANDL CX, BX TESTL AX, BX - JNE LBB6_144 + JNE LBB7_144 BSWAPL R10 MOVL R10, AX SHRL $4, AX @@ -2708,38 +3027,38 @@ LBB6_86: MOVL AX, CX ANDL $16515072, CX CMPL CX, $14417920 - JNE LBB6_124 + JNE LBB7_124 MOVL AX, CX SHRL $8, CX ANDL $65280, CX MOVBLZX AX, R10 ORL CX, R10 - JMP LBB6_19 + JMP LBB7_19 -LBB6_15: +LBB7_15: ADDQ BX, R11 ADDQ $4, R11 -LBB6_16: +LBB7_16: TESTB $2, -64(BP) - JE LBB6_160 + JE LBB7_160 -LBB6_17: +LBB7_17: MOVW $-16401, 0(R8) MOVB $-67, 2(R8) ADDQ $3, R8 MOVQ CX, R13 - JMP LBB6_21 + JMP LBB7_21 -LBB6_18: +LBB7_18: ADDQ BX, R11 ADDQ $10, R11 SUBQ BX, R13 -LBB6_19: +LBB7_19: ADDQ $-10, R13 -LBB6_20: +LBB7_20: SHLL $10, R14 MOVL R10, AX ADDL R14, AX @@ -2763,79 +3082,79 @@ LBB6_20: MOVB AX, 3(R8) ADDQ $4, R8 -LBB6_21: +LBB7_21: MOVQ R15, DX TESTQ R13, R13 - JNE LBB6_22 - JMP LBB6_141 + JNE LBB7_22 + JMP LBB7_141 -LBB6_98: +LBB7_98: CMPL R14, $2048 - JB LBB6_110 + JB LBB7_110 ANDL $16252928, R10 CMPL R10, $14155776 - JE LBB6_111 + JE LBB7_111 MOVQ R15, DX - JMP LBB6_82 + JMP LBB7_82 -LBB6_101: +LBB7_101: MOVQ R12, R11 - JMP LBB6_16 + JMP LBB7_16 -LBB6_105: +LBB7_105: LEAQ 0(R11)(BX*1), R12 ADDQ $5, R12 -LBB6_106: 
+LBB7_106: ADDQ $-1, CX MOVQ R12, R11 - JMP LBB6_16 + JMP LBB7_16 -LBB6_107: +LBB7_107: ADDQ BX, R11 ADDQ $4, R11 TESTB $2, -64(BP) - JNE LBB6_17 - JMP LBB6_124 + JNE LBB7_17 + JMP LBB7_124 -LBB6_108: +LBB7_108: ADDQ BX, R11 ADDQ $11, R11 SUBQ BX, R13 -LBB6_109: +LBB7_109: ADDQ $-11, R13 - JMP LBB6_20 + JMP LBB7_20 -LBB6_110: +LBB7_110: MOVQ R15, DX - JMP LBB6_85 + JMP LBB7_85 -LBB6_111: +LBB7_111: CMPQ R13, $5 - JL LBB6_162 + JL LBB7_162 CMPB 0(R12), $92 - JNE LBB6_123 + JNE LBB7_123 LEAQ 5(R11), R12 CMPQ R13, $11 - JL LBB6_106 + JL LBB7_106 CMPL R14, $56319 - JA LBB6_106 + JA LBB7_106 CMPB 0(R12), $92 - JNE LBB6_106 + JNE LBB7_106 CMPB 6(R11), $117 - JNE LBB6_106 + JNE LBB7_106 MOVL 7(R11), R10 MOVL R10, BX NOTL BX LEAL -808464432(R10), AX ANDL $-2139062144, BX TESTL AX, BX - JNE LBB6_144 + JNE LBB7_144 LEAL 421075225(R10), AX ORL R10, AX TESTL $-2139062144, AX - JNE LBB6_144 + JNE LBB7_144 MOVL R10, AX ANDL $2139062143, AX MOVL $-1061109568, CX @@ -2843,13 +3162,13 @@ LBB6_111: LEAL 1179010630(AX), DX ANDL BX, CX TESTL DX, CX - JNE LBB6_144 + JNE LBB7_144 MOVL $-522133280, CX SUBL AX, CX ADDL $960051513, AX ANDL CX, BX TESTL AX, BX - JNE LBB6_144 + JNE LBB7_144 BSWAPL R10 MOVL R10, AX SHRL $4, AX @@ -2865,32 +3184,32 @@ LBB6_111: MOVL AX, CX ANDL $16515072, CX CMPL CX, $14417920 - JNE LBB6_124 + JNE LBB7_124 MOVL AX, CX SHRL $8, CX ANDL $65280, CX MOVBLZX AX, R10 ORL CX, R10 - JMP LBB6_109 + JMP LBB7_109 -LBB6_123: +LBB7_123: MOVQ R12, R11 TESTB $2, -64(BP) - JNE LBB6_17 + JNE LBB7_17 -LBB6_124: +LBB7_124: SUBQ DI, R11 - JMP LBB6_161 + JMP LBB7_161 -LBB6_125: +LBB7_125: XORL R13, R13 MOVQ DX, R8 -LBB6_126: +LBB7_126: ADDQ R13, R8 SUBQ DX, R8 -LBB6_127: +LBB7_127: MOVQ R8, AX ADDQ $24, SP BYTE $0x5b // popq %rbx @@ -2902,7 +3221,7 @@ LBB6_127: WORD $0xf8c5; BYTE $0x77 // vzeroupper RET -LBB6_128: +LBB7_128: MOVQ R11, AX SUBQ DI, AX MOVQ -48(BP), SI @@ -2910,87 +3229,87 @@ LBB6_128: MOVB 0(R11), CX LEAL -48(CX), DX CMPB DX, $10 - JB LBB6_131 + JB LBB7_131 MOVQ $-2, R8 ADDB $-65, CX CMPB CX, $37 - JA LBB6_127 + JA LBB7_127 MOVBLZX CX, CX MOVQ $270582939711, DX BTQ CX, DX - JAE LBB6_127 + JAE LBB7_127 -LBB6_131: +LBB7_131: LEAQ 1(AX), CX MOVQ CX, 0(SI) MOVB 1(R11), CX LEAL -48(CX), DX CMPB DX, $9 - JBE LBB6_134 + JBE LBB7_134 MOVQ $-2, R8 ADDB $-65, CX CMPB CX, $37 - JA LBB6_127 + JA LBB7_127 MOVBLZX CX, CX MOVQ $270582939711, DX BTQ CX, DX - JAE LBB6_127 + JAE LBB7_127 -LBB6_134: +LBB7_134: LEAQ 2(AX), CX MOVQ CX, 0(SI) MOVB 2(R11), CX LEAL -48(CX), DX CMPB DX, $10 - JB LBB6_137 + JB LBB7_137 MOVQ $-2, R8 ADDB $-65, CX CMPB CX, $37 - JA LBB6_127 + JA LBB7_127 MOVBLZX CX, CX MOVQ $270582939711, DX BTQ CX, DX - JAE LBB6_127 + JAE LBB7_127 -LBB6_137: +LBB7_137: LEAQ 3(AX), CX MOVQ CX, 0(SI) MOVB 3(R11), CX LEAL -48(CX), DX CMPB DX, $10 - JB LBB6_140 + JB LBB7_140 MOVQ $-2, R8 ADDB $-65, CX CMPB CX, $37 - JA LBB6_127 + JA LBB7_127 MOVBLZX CX, CX MOVQ $270582939711, DX BTQ CX, DX - JAE LBB6_127 + JAE LBB7_127 -LBB6_140: +LBB7_140: ADDQ $4, AX MOVQ AX, 0(SI) MOVQ $-2, R8 - JMP LBB6_127 + JMP LBB7_127 -LBB6_141: +LBB7_141: XORL R13, R13 - JMP LBB6_126 + JMP LBB7_126 -LBB6_142: +LBB7_142: NOTQ DI ADDQ DI, R11 MOVQ -48(BP), AX MOVQ R11, 0(AX) MOVQ $-3, R8 - JMP LBB6_127 + JMP LBB7_127 -LBB6_143: +LBB7_143: LEAQ 0(R11)(BX*1), R12 ADDQ $4, R12 -LBB6_144: +LBB7_144: MOVQ R12, AX SUBQ DI, AX ADDQ $2, AX @@ -2999,142 +3318,142 @@ LBB6_144: MOVB 2(R12), CX LEAL -48(CX), DX CMPB DX, $10 - JB LBB6_147 + JB LBB7_147 MOVQ $-2, R8 ADDB $-65, CX CMPB CX, $37 - JA LBB6_127 + JA LBB7_127 MOVBLZX CX, CX MOVQ 
$270582939711, DX BTQ CX, DX MOVQ -48(BP), SI - JAE LBB6_127 + JAE LBB7_127 -LBB6_147: +LBB7_147: LEAQ 1(AX), CX MOVQ CX, 0(SI) MOVB 3(R12), CX LEAL -48(CX), DX CMPB DX, $9 - JBE LBB6_150 + JBE LBB7_150 MOVQ $-2, R8 ADDB $-65, CX CMPB CX, $37 - JA LBB6_127 + JA LBB7_127 MOVBLZX CX, CX MOVQ $270582939711, DX BTQ CX, DX MOVQ -48(BP), SI - JAE LBB6_127 + JAE LBB7_127 -LBB6_150: +LBB7_150: LEAQ 2(AX), CX MOVQ CX, 0(SI) MOVB 4(R12), CX LEAL -48(CX), DX CMPB DX, $10 - JB LBB6_153 + JB LBB7_153 MOVQ $-2, R8 ADDB $-65, CX CMPB CX, $37 - JA LBB6_127 + JA LBB7_127 MOVBLZX CX, CX MOVQ $270582939711, DX BTQ CX, DX MOVQ -48(BP), SI - JAE LBB6_127 + JAE LBB7_127 -LBB6_153: +LBB7_153: LEAQ 3(AX), CX MOVQ CX, 0(SI) MOVB 5(R12), CX LEAL -48(CX), DX CMPB DX, $10 - JB LBB6_140 + JB LBB7_140 MOVQ $-2, R8 ADDB $-65, CX CMPB CX, $37 - JA LBB6_127 + JA LBB7_127 MOVBLZX CX, CX MOVQ $270582939711, DX BTQ CX, DX MOVQ -48(BP), SI - JB LBB6_140 - JMP LBB6_127 + JB LBB7_140 + JMP LBB7_127 -LBB6_156: +LBB7_156: NOTQ DI ADDQ DI, R11 -LBB6_157: +LBB7_157: MOVQ -48(BP), AX MOVQ R11, 0(AX) MOVQ $-2, R8 - JMP LBB6_127 + JMP LBB7_127 -LBB6_158: +LBB7_158: SUBQ DI, R11 ADDQ $1, R11 - JMP LBB6_157 + JMP LBB7_157 -LBB6_159: +LBB7_159: LEAQ 0(R11)(BX*1), R12 ADDQ $5, R12 - JMP LBB6_144 + JMP LBB7_144 -LBB6_160: +LBB7_160: ADDQ DI, R9 SUBQ R9, R11 -LBB6_161: +LBB7_161: ADDQ $-4, R11 MOVQ -48(BP), AX MOVQ R11, 0(AX) MOVQ $-4, R8 - JMP LBB6_127 + JMP LBB7_127 -LBB6_162: +LBB7_162: TESTB $2, -64(BP) - JE LBB6_164 + JE LBB7_164 MOVW $-16401, 0(R8) MOVB $-67, 2(R8) ADDQ $3, R8 XORL R13, R13 MOVQ R15, DX - JMP LBB6_126 + JMP LBB7_126 -LBB6_164: +LBB7_164: MOVQ -48(BP), AX MOVQ SI, 0(AX) MOVQ $-1, R8 - JMP LBB6_127 + JMP LBB7_127 -LCPI7_0: +LCPI8_0: QUAD $0x2626262626262626; QUAD $0x2626262626262626 // .space 16, '&&&&&&&&&&&&&&&&' QUAD $0x2626262626262626; QUAD $0x2626262626262626 // .space 16, '&&&&&&&&&&&&&&&&' -LCPI7_1: +LCPI8_1: QUAD $0xe2e2e2e2e2e2e2e2; QUAD $0xe2e2e2e2e2e2e2e2 // .space 16, '\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2' QUAD $0xe2e2e2e2e2e2e2e2; QUAD $0xe2e2e2e2e2e2e2e2 // .space 16, '\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2' -LCPI7_2: +LCPI8_2: QUAD $0xfdfdfdfdfdfdfdfd; QUAD $0xfdfdfdfdfdfdfdfd // .space 16, '\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd' QUAD $0xfdfdfdfdfdfdfdfd; QUAD $0xfdfdfdfdfdfdfdfd // .space 16, '\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd' -LCPI7_3: +LCPI8_3: QUAD $0x3c3c3c3c3c3c3c3c; QUAD $0x3c3c3c3c3c3c3c3c // .space 16, '<<<<<<<<<<<<<<<<' QUAD $0x3c3c3c3c3c3c3c3c; QUAD $0x3c3c3c3c3c3c3c3c // .space 16, '<<<<<<<<<<<<<<<<' -LCPI7_4: +LCPI8_4: QUAD $0x2626262626262626; QUAD $0x2626262626262626 // .space 16, '&&&&&&&&&&&&&&&&' -LCPI7_5: +LCPI8_5: QUAD $0xe2e2e2e2e2e2e2e2; QUAD $0xe2e2e2e2e2e2e2e2 // .space 16, '\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2' -LCPI7_6: +LCPI8_6: QUAD $0xfdfdfdfdfdfdfdfd; QUAD $0xfdfdfdfdfdfdfdfd // .space 16, '\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd\xfd' -LCPI7_7: +LCPI8_7: QUAD $0x3c3c3c3c3c3c3c3c; QUAD $0x3c3c3c3c3c3c3c3c // .space 16, '<<<<<<<<<<<<<<<<' _html_escape: @@ -3152,34 +3471,34 @@ _html_escape: MOVQ DI, -48(BP) MOVQ DI, AX TESTQ SI, SI - JLE LBB7_106 + JLE LBB8_106 MOVQ -64(BP), AX MOVQ 0(AX), R9 - QUAD $0xffffff051d6ffec5 // vmovdqu $-251(%rip), %ymm3 /* LCPI7_0(%rip) */ - QUAD $0xffffff1d256ffec5 // vmovdqu $-227(%rip), %ymm4 /* LCPI7_1(%rip) */ - QUAD $0xffffff352d6ffec5 // vmovdqu $-203(%rip), %ymm5 /* LCPI7_2(%rip) */ 
- QUAD $0xffffff4d356ffec5 // vmovdqu $-179(%rip), %ymm6 /* LCPI7_3(%rip) */ - LONG $0x45358d4c; WORD $0x00b4; BYTE $0x00 // leaq $46149(%rip), %r14 /* __HtmlQuoteTab(%rip) */ + QUAD $0xffffff051d6ffec5 // vmovdqu $-251(%rip), %ymm3 /* LCPI8_0(%rip) */ + QUAD $0xffffff1d256ffec5 // vmovdqu $-227(%rip), %ymm4 /* LCPI8_1(%rip) */ + QUAD $0xffffff352d6ffec5 // vmovdqu $-203(%rip), %ymm5 /* LCPI8_2(%rip) */ + QUAD $0xffffff4d356ffec5 // vmovdqu $-179(%rip), %ymm6 /* LCPI8_3(%rip) */ + LONG $0x6d358d4c; WORD $0x00b4; BYTE $0x00 // leaq $46189(%rip), %r14 /* __HtmlQuoteTab(%rip) */ MOVQ -48(BP), R11 MOVQ -56(BP), R15 -LBB7_2: +LBB8_2: TESTQ R9, R9 - JLE LBB7_3 + JLE LBB8_3 CMPQ SI, $32 SETGE CX MOVQ R9, AX MOVQ R15, R8 MOVQ SI, R10 MOVQ R11, R12 - JL LBB7_12 + JL LBB8_12 CMPQ R9, $32 - JL LBB7_12 + JL LBB8_12 XORL R8, R8 MOVQ SI, BX MOVQ R9, DI -LBB7_7: +LBB8_7: LONG $0x6f7e81c4; WORD $0x0304 // vmovdqu (%r11,%r8), %ymm0 LONG $0xcb74fdc5 // vpcmpeqb %ymm3, %ymm0, %ymm1 LONG $0xd474fdc5 // vpcmpeqb %ymm4, %ymm0, %ymm2 @@ -3190,25 +3509,25 @@ LBB7_7: LONG $0x7f7e81c4; WORD $0x0704 // vmovdqu %ymm0, (%r15,%r8) LONG $0xc1d7fdc5 // vpmovmskb %ymm1, %eax TESTL AX, AX - JNE LBB7_8 + JNE LBB8_8 LEAQ -32(BX), R10 LEAQ -32(DI), AX ADDQ $32, R8 CMPQ BX, $64 SETGE CX - JL LBB7_11 + JL LBB8_11 MOVQ R10, BX CMPQ DI, $63 MOVQ AX, DI - JG LBB7_7 + JG LBB8_7 -LBB7_11: +LBB8_11: LEAQ 0(R11)(R8*1), R12 ADDQ R15, R8 -LBB7_12: +LBB8_12: TESTB CX, CX - JE LBB7_38 + JE LBB8_38 LONG $0x6f7ec1c4; WORD $0x2404 // vmovdqu (%r12), %ymm0 LONG $0xcb74fdc5 // vpcmpeqb %ymm3, %ymm0, %ymm1 LONG $0xd474fdc5 // vpcmpeqb %ymm4, %ymm0, %ymm2 @@ -3224,37 +3543,37 @@ LBB7_12: LONG $0x16f9e3c4; WORD $0x01c1 // vpextrq $1, %xmm0, %rcx LONG $0x7ef9e1c4; BYTE $0xc7 // vmovq %xmm0, %rdi CMPQ R13, AX - JLE LBB7_14 + JLE LBB8_14 CMPQ AX, $16 - JB LBB7_27 + JB LBB8_27 MOVQ DI, 0(R8) MOVQ CX, 8(R8) LEAQ 16(R12), R10 ADDQ $16, R8 LEAQ -16(AX), R14 CMPQ R14, $8 - JAE LBB7_30 - JMP LBB7_31 + JAE LBB8_30 + JMP LBB8_31 -LBB7_38: +LBB8_38: WORD $0xf8c5; BYTE $0x77 // vzeroupper CMPQ R10, $16 SETGE CX - JL LBB7_39 + JL LBB8_39 CMPQ AX, $16 - QUAD $0xfffffe3f3d6ffac5 // vmovdqu $-449(%rip), %xmm7 /* LCPI7_4(%rip) */ - QUAD $0xfffffe47056f7ac5 // vmovdqu $-441(%rip), %xmm8 /* LCPI7_5(%rip) */ - QUAD $0xfffffe4f0d6f7ac5 // vmovdqu $-433(%rip), %xmm9 /* LCPI7_6(%rip) */ - QUAD $0xfffffe57156f7ac5 // vmovdqu $-425(%rip), %xmm10 /* LCPI7_7(%rip) */ - JL LBB7_41 + QUAD $0xfffffe3f3d6ffac5 // vmovdqu $-449(%rip), %xmm7 /* LCPI8_4(%rip) */ + QUAD $0xfffffe47056f7ac5 // vmovdqu $-441(%rip), %xmm8 /* LCPI8_5(%rip) */ + QUAD $0xfffffe4f0d6f7ac5 // vmovdqu $-433(%rip), %xmm9 /* LCPI8_6(%rip) */ + QUAD $0xfffffe57156f7ac5 // vmovdqu $-425(%rip), %xmm10 /* LCPI8_7(%rip) */ + JL LBB8_41 MOVQ R11, DI SUBQ R12, DI - QUAD $0xfffffd931d6ffec5 // vmovdqu $-621(%rip), %ymm3 /* LCPI7_0(%rip) */ - QUAD $0xfffffdab256ffec5 // vmovdqu $-597(%rip), %ymm4 /* LCPI7_1(%rip) */ - QUAD $0xfffffdc32d6ffec5 // vmovdqu $-573(%rip), %ymm5 /* LCPI7_2(%rip) */ - QUAD $0xfffffddb356ffec5 // vmovdqu $-549(%rip), %ymm6 /* LCPI7_3(%rip) */ + QUAD $0xfffffd931d6ffec5 // vmovdqu $-621(%rip), %ymm3 /* LCPI8_0(%rip) */ + QUAD $0xfffffdab256ffec5 // vmovdqu $-597(%rip), %ymm4 /* LCPI8_1(%rip) */ + QUAD $0xfffffdc32d6ffec5 // vmovdqu $-573(%rip), %ymm5 /* LCPI8_2(%rip) */ + QUAD $0xfffffddb356ffec5 // vmovdqu $-549(%rip), %ymm6 /* LCPI8_3(%rip) */ -LBB7_43: +LBB8_43: LONG $0x6f7ac1c4; WORD $0x2404 // vmovdqu (%r12), %xmm0 LONG $0xcf74f9c5 // vpcmpeqb %xmm7, %xmm0, %xmm1 LONG $0xd074b9c5 // 
vpcmpeqb %xmm0, %xmm8, %xmm2 @@ -3265,95 +3584,95 @@ LBB7_43: LONG $0x7f7ac1c4; BYTE $0x00 // vmovdqu %xmm0, (%r8) LONG $0xc9d7f9c5 // vpmovmskb %xmm1, %ecx TESTL CX, CX - JNE LBB7_44 + JNE LBB8_44 ADDQ $16, R12 ADDQ $16, R8 LEAQ -16(R10), R14 LEAQ -16(AX), R13 CMPQ R10, $32 SETGE CX - JL LBB7_47 + JL LBB8_47 ADDQ $-16, DI MOVQ R14, R10 CMPQ AX, $31 MOVQ R13, AX - JG LBB7_43 - JMP LBB7_47 + JG LBB8_43 + JMP LBB8_47 -LBB7_8: +LBB8_8: BSFL AX, R12 ADDQ R8, R12 - JMP LBB7_83 + JMP LBB8_83 -LBB7_14: +LBB8_14: CMPL R13, $16 - JB LBB7_15 + JB LBB8_15 MOVQ DI, 0(R8) MOVQ CX, 8(R8) LEAQ 16(R12), R10 ADDQ $16, R8 LEAQ -16(R13), AX CMPQ AX, $8 - JAE LBB7_18 - JMP LBB7_19 + JAE LBB8_18 + JMP LBB8_19 -LBB7_44: +LBB8_44: BSFW CX, AX MOVWLZX AX, R12 SUBQ DI, R12 - JMP LBB7_82 + JMP LBB8_82 -LBB7_27: +LBB8_27: MOVQ R12, R10 MOVQ AX, R14 CMPQ R14, $8 - JB LBB7_31 + JB LBB8_31 -LBB7_30: +LBB8_30: MOVQ 0(R10), CX MOVQ CX, 0(R8) ADDQ $8, R10 ADDQ $8, R8 ADDQ $-8, R14 -LBB7_31: +LBB8_31: CMPQ R14, $4 - JAE LBB7_32 + JAE LBB8_32 CMPQ R14, $2 - JAE LBB7_34 + JAE LBB8_34 -LBB7_35: +LBB8_35: TESTQ R14, R14 - JE LBB7_37 + JE LBB8_37 -LBB7_36: +LBB8_36: MOVB 0(R10), CX MOVB CX, 0(R8) -LBB7_37: +LBB8_37: ADDQ R12, AX NOTQ AX ADDQ R11, AX MOVQ AX, R12 - JMP LBB7_82 + JMP LBB8_82 -LBB7_39: +LBB8_39: MOVQ AX, R13 MOVQ R10, R14 - QUAD $0xfffffc791d6ffec5 // vmovdqu $-903(%rip), %ymm3 /* LCPI7_0(%rip) */ - QUAD $0xfffffc91256ffec5 // vmovdqu $-879(%rip), %ymm4 /* LCPI7_1(%rip) */ - QUAD $0xfffffca92d6ffec5 // vmovdqu $-855(%rip), %ymm5 /* LCPI7_2(%rip) */ - QUAD $0xfffffcc1356ffec5 // vmovdqu $-831(%rip), %ymm6 /* LCPI7_3(%rip) */ - QUAD $0xfffffcd93d6ffac5 // vmovdqu $-807(%rip), %xmm7 /* LCPI7_4(%rip) */ - QUAD $0xfffffce1056f7ac5 // vmovdqu $-799(%rip), %xmm8 /* LCPI7_5(%rip) */ - QUAD $0xfffffce90d6f7ac5 // vmovdqu $-791(%rip), %xmm9 /* LCPI7_6(%rip) */ - QUAD $0xfffffcf1156f7ac5 // vmovdqu $-783(%rip), %xmm10 /* LCPI7_7(%rip) */ - -LBB7_47: + QUAD $0xfffffc791d6ffec5 // vmovdqu $-903(%rip), %ymm3 /* LCPI8_0(%rip) */ + QUAD $0xfffffc91256ffec5 // vmovdqu $-879(%rip), %ymm4 /* LCPI8_1(%rip) */ + QUAD $0xfffffca92d6ffec5 // vmovdqu $-855(%rip), %ymm5 /* LCPI8_2(%rip) */ + QUAD $0xfffffcc1356ffec5 // vmovdqu $-831(%rip), %ymm6 /* LCPI8_3(%rip) */ + QUAD $0xfffffcd93d6ffac5 // vmovdqu $-807(%rip), %xmm7 /* LCPI8_4(%rip) */ + QUAD $0xfffffce1056f7ac5 // vmovdqu $-799(%rip), %xmm8 /* LCPI8_5(%rip) */ + QUAD $0xfffffce90d6f7ac5 // vmovdqu $-791(%rip), %xmm9 /* LCPI8_6(%rip) */ + QUAD $0xfffffcf1156f7ac5 // vmovdqu $-783(%rip), %xmm10 /* LCPI8_7(%rip) */ + +LBB8_47: TESTB CX, CX - JE LBB7_48 + JE LBB8_48 -LBB7_58: +LBB8_58: LONG $0x6f7ac1c4; WORD $0x2404 // vmovdqu (%r12), %xmm0 LONG $0xcf74f9c5 // vpcmpeqb %xmm7, %xmm0, %xmm1 LONG $0xd074b9c5 // vpcmpeqb %xmm0, %xmm8, %xmm2 @@ -3366,266 +3685,266 @@ LBB7_58: BSFL AX, AX LONG $0x7ef9e1c4; BYTE $0xc1 // vmovq %xmm0, %rcx CMPQ R13, AX - JGE LBB7_59 + JGE LBB8_59 CMPQ R13, $8 - LONG $0x55358d4c; WORD $0x00b1; BYTE $0x00 // leaq $45397(%rip), %r14 /* __HtmlQuoteTab(%rip) */ - JB LBB7_70 + LONG $0x7d358d4c; WORD $0x00b1; BYTE $0x00 // leaq $45437(%rip), %r14 /* __HtmlQuoteTab(%rip) */ + JB LBB8_70 MOVQ CX, 0(R8) LEAQ 8(R12), AX ADDQ $8, R8 LEAQ -8(R13), DI CMPQ DI, $4 - JAE LBB7_73 - JMP LBB7_74 + JAE LBB8_73 + JMP LBB8_74 -LBB7_41: +LBB8_41: MOVQ AX, R13 MOVQ R10, R14 - QUAD $0xfffffbc31d6ffec5 // vmovdqu $-1085(%rip), %ymm3 /* LCPI7_0(%rip) */ - QUAD $0xfffffbdb256ffec5 // vmovdqu $-1061(%rip), %ymm4 /* LCPI7_1(%rip) */ - QUAD $0xfffffbf32d6ffec5 // vmovdqu $-1037(%rip), %ymm5 
/* LCPI7_2(%rip) */ - QUAD $0xfffffc0b356ffec5 // vmovdqu $-1013(%rip), %ymm6 /* LCPI7_3(%rip) */ + QUAD $0xfffffbc31d6ffec5 // vmovdqu $-1085(%rip), %ymm3 /* LCPI8_0(%rip) */ + QUAD $0xfffffbdb256ffec5 // vmovdqu $-1061(%rip), %ymm4 /* LCPI8_1(%rip) */ + QUAD $0xfffffbf32d6ffec5 // vmovdqu $-1037(%rip), %ymm5 /* LCPI8_2(%rip) */ + QUAD $0xfffffc0b356ffec5 // vmovdqu $-1013(%rip), %ymm6 /* LCPI8_3(%rip) */ TESTB CX, CX - JNE LBB7_58 + JNE LBB8_58 -LBB7_48: +LBB8_48: TESTQ R14, R14 - JLE LBB7_56 + JLE LBB8_56 TESTQ R13, R13 - JLE LBB7_56 + JLE LBB8_56 XORL CX, CX XORL AX, AX -LBB7_51: +LBB8_51: MOVBLZX 0(R12)(CX*1), DI CMPQ DI, $62 - JA LBB7_52 + JA LBB8_52 MOVQ $5764607797912141824, DX BTQ DI, DX - JB LBB7_80 + JB LBB8_80 -LBB7_52: +LBB8_52: CMPB DI, $-30 - JE LBB7_80 + JE LBB8_80 LEAQ 0(R14)(AX*1), DX MOVB DI, 0(R8)(CX*1) LEAQ -1(AX), DI CMPQ DX, $2 - JL LBB7_55 + JL LBB8_55 ADDQ R13, AX ADDQ $1, CX CMPQ AX, $1 MOVQ DI, AX - JG LBB7_51 + JG LBB8_51 -LBB7_55: +LBB8_55: SUBQ DI, R12 ADDQ DI, R14 -LBB7_56: +LBB8_56: TESTQ R14, R14 - JE LBB7_57 + JE LBB8_57 NOTQ R12 ADDQ R11, R12 - JMP LBB7_82 + JMP LBB8_82 -LBB7_15: +LBB8_15: MOVQ R12, R10 MOVQ R13, AX CMPQ AX, $8 - JB LBB7_19 + JB LBB8_19 -LBB7_18: +LBB8_18: MOVQ 0(R10), CX MOVQ CX, 0(R8) ADDQ $8, R10 ADDQ $8, R8 ADDQ $-8, AX -LBB7_19: +LBB8_19: CMPQ AX, $4 - JAE LBB7_20 + JAE LBB8_20 CMPQ AX, $2 - JAE LBB7_22 + JAE LBB8_22 -LBB7_23: +LBB8_23: TESTQ AX, AX - JE LBB7_25 + JE LBB8_25 -LBB7_24: +LBB8_24: MOVB 0(R10), AX MOVB AX, 0(R8) -LBB7_25: +LBB8_25: SUBQ R11, R12 ADDQ R13, R12 - JMP LBB7_83 + JMP LBB8_83 -LBB7_32: +LBB8_32: MOVL 0(R10), CX MOVL CX, 0(R8) ADDQ $4, R10 ADDQ $4, R8 ADDQ $-4, R14 CMPQ R14, $2 - JB LBB7_35 + JB LBB8_35 -LBB7_34: +LBB8_34: MOVWLZX 0(R10), CX MOVW CX, 0(R8) ADDQ $2, R10 ADDQ $2, R8 ADDQ $-2, R14 TESTQ R14, R14 - JNE LBB7_36 - JMP LBB7_37 + JNE LBB8_36 + JMP LBB8_37 -LBB7_20: +LBB8_20: MOVL 0(R10), CX MOVL CX, 0(R8) ADDQ $4, R10 ADDQ $4, R8 ADDQ $-4, AX CMPQ AX, $2 - JB LBB7_23 + JB LBB8_23 -LBB7_22: +LBB8_22: MOVWLZX 0(R10), CX MOVW CX, 0(R8) ADDQ $2, R10 ADDQ $2, R8 ADDQ $-2, AX TESTQ AX, AX - JNE LBB7_24 - JMP LBB7_25 + JNE LBB8_24 + JMP LBB8_25 -LBB7_59: +LBB8_59: CMPL AX, $8 - LONG $0xd4358d4c; WORD $0x00af; BYTE $0x00 // leaq $45012(%rip), %r14 /* __HtmlQuoteTab(%rip) */ - JB LBB7_60 + LONG $0xfc358d4c; WORD $0x00af; BYTE $0x00 // leaq $45052(%rip), %r14 /* __HtmlQuoteTab(%rip) */ + JB LBB8_60 MOVQ CX, 0(R8) LEAQ 8(R12), R10 ADDQ $8, R8 LEAQ -8(AX), DI CMPQ DI, $4 - JAE LBB7_63 - JMP LBB7_64 + JAE LBB8_63 + JMP LBB8_64 -LBB7_80: +LBB8_80: SUBQ R11, R12 SUBQ AX, R12 - JMP LBB7_82 + JMP LBB8_82 -LBB7_70: +LBB8_70: MOVQ R12, AX MOVQ R13, DI CMPQ DI, $4 - JB LBB7_74 + JB LBB8_74 -LBB7_73: +LBB8_73: MOVL 0(AX), CX MOVL CX, 0(R8) ADDQ $4, AX ADDQ $4, R8 ADDQ $-4, DI -LBB7_74: +LBB8_74: CMPQ DI, $2 - JAE LBB7_75 + JAE LBB8_75 TESTQ DI, DI - JE LBB7_78 + JE LBB8_78 -LBB7_77: +LBB8_77: MOVB 0(AX), AX MOVB AX, 0(R8) -LBB7_78: +LBB8_78: ADDQ R12, R13 NOTQ R13 ADDQ R11, R13 MOVQ R13, R12 - JMP LBB7_83 + JMP LBB8_83 -LBB7_60: +LBB8_60: MOVQ R12, R10 MOVQ AX, DI CMPQ DI, $4 - JB LBB7_64 + JB LBB8_64 -LBB7_63: +LBB8_63: MOVL 0(R10), CX MOVL CX, 0(R8) ADDQ $4, R10 ADDQ $4, R8 ADDQ $-4, DI -LBB7_64: +LBB8_64: CMPQ DI, $2 - JAE LBB7_65 + JAE LBB8_65 TESTQ DI, DI - JE LBB7_68 + JE LBB8_68 -LBB7_67: +LBB8_67: MOVB 0(R10), CX MOVB CX, 0(R8) -LBB7_68: +LBB8_68: SUBQ R11, R12 ADDQ AX, R12 - JMP LBB7_83 + JMP LBB8_83 -LBB7_75: +LBB8_75: MOVWLZX 0(AX), CX MOVW CX, 0(R8) ADDQ $2, AX ADDQ $2, R8 ADDQ $-2, DI TESTQ DI, DI - JNE 
LBB7_77 - JMP LBB7_78 + JNE LBB8_77 + JMP LBB8_78 -LBB7_65: +LBB8_65: MOVWLZX 0(R10), CX MOVW CX, 0(R8) ADDQ $2, R10 ADDQ $2, R8 ADDQ $-2, DI TESTQ DI, DI - JNE LBB7_67 - JMP LBB7_68 + JNE LBB8_67 + JMP LBB8_68 -LBB7_57: +LBB8_57: SUBQ R11, R12 -LBB7_82: - LONG $0xfe358d4c; WORD $0x00ae; BYTE $0x00 // leaq $44798(%rip), %r14 /* __HtmlQuoteTab(%rip) */ +LBB8_82: + LONG $0x26358d4c; WORD $0x00af; BYTE $0x00 // leaq $44838(%rip), %r14 /* __HtmlQuoteTab(%rip) */ -LBB7_83: +LBB8_83: TESTQ R12, R12 - JS LBB7_84 + JS LBB8_84 ADDQ R12, R11 ADDQ R12, R15 SUBQ R12, SI - JLE LBB7_86 + JLE LBB8_86 SUBQ R12, R9 MOVB 0(R11), CX CMPB CX, $-30 - JE LBB7_89 + JE LBB8_89 MOVQ R11, AX -LBB7_93: +LBB8_93: MOVBLZX CX, DI SHLQ $4, DI MOVQ 0(DI)(R14*1), DX MOVLQSX DX, BX SUBQ BX, R9 - JL LBB7_94 + JL LBB8_94 SHLQ $32, DX LEAQ 0(DI)(R14*1), R8 ADDQ $8, R8 MOVQ $12884901889, CX CMPQ DX, CX - JL LBB7_98 + JL LBB8_98 MOVL 0(R8), CX MOVL CX, 0(R15) LEAQ 0(DI)(R14*1), R8 @@ -3633,64 +3952,64 @@ LBB7_93: LEAQ 4(R15), R10 LEAQ -4(BX), DI CMPQ DI, $2 - JGE LBB7_101 - JMP LBB7_102 + JGE LBB8_101 + JMP LBB8_102 -LBB7_98: +LBB8_98: MOVQ R15, R10 MOVQ BX, DI CMPQ DI, $2 - JL LBB7_102 + JL LBB8_102 -LBB7_101: +LBB8_101: MOVWLZX 0(R8), DX MOVW DX, 0(R10) ADDQ $2, R8 ADDQ $2, R10 ADDQ $-2, DI -LBB7_102: +LBB8_102: TESTQ DI, DI - JLE LBB7_104 + JLE LBB8_104 MOVB 0(R8), CX MOVB CX, 0(R10) -LBB7_104: +LBB8_104: ADDQ BX, R15 -LBB7_105: +LBB8_105: ADDQ $1, AX LEAQ -1(SI), CX MOVQ AX, R11 CMPQ SI, $1 MOVQ CX, SI - JG LBB7_2 - JMP LBB7_106 + JG LBB8_2 + JMP LBB8_106 -LBB7_89: +LBB8_89: CMPQ SI, $3 - JL LBB7_95 + JL LBB8_95 CMPB 1(R11), $-128 - JNE LBB7_95 + JNE LBB8_95 MOVB 2(R11), CX MOVL CX, AX ANDB $-2, AX CMPB AX, $-88 - JNE LBB7_95 + JNE LBB8_95 LEAQ 2(R11), AX ADDQ $-2, SI - JMP LBB7_93 + JMP LBB8_93 -LBB7_95: +LBB8_95: TESTQ R9, R9 - JLE LBB7_3 + JLE LBB8_3 MOVB $-30, 0(R15) ADDQ $1, R15 ADDQ $-1, R9 MOVQ R11, AX - JMP LBB7_105 + JMP LBB8_105 -LBB7_84: +LBB8_84: MOVQ -56(BP), CX ADDQ R12, CX NOTQ CX @@ -3700,29 +4019,29 @@ LBB7_84: MOVQ -48(BP), AX SUBQ R11, AX ADDQ R12, AX - JMP LBB7_107 + JMP LBB8_107 -LBB7_86: +LBB8_86: MOVQ R11, AX -LBB7_106: +LBB8_106: SUBQ -56(BP), R15 MOVQ -64(BP), CX MOVQ R15, 0(CX) SUBQ -48(BP), AX - JMP LBB7_107 + JMP LBB8_107 -LBB7_94: +LBB8_94: SUBQ -56(BP), R15 MOVQ -64(BP), AX MOVQ R15, 0(AX) -LBB7_3: +LBB8_3: NOTQ R11 ADDQ -48(BP), R11 MOVQ R11, AX -LBB7_107: +LBB8_107: ADDQ $24, SP BYTE $0x5b // popq %rbx WORD $0x5c41 // popq %r12 @@ -3741,35 +4060,35 @@ _atof_eisel_lemire64: BYTE $0x53 // pushq %rbx LEAL 348(SI), AX CMPL AX, $695 - JA LBB8_1 + JA LBB9_1 MOVQ CX, R8 MOVL DX, R9 TESTQ DI, DI - JE LBB8_4 + JE LBB9_4 BSRQ DI, R10 XORQ $63, R10 - JMP LBB8_5 + JMP LBB9_5 -LBB8_4: +LBB9_4: MOVL $64, R10 -LBB8_5: +LBB9_5: MOVL R10, CX SHLQ CX, DI MOVL AX, CX SHLQ $4, CX - LONG $0x853d8d4c; WORD $0x0035; BYTE $0x00 // leaq $13701(%rip), %r15 /* _POW10_M128_TAB(%rip) */ + LONG $0x9c3d8d4c; WORD $0x0035; BYTE $0x00 // leaq $13724(%rip), %r15 /* _POW10_M128_TAB(%rip) */ MOVQ DI, AX MULQ 8(CX)(R15*1) MOVQ AX, R11 MOVQ DX, R14 ANDL $511, DX CMPQ DX, $511 - JNE LBB8_11 + JNE LBB9_11 MOVQ DI, BX NOTQ BX CMPQ R11, BX - JBE LBB8_11 + JBE LBB9_11 MOVQ DI, AX MULQ 0(CX)(R15*1) ADDQ DX, R11 @@ -3777,27 +4096,27 @@ LBB8_5: MOVL R14, DX ANDL $511, DX CMPQ DX, $511 - JNE LBB8_11 + JNE LBB9_11 CMPQ R11, $-1 - JNE LBB8_11 + JNE LBB9_11 CMPQ AX, BX - JA LBB8_1 + JA LBB9_1 -LBB8_11: +LBB9_11: MOVQ R14, AX SHRQ $63, AX LEAL 9(AX), CX SHRQ CX, R14 TESTQ R11, R11 - JNE LBB8_15 + JNE LBB9_15 TESTQ DX, DX - JNE LBB8_15 + JNE 
LBB9_15 MOVL R14, CX ANDL $3, CX CMPL CX, $1 - JE LBB8_1 + JE LBB9_1 -LBB8_15: +LBB9_15: LONG $0x526ace69; WORD $0x0003 // imull $217706, %esi, %ecx SARL $16, CX ADDL $1087, CX @@ -3813,19 +4132,19 @@ LBB8_15: SBBQ $0, AX LEAQ -1(AX), SI CMPQ SI, $2045 - JBE LBB8_17 + JBE LBB9_17 -LBB8_1: +LBB9_1: XORL AX, AX -LBB8_18: +LBB9_18: BYTE $0x5b // popq %rbx WORD $0x5e41 // popq %r14 WORD $0x5f41 // popq %r15 BYTE $0x5d // popq %rbp RET -LBB8_17: +LBB9_17: CMPQ CX, $1 MOVB $2, CX SBBB $0, CX @@ -3840,21 +4159,21 @@ LBB8_17: LONG $0xc1450f48 // cmovneq %rcx, %rax MOVQ AX, 0(R8) MOVB $1, AX - JMP LBB8_18 + JMP LBB9_18 -LCPI9_0: +LCPI10_0: QUAD $0x0000000000000000 // .space 8, '\x00\x00\x00\x00\x00\x00\x00\x00' QUAD $0x0000000000000001 // .quad 1 QUAD $0x0000000000000001 // .quad 1 QUAD $0x0000000000000001 // .quad 1 -LCPI9_1: +LCPI10_1: QUAD $0x0000000000000001 // .quad 1 -LCPI9_2: +LCPI10_2: QUAD $0x0000000000002710 // .quad 10000 -LCPI9_3: +LCPI10_3: QUAD $0x000000000000000a // .quad 10 _decimal_to_f64: @@ -3870,150 +4189,150 @@ _decimal_to_f64: MOVQ DI, R15 MOVQ $4503599627370496, R13 CMPL 16(DI), $0 - JE LBB9_4 + JE LBB10_4 MOVQ $9218868437227405312, R14 MOVL 20(R15), AX XORL R12, R12 CMPL AX, $310 - JG LBB9_69 + JG LBB10_69 CMPL AX, $-330 - JGE LBB9_5 + JGE LBB10_5 XORL R14, R14 - JMP LBB9_69 + JMP LBB10_69 -LBB9_4: +LBB10_4: XORL R14, R14 XORL R12, R12 - JMP LBB9_69 + JMP LBB10_69 -LBB9_5: +LBB10_5: TESTL AX, AX MOVQ BX, -48(BP) - JLE LBB9_12 + JLE LBB10_12 XORL R12, R12 - LONG $0x7a358d4c; WORD $0x005f; BYTE $0x00 // leaq $24442(%rip), %r14 /* _POW_TAB(%rip) */ - JMP LBB9_8 + LONG $0x91358d4c; WORD $0x005f; BYTE $0x00 // leaq $24465(%rip), %r14 /* _POW_TAB(%rip) */ + JMP LBB10_8 -LBB9_10: +LBB10_10: MOVL AX, AX MOVL 0(R14)(AX*4), BX CMPL 16(R15), $0 - JE LBB9_7 + JE LBB10_7 -LBB9_11: +LBB10_11: MOVQ R15, DI MOVL BX, SI - LONG $0x003267e8; BYTE $0x00 // callq _right_shift + LONG $0x00327ee8; BYTE $0x00 // callq _right_shift -LBB9_7: +LBB10_7: ADDL BX, R12 MOVL 20(R15), AX TESTL AX, AX - JLE LBB9_12 + JLE LBB10_12 -LBB9_8: +LBB10_8: CMPL AX, $8 - JLE LBB9_10 + JLE LBB10_10 MOVL $27, BX CMPL 16(R15), $0 - JNE LBB9_11 - JMP LBB9_7 + JNE LBB10_11 + JMP LBB10_7 -LBB9_12: - LONG $0x3c358d4c; WORD $0x005f; BYTE $0x00 // leaq $24380(%rip), %r14 /* _POW_TAB(%rip) */ - JMP LBB9_14 +LBB10_12: + LONG $0x53358d4c; WORD $0x005f; BYTE $0x00 // leaq $24403(%rip), %r14 /* _POW_TAB(%rip) */ + JMP LBB10_14 -LBB9_18: +LBB10_18: MOVL $27, BX CMPL 16(R15), $0 - JE LBB9_13 + JE LBB10_13 -LBB9_20: +LBB10_20: MOVQ R15, DI MOVL BX, SI - LONG $0x00308be8; BYTE $0x00 // callq _left_shift + LONG $0x0030a2e8; BYTE $0x00 // callq _left_shift MOVL 20(R15), AX -LBB9_13: +LBB10_13: SUBL BX, R12 -LBB9_14: +LBB10_14: TESTL AX, AX - JS LBB9_17 - JNE LBB9_21 + JS LBB10_17 + JNE LBB10_21 MOVQ 0(R15), CX CMPB 0(CX), $53 - JL LBB9_19 - JMP LBB9_21 + JL LBB10_19 + JMP LBB10_21 -LBB9_17: +LBB10_17: CMPL AX, $-8 - JL LBB9_18 + JL LBB10_18 -LBB9_19: +LBB10_19: MOVL AX, CX NEGL CX MOVL 0(R14)(CX*4), BX CMPL 16(R15), $0 - JNE LBB9_20 - JMP LBB9_13 + JNE LBB10_20 + JMP LBB10_13 -LBB9_21: +LBB10_21: CMPL R12, $-1022 - JG LBB9_27 + JG LBB10_27 CMPL 16(R15), $0 MOVQ -48(BP), BX - JE LBB9_29 + JE LBB10_29 CMPL R12, $-1082 - JG LBB9_30 + JG LBB10_30 ADDL $961, R12 -LBB9_25: +LBB10_25: MOVQ R15, DI MOVL $60, SI - LONG $0x0031cce8; BYTE $0x00 // callq _right_shift + LONG $0x0031e3e8; BYTE $0x00 // callq _right_shift ADDL $60, R12 CMPL R12, $-120 - JL LBB9_25 + JL LBB10_25 ADDL $60, R12 - JMP LBB9_31 + JMP LBB10_31 -LBB9_27: +LBB10_27: CMPL R12, 
$1024 MOVQ -48(BP), BX - JG LBB9_66 + JG LBB10_66 ADDL $-1, R12 MOVL R12, R14 - JMP LBB9_32 + JMP LBB10_32 -LBB9_29: +LBB10_29: MOVL $-1022, R14 - JMP LBB9_34 + JMP LBB10_34 -LBB9_30: +LBB10_30: ADDL $1021, R12 -LBB9_31: +LBB10_31: NEGL R12 MOVQ R15, DI MOVL R12, SI - LONG $0x003185e8; BYTE $0x00 // callq _right_shift + LONG $0x00319ce8; BYTE $0x00 // callq _right_shift MOVL $-1022, R14 -LBB9_32: +LBB10_32: CMPL 16(R15), $0 - JE LBB9_34 + JE LBB10_34 MOVQ R15, DI MOVL $53, SI - LONG $0x002fcce8; BYTE $0x00 // callq _left_shift + LONG $0x002fe3e8; BYTE $0x00 // callq _left_shift -LBB9_34: +LBB10_34: MOVL 20(R15), AX MOVQ $-1, R12 CMPL AX, $20 - JG LBB9_68 + JG LBB10_68 TESTL AX, AX - JLE LBB9_40 + JLE LBB10_40 MOVL 16(R15), DX XORL SI, SI TESTL DX, DX @@ -4024,9 +4343,9 @@ LBB9_34: LEAL 1(R9), R8 XORL R12, R12 -LBB9_37: +LBB10_37: CMPQ DX, SI - JE LBB9_41 + JE LBB10_41 LEAQ 0(R12)(R12*4), DI MOVQ 0(R15), CX MOVBQSX 0(CX)(SI*1), CX @@ -4034,26 +4353,26 @@ LBB9_37: ADDQ $-48, R12 ADDQ $1, SI CMPQ AX, SI - JNE LBB9_37 + JNE LBB10_37 MOVL R8, R9 - JMP LBB9_41 + JMP LBB10_41 -LBB9_40: +LBB10_40: XORL R9, R9 XORL R12, R12 -LBB9_41: +LBB10_41: CMPL AX, R9 - JLE LBB9_54 + JLE LBB10_54 MOVL AX, DX SUBL R9, DX CMPL DX, $16 - JB LBB9_52 + JB LBB10_52 MOVL DX, R8 - QUAD $0xfffffdc3056ffac5 // vmovdqu $-573(%rip), %xmm0 /* LCPI9_0(%rip) */ + QUAD $0xfffffdc3056ffac5 // vmovdqu $-573(%rip), %xmm0 /* LCPI10_0(%rip) */ LONG $0x22f9c3c4; WORD $0x00c4 // vpinsrq $0, %r12, %xmm0, %xmm0 ANDL $-16, R8 - QUAD $0xfffdaf05027de3c4; WORD $0xf0ff // vpblendd $240, $-593(%rip), %ymm0, %ymm0 /* LCPI9_0(%rip) */ + QUAD $0xfffdaf05027de3c4; WORD $0xf0ff // vpblendd $240, $-593(%rip), %ymm0, %ymm0 /* LCPI10_0(%rip) */ LEAL -16(R8), CX MOVL CX, DI SHRL $4, DI @@ -4061,21 +4380,21 @@ LBB9_41: MOVL DI, SI ANDL $3, SI CMPL CX, $48 - JAE LBB9_45 - QUAD $0xfffdb015597de2c4; BYTE $0xff // vpbroadcastq $-592(%rip), %ymm2 /* LCPI9_1(%rip) */ + JAE LBB10_45 + QUAD $0xfffdb015597de2c4; BYTE $0xff // vpbroadcastq $-592(%rip), %ymm2 /* LCPI10_1(%rip) */ LONG $0xda6ffdc5 // vmovdqa %ymm2, %ymm3 LONG $0xca6ffdc5 // vmovdqa %ymm2, %ymm1 - JMP LBB9_47 + JMP LBB10_47 -LBB9_45: +LBB10_45: ANDL $-4, DI NEGL DI - QUAD $0xfffd9815597de2c4; BYTE $0xff // vpbroadcastq $-616(%rip), %ymm2 /* LCPI9_1(%rip) */ - QUAD $0xfffd9725597de2c4; BYTE $0xff // vpbroadcastq $-617(%rip), %ymm4 /* LCPI9_2(%rip) */ + QUAD $0xfffd9815597de2c4; BYTE $0xff // vpbroadcastq $-616(%rip), %ymm2 /* LCPI10_1(%rip) */ + QUAD $0xfffd9725597de2c4; BYTE $0xff // vpbroadcastq $-617(%rip), %ymm4 /* LCPI10_2(%rip) */ LONG $0xda6ffdc5 // vmovdqa %ymm2, %ymm3 LONG $0xca6ffdc5 // vmovdqa %ymm2, %ymm1 -LBB9_46: +LBB10_46: LONG $0xecf4fdc5 // vpmuludq %ymm4, %ymm0, %ymm5 LONG $0xd073fdc5; BYTE $0x20 // vpsrlq $32, %ymm0, %ymm0 LONG $0xc4f4fdc5 // vpmuludq %ymm4, %ymm0, %ymm0 @@ -4097,15 +4416,15 @@ LBB9_46: LONG $0xf173f5c5; BYTE $0x20 // vpsllq $32, %ymm1, %ymm1 LONG $0xc9d4d5c5 // vpaddq %ymm1, %ymm5, %ymm1 ADDL $4, DI - JNE LBB9_46 + JNE LBB10_46 -LBB9_47: +LBB10_47: TESTL SI, SI - JE LBB9_50 + JE LBB10_50 NEGL SI - QUAD $0xfffd2b25597de2c4; BYTE $0xff // vpbroadcastq $-725(%rip), %ymm4 /* LCPI9_3(%rip) */ + QUAD $0xfffd2b25597de2c4; BYTE $0xff // vpbroadcastq $-725(%rip), %ymm4 /* LCPI10_3(%rip) */ -LBB9_49: +LBB10_49: LONG $0xecf4fdc5 // vpmuludq %ymm4, %ymm0, %ymm5 LONG $0xd073fdc5; BYTE $0x20 // vpsrlq $32, %ymm0, %ymm0 LONG $0xc4f4fdc5 // vpmuludq %ymm4, %ymm0, %ymm0 @@ -4127,9 +4446,9 @@ LBB9_49: LONG $0xf173f5c5; BYTE $0x20 // vpsllq $32, %ymm1, %ymm1 LONG $0xc9d4d5c5 // 
vpaddq %ymm1, %ymm5, %ymm1 INCL SI - JNE LBB9_49 + JNE LBB10_49 -LBB9_50: +LBB10_50: LONG $0xd273ddc5; BYTE $0x20 // vpsrlq $32, %ymm2, %ymm4 LONG $0xe0f4ddc5 // vpmuludq %ymm0, %ymm4, %ymm4 LONG $0xd073d5c5; BYTE $0x20 // vpsrlq $32, %ymm0, %ymm5 @@ -4174,64 +4493,64 @@ LBB9_50: LONG $0xc2d4f9c5 // vpaddq %xmm2, %xmm0, %xmm0 LONG $0x7ef9c1c4; BYTE $0xc4 // vmovq %xmm0, %r12 CMPL DX, R8 - JE LBB9_54 + JE LBB10_54 ADDL R8, R9 -LBB9_52: +LBB10_52: MOVL AX, DX SUBL R9, DX -LBB9_53: +LBB10_53: ADDQ R12, R12 LEAQ 0(R12)(R12*4), R12 ADDL $-1, DX - JNE LBB9_53 + JNE LBB10_53 -LBB9_54: +LBB10_54: TESTL AX, AX - JS LBB9_62 + JS LBB10_62 MOVL 16(R15), CX CMPL CX, AX - JLE LBB9_62 + JLE LBB10_62 MOVQ 0(R15), SI MOVB 0(SI)(AX*1), DX CMPB DX, $53 - JNE LBB9_63 + JNE LBB10_63 LEAL 1(AX), DI CMPL DI, CX - JNE LBB9_63 + JNE LBB10_63 CMPL 28(R15), $0 SETNE CX - JNE LBB9_64 + JNE LBB10_64 TESTL AX, AX - JLE LBB9_64 + JLE LBB10_64 ADDL $-1, AX MOVB 0(SI)(AX*1), CX ANDB $1, CX - JMP LBB9_64 + JMP LBB10_64 -LBB9_62: +LBB10_62: XORL CX, CX -LBB9_64: +LBB10_64: MOVBLZX CX, AX ADDQ AX, R12 MOVQ $9007199254740992, AX CMPQ R12, AX - JNE LBB9_68 + JNE LBB10_68 CMPL R14, $1022 - JLE LBB9_67 + JLE LBB10_67 -LBB9_66: +LBB10_66: XORL R12, R12 MOVQ $9218868437227405312, R14 - JMP LBB9_69 + JMP LBB10_69 -LBB9_67: +LBB10_67: ADDL $1, R14 MOVQ R13, R12 -LBB9_68: +LBB10_68: MOVQ R12, AX ANDQ R13, AX ADDL $1023, R14 @@ -4240,7 +4559,7 @@ LBB9_68: TESTQ AX, AX LONG $0xf0440f4c // cmoveq %rax, %r14 -LBB9_69: +LBB10_69: ADDQ $-1, R13 ANDQ R12, R13 ORQ R14, R13 @@ -4260,10 +4579,10 @@ LBB9_69: WORD $0xf8c5; BYTE $0x77 // vzeroupper RET -LBB9_63: +LBB10_63: CMPB DX, $53 SETGE CX - JMP LBB9_64 + JMP LBB10_64 _atof_native: BYTE $0x55 // pushq %rbp @@ -4273,173 +4592,173 @@ _atof_native: MOVQ DX, -32(BP) MOVQ CX, -24(BP) TESTQ CX, CX - JE LBB10_5 + JE LBB11_5 MOVB $0, 0(DX) CMPQ CX, $1 - JE LBB10_5 + JE LBB11_5 MOVB $0, 1(DX) CMPQ -24(BP), $3 - JB LBB10_5 + JB LBB11_5 MOVL $2, AX -LBB10_4: +LBB11_4: MOVQ -32(BP), CX MOVB $0, 0(CX)(AX*1) ADDQ $1, AX CMPQ -24(BP), AX - JA LBB10_4 + JA LBB11_4 -LBB10_5: +LBB11_5: LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 LONG $0x4511f8c5; BYTE $0xf0 // vmovups %xmm0, $-16(%rbp) XORL DX, DX CMPB 0(DI), $45 - JNE LBB10_7 + JNE LBB11_7 MOVL $1, -8(BP) MOVL $1, AX CMPQ AX, SI - JL LBB10_8 - JMP LBB10_39 + JL LBB11_8 + JMP LBB11_39 -LBB10_7: +LBB11_7: XORL AX, AX CMPQ AX, SI - JGE LBB10_39 + JGE LBB11_39 -LBB10_8: +LBB11_8: MOVB $1, R11 XORL R9, R9 XORL R10, R10 XORL R8, R8 - JMP LBB10_12 + JMP LBB11_12 -LBB10_20: +LBB11_20: MOVL $1, -4(BP) -LBB10_11: +LBB11_11: ADDQ $1, AX CMPQ AX, SI SETLT R11 CMPQ SI, AX - JE LBB10_22 + JE LBB11_22 -LBB10_12: +LBB11_12: MOVBLZX 0(DI)(AX*1), CX LEAL -48(CX), DX CMPB DX, $9 - JA LBB10_17 + JA LBB11_17 CMPB CX, $48 - JNE LBB10_19 + JNE LBB11_19 TESTL R10, R10 - JE LBB10_21 + JE LBB11_21 MOVLQSX R9, R11 CMPQ -24(BP), R11 - JA LBB10_9 - JMP LBB10_10 + JA LBB11_9 + JMP LBB11_10 -LBB10_17: +LBB11_17: CMPB CX, $46 - JNE LBB10_23 + JNE LBB11_23 MOVL R10, -12(BP) MOVL $1, R8 - JMP LBB10_11 + JMP LBB11_11 -LBB10_19: +LBB11_19: MOVLQSX R10, R11 CMPQ -24(BP), R11 - JBE LBB10_20 + JBE LBB11_20 -LBB10_9: +LBB11_9: MOVQ -32(BP), DX MOVB CX, 0(DX)(R11*1) MOVL -16(BP), R9 ADDL $1, R9 MOVL R9, -16(BP) -LBB10_10: +LBB11_10: MOVL R9, R10 - JMP LBB10_11 + JMP LBB11_11 -LBB10_21: +LBB11_21: ADDL $-1, -12(BP) XORL R10, R10 - JMP LBB10_11 + JMP LBB11_11 -LBB10_22: +LBB11_22: MOVQ SI, AX -LBB10_23: +LBB11_23: TESTL R8, R8 - JE LBB10_25 + JE LBB11_25 TESTB $1, R11 - JNE LBB10_26 - JMP LBB10_40 
+ JNE LBB11_26 + JMP LBB11_40 -LBB10_25: +LBB11_25: MOVL R9, -12(BP) TESTB $1, R11 - JE LBB10_40 + JE LBB11_40 -LBB10_26: +LBB11_26: MOVL AX, DX MOVB 0(DI)(DX*1), CX ORB $32, CX CMPB CX, $101 - JNE LBB10_40 + JNE LBB11_40 MOVB 1(DI)(DX*1), CX CMPB CX, $45 - JE LBB10_30 + JE LBB11_30 MOVL $1, R8 CMPB CX, $43 - JNE LBB10_32 + JNE LBB11_32 ADDL $2, AX - JMP LBB10_31 + JMP LBB11_31 -LBB10_30: +LBB11_30: ADDL $2, AX MOVL $-1, R8 -LBB10_31: +LBB11_31: MOVL AX, DX MOVLQSX DX, AX XORL DX, DX CMPQ AX, SI - JL LBB10_33 - JMP LBB10_38 + JL LBB11_33 + JMP LBB11_38 -LBB10_32: +LBB11_32: ADDQ $1, DX MOVLQSX DX, AX XORL DX, DX CMPQ AX, SI - JGE LBB10_38 + JGE LBB11_38 -LBB10_33: +LBB11_33: XORL DX, DX -LBB10_34: +LBB11_34: MOVBLSX 0(DI)(AX*1), CX CMPL CX, $48 - JL LBB10_38 + JL LBB11_38 CMPB CX, $57 - JG LBB10_38 + JG LBB11_38 CMPL DX, $9999 - JG LBB10_38 + JG LBB11_38 LEAL 0(DX)(DX*4), DX LEAL 0(CX)(DX*2), DX ADDL $-48, DX ADDQ $1, AX CMPQ SI, AX - JNE LBB10_34 + JNE LBB11_34 -LBB10_38: +LBB11_38: IMULL R8, DX ADDL -12(BP), DX -LBB10_39: +LBB11_39: MOVL DX, -12(BP) -LBB10_40: +LBB11_40: LEAQ -32(BP), DI LEAQ -40(BP), SI LONG $0xfff985e8; BYTE $0xff // callq _decimal_to_f64 @@ -4469,68 +4788,68 @@ _value: LONG $0x0005cee8; BYTE $0x00 // callq _advance_ns MOVBLSX AX, AX CMPL AX, $125 - JA LBB11_11 - LONG $0x030d8d48; WORD $0x0003; BYTE $0x00 // leaq $771(%rip), %rcx /* LJTI11_0(%rip) */ + JA LBB12_11 + LONG $0x030d8d48; WORD $0x0003; BYTE $0x00 // leaq $771(%rip), %rcx /* LJTI12_0(%rip) */ MOVLQSX 0(CX)(AX*4), AX ADDQ CX, AX JMP AX -LBB11_2: +LBB12_2: MOVQ R14, -56(BP) MOVQ -48(BP), R14 LEAQ -1(R14), R13 MOVQ R13, -48(BP) TESTB $2, BX - JNE LBB11_4 + JNE LBB12_4 LEAQ -80(BP), DI LEAQ -48(BP), SI MOVQ -56(BP), DX LONG $0x000b80e8; BYTE $0x00 // callq _vnumber MOVQ -48(BP), BX - JMP LBB11_49 + JMP LBB12_49 -LBB11_4: +LBB12_4: XORL AX, AX CMPB 0(R15)(R13*1), $45 LEAQ 0(R15)(R13*1), BX SETEQ AX ADDQ AX, BX SUBQ AX, R12 - JE LBB11_44 + JE LBB12_44 CMPQ R13, R12 - JAE LBB11_7 + JAE LBB12_7 MOVB 0(BX), AX ADDB $-48, AX CMPB AX, $9 - JA LBB11_46 + JA LBB12_46 -LBB11_7: +LBB12_7: MOVQ BX, DI MOVQ R12, SI LONG $0x0021d2e8; BYTE $0x00 // callq _do_skip_number TESTQ AX, AX - JS LBB11_45 + JS LBB12_45 ADDQ AX, BX SUBQ R15, BX TESTQ R14, R14 - JLE LBB11_48 + JLE LBB12_48 MOVQ -56(BP), AX MOVQ $8, 0(AX) MOVQ R13, 24(AX) - JMP LBB11_49 + JMP LBB12_49 -LBB11_10: +LBB12_10: MOVQ $1, 0(R14) MOVQ -48(BP), BX - JMP LBB11_49 + JMP LBB12_49 -LBB11_11: +LBB12_11: MOVQ $-2, 0(R14) MOVQ -48(BP), BX ADDQ $-1, BX - JMP LBB11_49 + JMP LBB12_49 -LBB11_12: +LBB12_12: MOVQ $-1, -64(BP) MOVQ -48(BP), R15 LEAQ -80(BP), DI @@ -4539,7 +4858,7 @@ LBB11_12: LONG $0x0007fce8; BYTE $0x00 // callq _advance_string MOVQ AX, BX TESTQ AX, AX - JS LBB11_33 + JS LBB12_33 MOVQ BX, -48(BP) MOVQ R15, 16(R14) MOVQ -64(BP), AX @@ -4549,176 +4868,176 @@ LBB11_12: MOVQ CX, 24(R14) MOVL $7, AX MOVQ AX, 0(R14) - JMP LBB11_49 + JMP LBB12_49 -LBB11_14: +LBB12_14: TESTL BX, BX MOVQ $-2, AX MOVL $11, CX - JMP LBB11_32 + JMP LBB12_32 -LBB11_15: +LBB12_15: TESTL BX, BX MOVQ $-2, AX MOVL $10, CX - JMP LBB11_32 + JMP LBB12_32 -LBB11_16: +LBB12_16: MOVQ $5, 0(R14) MOVQ -48(BP), BX - JMP LBB11_49 + JMP LBB12_49 -LBB11_17: +LBB12_17: TESTL BX, BX MOVQ $-2, AX MOVL $12, CX - JMP LBB11_32 + JMP LBB12_32 -LBB11_18: +LBB12_18: MOVQ -48(BP), BX LEAQ -4(R12), CX MOVQ $-1, AX CMPQ BX, CX - JA LBB11_25 + JA LBB12_25 MOVL 0(R15)(BX*1), CX CMPL CX, $1702063201 - JNE LBB11_34 + JNE LBB12_34 ADDQ $4, BX MOVL $4, AX - JMP LBB11_24 + JMP LBB12_24 -LBB11_21: +LBB12_21: MOVQ -48(BP), BX 
LEAQ -3(R12), CX MOVQ $-1, AX CMPQ BX, CX - JA LBB11_25 + JA LBB12_25 MOVL -1(R15)(BX*1), CX CMPL CX, $1819047278 - JNE LBB11_37 + JNE LBB12_37 ADDQ $3, BX MOVL $2, AX - JMP LBB11_24 + JMP LBB12_24 -LBB11_27: +LBB12_27: MOVQ -48(BP), BX LEAQ -3(R12), CX MOVQ $-1, AX CMPQ BX, CX - JA LBB11_25 + JA LBB12_25 MOVL -1(R15)(BX*1), CX CMPL CX, $1702195828 - JNE LBB11_41 + JNE LBB12_41 ADDQ $3, BX MOVL $3, AX -LBB11_24: +LBB12_24: MOVQ BX, R12 -LBB11_25: +LBB12_25: MOVQ R12, -48(BP) MOVQ R12, BX -LBB11_26: +LBB12_26: MOVQ AX, 0(R14) - JMP LBB11_49 + JMP LBB12_49 -LBB11_30: +LBB12_30: MOVQ $6, 0(R14) MOVQ -48(BP), BX - JMP LBB11_49 + JMP LBB12_49 -LBB11_31: +LBB12_31: TESTL BX, BX MOVQ $-2, AX MOVL $13, CX -LBB11_32: +LBB12_32: LONG $0xc8490f48 // cmovnsq %rax, %rcx MOVQ CX, 0(R14) SARL $31, BX NOTL BX MOVLQSX BX, BX ADDQ -48(BP), BX - JMP LBB11_49 + JMP LBB12_49 -LBB11_33: +LBB12_33: MOVQ R12, -48(BP) MOVQ BX, 0(R14) MOVQ R12, BX - JMP LBB11_49 + JMP LBB12_49 -LBB11_34: +LBB12_34: MOVQ $-2, AX CMPB CX, $97 - JNE LBB11_26 + JNE LBB12_26 MOVL $1702063201, CX -LBB11_36: +LBB12_36: SHRL $8, CX MOVBLSX 1(R15)(BX*1), DX ADDQ $1, BX MOVBLZX CX, SI CMPL SI, DX - JE LBB11_36 - JMP LBB11_40 + JE LBB12_36 + JMP LBB12_40 -LBB11_37: +LBB12_37: ADDQ $-1, BX MOVQ $-2, AX CMPB CX, $110 - JNE LBB11_26 + JNE LBB12_26 MOVL $1819047278, CX -LBB11_39: +LBB12_39: SHRL $8, CX MOVBLSX 1(R15)(BX*1), DX ADDQ $1, BX MOVBLZX CX, SI CMPL SI, DX - JE LBB11_39 - JMP LBB11_40 + JE LBB12_39 + JMP LBB12_40 -LBB11_41: +LBB12_41: ADDQ $-1, BX MOVQ $-2, AX CMPB CX, $116 - JNE LBB11_26 + JNE LBB12_26 MOVL $1702195828, CX -LBB11_43: +LBB12_43: SHRL $8, CX MOVBLSX 1(R15)(BX*1), DX ADDQ $1, BX MOVBLZX CX, SI CMPL SI, DX - JE LBB11_43 + JE LBB12_43 -LBB11_40: +LBB12_40: MOVQ BX, -48(BP) MOVQ AX, 0(R14) - JMP LBB11_49 + JMP LBB12_49 -LBB11_44: +LBB12_44: MOVQ $-1, R13 - JMP LBB11_47 + JMP LBB12_47 -LBB11_45: +LBB12_45: NOTQ AX ADDQ AX, BX -LBB11_46: +LBB12_46: MOVQ $-2, R13 -LBB11_47: +LBB12_47: SUBQ R15, BX MOVQ BX, -48(BP) -LBB11_48: +LBB12_48: MOVQ -56(BP), AX MOVQ R13, 0(AX) -LBB11_49: +LBB12_49: MOVQ BX, AX ADDQ $40, SP BYTE $0x5b // popq %rbx @@ -4729,173 +5048,173 @@ LBB11_49: BYTE $0x5d // popq %rbp RET -// .set L11_0_set_10, LBB11_10-LJTI11_0 -// .set L11_0_set_11, LBB11_11-LJTI11_0 -// .set L11_0_set_12, LBB11_12-LJTI11_0 -// .set L11_0_set_14, LBB11_14-LJTI11_0 -// .set L11_0_set_2, LBB11_2-LJTI11_0 -// .set L11_0_set_15, LBB11_15-LJTI11_0 -// .set L11_0_set_16, LBB11_16-LJTI11_0 -// .set L11_0_set_17, LBB11_17-LJTI11_0 -// .set L11_0_set_18, LBB11_18-LJTI11_0 -// .set L11_0_set_21, LBB11_21-LJTI11_0 -// .set L11_0_set_27, LBB11_27-LJTI11_0 -// .set L11_0_set_30, LBB11_30-LJTI11_0 -// .set L11_0_set_31, LBB11_31-LJTI11_0 -LJTI11_0: - LONG $0xfffffd97 // .long L11_0_set_10 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long 
L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffdbb // .long L11_0_set_12 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffe0e // .long L11_0_set_14 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffd06 // .long L11_0_set_2 - LONG $0xfffffe21 // .long L11_0_set_15 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffe34 // .long L11_0_set_16 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffe44 // .long L11_0_set_17 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffe57 // .long L11_0_set_18 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // 
.long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffe87 // .long L11_0_set_21 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffeb8 // .long L11_0_set_27 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xfffffef9 // .long L11_0_set_30 - LONG $0xfffffda7 // .long L11_0_set_11 - LONG $0xffffff09 // .long L11_0_set_31 - -LCPI12_0: +// .set L12_0_set_10, LBB12_10-LJTI12_0 +// .set L12_0_set_11, LBB12_11-LJTI12_0 +// .set L12_0_set_12, LBB12_12-LJTI12_0 +// .set L12_0_set_14, LBB12_14-LJTI12_0 +// .set L12_0_set_2, LBB12_2-LJTI12_0 +// .set L12_0_set_15, LBB12_15-LJTI12_0 +// .set L12_0_set_16, LBB12_16-LJTI12_0 +// .set L12_0_set_17, LBB12_17-LJTI12_0 +// .set L12_0_set_18, LBB12_18-LJTI12_0 +// .set L12_0_set_21, LBB12_21-LJTI12_0 +// .set L12_0_set_27, LBB12_27-LJTI12_0 +// .set L12_0_set_30, LBB12_30-LJTI12_0 +// .set L12_0_set_31, LBB12_31-LJTI12_0 +LJTI12_0: + LONG $0xfffffd97 // .long L12_0_set_10 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffdbb // .long L12_0_set_12 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffe0e // .long L12_0_set_14 + LONG $0xfffffd06 // .long L12_0_set_2 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffd06 // .long L12_0_set_2 + LONG $0xfffffd06 // .long L12_0_set_2 + LONG $0xfffffd06 // .long L12_0_set_2 + LONG $0xfffffd06 // .long L12_0_set_2 + LONG $0xfffffd06 // .long L12_0_set_2 + LONG $0xfffffd06 // .long 
L12_0_set_2 + LONG $0xfffffd06 // .long L12_0_set_2 + LONG $0xfffffd06 // .long L12_0_set_2 + LONG $0xfffffd06 // .long L12_0_set_2 + LONG $0xfffffd06 // .long L12_0_set_2 + LONG $0xfffffe21 // .long L12_0_set_15 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffe34 // .long L12_0_set_16 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffe44 // .long L12_0_set_17 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffe57 // .long L12_0_set_18 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffe87 // .long L12_0_set_21 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffeb8 // .long L12_0_set_27 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xfffffef9 // .long L12_0_set_30 + LONG $0xfffffda7 // .long L12_0_set_11 + LONG $0xffffff09 // .long L12_0_set_31 + +LCPI13_0: QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 16, ' ' QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 16, ' ' -LCPI12_1: +LCPI13_1: QUAD $0x0909090909090909; QUAD $0x0909090909090909 // .space 16, '\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t' QUAD $0x0909090909090909; QUAD $0x0909090909090909 // .space 16, '\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t' -LCPI12_2: +LCPI13_2: QUAD $0x0a0a0a0a0a0a0a0a; QUAD $0x0a0a0a0a0a0a0a0a // .space 16, '\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n' QUAD $0x0a0a0a0a0a0a0a0a; QUAD $0x0a0a0a0a0a0a0a0a // .space 16, '\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n' 
-LCPI12_3: +LCPI13_3: QUAD $0x0d0d0d0d0d0d0d0d; QUAD $0x0d0d0d0d0d0d0d0d // .space 16, '\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r' QUAD $0x0d0d0d0d0d0d0d0d; QUAD $0x0d0d0d0d0d0d0d0d // .space 16, '\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r' -LCPI12_4: +LCPI13_4: QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 16, ' ' -LCPI12_5: +LCPI13_5: QUAD $0x0909090909090909; QUAD $0x0909090909090909 // .space 16, '\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t' -LCPI12_6: +LCPI13_6: QUAD $0x0a0a0a0a0a0a0a0a; QUAD $0x0a0a0a0a0a0a0a0a // .space 16, '\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n' -LCPI12_7: +LCPI13_7: QUAD $0x0d0d0d0d0d0d0d0d; QUAD $0x0d0d0d0d0d0d0d0d // .space 16, '\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r' _advance_ns: @@ -4906,108 +5225,108 @@ _advance_ns: MOVQ 0(DI), R9 MOVQ 8(DI), R8 CMPQ R11, R8 - JAE LBB12_4 + JAE LBB13_4 MOVB 0(R9)(R11*1), AX CMPB AX, $13 - JE LBB12_4 + JE LBB13_4 CMPB AX, $32 - JE LBB12_4 + JE LBB13_4 ADDB $-9, AX CMPB AX, $1 - JA LBB12_48 + JA LBB13_48 -LBB12_4: +LBB13_4: LEAQ 1(R11), AX CMPQ AX, R8 - JAE LBB12_9 + JAE LBB13_9 MOVB 0(R9)(AX*1), CX CMPB CX, $13 - JE LBB12_9 + JE LBB13_9 CMPB CX, $32 - JE LBB12_9 + JE LBB13_9 ADDB $-9, CX CMPB CX, $1 - JA LBB12_8 + JA LBB13_8 -LBB12_9: +LBB13_9: LEAQ 2(R11), AX CMPQ AX, R8 - JAE LBB12_14 + JAE LBB13_14 MOVB 0(R9)(AX*1), CX CMPB CX, $13 - JE LBB12_14 + JE LBB13_14 CMPB CX, $32 - JE LBB12_14 + JE LBB13_14 ADDB $-9, CX CMPB CX, $1 - JA LBB12_8 + JA LBB13_8 -LBB12_14: +LBB13_14: LEAQ 3(R11), AX CMPQ AX, R8 - JAE LBB12_19 + JAE LBB13_19 MOVB 0(R9)(AX*1), CX CMPB CX, $13 - JE LBB12_19 + JE LBB13_19 CMPB CX, $32 - JE LBB12_19 + JE LBB13_19 ADDB $-9, CX CMPB CX, $1 - JBE LBB12_19 + JBE LBB13_19 -LBB12_8: +LBB13_8: MOVQ AX, R11 - JMP LBB12_48 + JMP LBB13_48 -LBB12_19: +LBB13_19: ADDQ $4, R11 CMPQ R8, R11 - JBE LBB12_44 + JBE LBB13_44 LEAQ 0(R9)(R11*1), BX MOVQ R8, CX SUBQ R11, CX - JE LBB12_28 + JE LBB13_28 MOVL BX, AX ANDL $31, AX TESTQ AX, AX - JE LBB12_28 + JE LBB13_28 MOVL $5, DX SUBQ R8, DX MOVQ $4294977024, CX -LBB12_23: +LBB13_23: MOVBLSX 0(R9)(R11*1), AX CMPL AX, $32 - JA LBB12_46 + JA LBB13_46 BTQ AX, CX - JAE LBB12_46 + JAE LBB13_46 LEAQ 0(DX)(R11*1), BX LEAQ 1(R11), AX CMPQ BX, $4 - JE LBB12_27 + JE LBB13_27 LEAL 0(R9)(R11*1), BX ADDL $1, BX ANDL $31, BX MOVQ AX, R11 TESTQ BX, BX - JNE LBB12_23 + JNE LBB13_23 -LBB12_27: +LBB13_27: LEAQ 0(R9)(AX*1), BX MOVQ R8, CX SUBQ AX, CX -LBB12_28: +LBB13_28: CMPQ CX, $32 - JB LBB12_32 + JB LBB13_32 MOVQ R9, AX SUBQ BX, AX - QUAD $0xfffffe26056ffec5 // vmovdqu $-474(%rip), %ymm0 /* LCPI12_0(%rip) */ - QUAD $0xfffffe3e0d6ffec5 // vmovdqu $-450(%rip), %ymm1 /* LCPI12_1(%rip) */ - QUAD $0xfffffe56156ffec5 // vmovdqu $-426(%rip), %ymm2 /* LCPI12_2(%rip) */ - QUAD $0xfffffe6e1d6ffec5 // vmovdqu $-402(%rip), %ymm3 /* LCPI12_3(%rip) */ + QUAD $0xfffffe26056ffec5 // vmovdqu $-474(%rip), %ymm0 /* LCPI13_0(%rip) */ + QUAD $0xfffffe3e0d6ffec5 // vmovdqu $-450(%rip), %ymm1 /* LCPI13_1(%rip) */ + QUAD $0xfffffe56156ffec5 // vmovdqu $-426(%rip), %ymm2 /* LCPI13_2(%rip) */ + QUAD $0xfffffe6e1d6ffec5 // vmovdqu $-402(%rip), %ymm3 /* LCPI13_3(%rip) */ -LBB12_30: +LBB13_30: LONG $0x236ffdc5 // vmovdqa (%rbx), %ymm4 LONG $0xe874ddc5 // vpcmpeqb %ymm0, %ymm4, %ymm5 LONG $0xf174ddc5 // vpcmpeqb %ymm1, %ymm4, %ymm6 @@ -5018,25 +5337,25 @@ LBB12_30: LONG $0xe5ebddc5 // vpor %ymm5, %ymm4, %ymm4 LONG $0xd4d7fdc5 // vpmovmskb %ymm4, %edx CMPL DX, $-1 - JNE LBB12_45 + JNE LBB13_45 ADDQ $32, BX ADDQ $-32, CX ADDQ $-32, AX CMPQ CX, $31 - JA LBB12_30 + JA LBB13_30 -LBB12_32: +LBB13_32: WORD $0xf8c5; BYTE $0x77 // vzeroupper CMPQ CX, $16 - JB 
LBB12_36 + JB LBB13_36 MOVQ R9, R10 SUBQ BX, R10 - QUAD $0xfffffe38056ffac5 // vmovdqu $-456(%rip), %xmm0 /* LCPI12_4(%rip) */ - QUAD $0xfffffe400d6ffac5 // vmovdqu $-448(%rip), %xmm1 /* LCPI12_5(%rip) */ - QUAD $0xfffffe48156ffac5 // vmovdqu $-440(%rip), %xmm2 /* LCPI12_6(%rip) */ - QUAD $0xfffffe501d6ffac5 // vmovdqu $-432(%rip), %xmm3 /* LCPI12_7(%rip) */ + QUAD $0xfffffe38056ffac5 // vmovdqu $-456(%rip), %xmm0 /* LCPI13_4(%rip) */ + QUAD $0xfffffe400d6ffac5 // vmovdqu $-448(%rip), %xmm1 /* LCPI13_5(%rip) */ + QUAD $0xfffffe48156ffac5 // vmovdqu $-440(%rip), %xmm2 /* LCPI13_6(%rip) */ + QUAD $0xfffffe501d6ffac5 // vmovdqu $-432(%rip), %xmm3 /* LCPI13_7(%rip) */ -LBB12_34: +LBB13_34: LONG $0x236ff9c5 // vmovdqa (%rbx), %xmm4 LONG $0xe874d9c5 // vpcmpeqb %xmm0, %xmm4, %xmm5 LONG $0xf174d9c5 // vpcmpeqb %xmm1, %xmm4, %xmm6 @@ -5047,56 +5366,56 @@ LBB12_34: LONG $0xe5ebd9c5 // vpor %xmm5, %xmm4, %xmm4 LONG $0xc4d7f9c5 // vpmovmskb %xmm4, %eax CMPW AX, $-1 - JNE LBB12_49 + JNE LBB13_49 ADDQ $16, BX ADDQ $-16, CX ADDQ $-16, R10 CMPQ CX, $15 - JA LBB12_34 + JA LBB13_34 -LBB12_36: +LBB13_36: TESTQ CX, CX - JE LBB12_42 + JE LBB13_42 LEAQ 0(BX)(CX*1), R10 XORL AX, AX MOVQ $4294977024, R11 -LBB12_38: +LBB13_38: MOVBLSX 0(BX)(AX*1), DX CMPL DX, $32 - JA LBB12_51 + JA LBB13_51 BTQ DX, R11 - JAE LBB12_51 + JAE LBB13_51 ADDQ $1, AX CMPQ CX, AX - JNE LBB12_38 + JNE LBB13_38 MOVQ R10, BX -LBB12_42: +LBB13_42: SUBQ R9, BX MOVQ BX, R11 CMPQ R11, R8 - JB LBB12_47 - JMP LBB12_50 + JB LBB13_47 + JMP LBB13_50 -LBB12_44: +LBB13_44: MOVQ R11, 0(SI) - JMP LBB12_50 + JMP LBB13_50 -LBB12_45: +LBB13_45: WORD $0xf8c5; BYTE $0x77 // vzeroupper NOTL DX BSFL DX, R11 SUBQ AX, R11 -LBB12_46: +LBB13_46: CMPQ R11, R8 - JAE LBB12_50 + JAE LBB13_50 -LBB12_47: +LBB13_47: MOVQ 0(DI), R9 -LBB12_48: +LBB13_48: LEAQ 1(R11), AX MOVQ AX, 0(SI) MOVB 0(R9)(R11*1), AX @@ -5105,28 +5424,28 @@ LBB12_48: BYTE $0x5d // popq %rbp RET -LBB12_49: +LBB13_49: MOVWLZX AX, AX NOTL AX BSFL AX, R11 SUBQ R10, R11 CMPQ R11, R8 - JB LBB12_47 + JB LBB13_47 -LBB12_50: +LBB13_50: XORL AX, AX MOVBLSX AX, AX BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB12_51: +LBB13_51: SUBQ R9, BX ADDQ AX, BX MOVQ BX, R11 CMPQ R11, R8 - JB LBB12_47 - JMP LBB12_50 + JB LBB13_47 + JMP LBB13_50 _vstring: BYTE $0x55 // pushq %rbp @@ -5145,7 +5464,7 @@ _vstring: MOVQ R12, SI LONG $0x000080e8; BYTE $0x00 // callq _advance_string TESTQ AX, AX - JS LBB13_1 + JS LBB14_1 MOVQ AX, 0(BX) MOVQ R12, 16(R14) MOVQ -40(BP), CX @@ -5154,13 +5473,13 @@ _vstring: LONG $0xc14c0f48 // cmovlq %rcx, %rax MOVQ AX, 24(R14) MOVL $7, AX - JMP LBB13_3 + JMP LBB14_3 -LBB13_1: +LBB14_1: MOVQ 8(R15), CX MOVQ CX, 0(BX) -LBB13_3: +LBB14_3: MOVQ AX, 0(R14) ADDQ $16, SP BYTE $0x5b // popq %rbx @@ -5170,11 +5489,11 @@ LBB13_3: BYTE $0x5d // popq %rbp RET -LCPI14_0: +LCPI15_0: QUAD $0x2222222222222222; QUAD $0x2222222222222222 // .space 16, '""""""""""""""""' QUAD $0x2222222222222222; QUAD $0x2222222222222222 // .space 16, '""""""""""""""""' -LCPI14_1: +LCPI15_1: QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' @@ -5188,21 +5507,21 @@ _advance_string: BYTE $0x53 // pushq %rbx MOVQ 8(DI), R15 SUBQ SI, R15 - JE LBB14_17 + JE LBB15_17 MOVQ 0(DI), R9 MOVQ $-1, 0(DX) CMPQ R15, $64 - JB LBB14_18 + JB LBB15_18 MOVQ SI, DI NOTQ DI MOVQ $-1, R8 XORL R14, R14 - QUAD $0xffffff7a056ffec5 // vmovdqu $-134(%rip), %ymm0 /* LCPI14_0(%rip) */ - QUAD 
$0xffffff920d6ffec5 // vmovdqu $-110(%rip), %ymm1 /* LCPI14_1(%rip) */ + QUAD $0xffffff7a056ffec5 // vmovdqu $-134(%rip), %ymm0 /* LCPI15_0(%rip) */ + QUAD $0xffffff920d6ffec5 // vmovdqu $-110(%rip), %ymm1 /* LCPI15_1(%rip) */ MOVQ $-6148914691236517206, R10 MOVQ $6148914691236517205, R11 -LBB14_3: +LBB15_3: LONG $0x6f7ec1c4; WORD $0x3114 // vmovdqu (%r9,%rsi), %ymm2 LONG $0x6f7ec1c4; WORD $0x315c; BYTE $0x20 // vmovdqu $32(%r9,%rsi), %ymm3 LONG $0xe074edc5 // vpcmpeqb %ymm0, %ymm2, %ymm4 @@ -5217,29 +5536,29 @@ LBB14_3: ORQ CX, R12 SHLQ $32, BX ORQ BX, AX - JNE LBB14_7 + JNE LBB15_7 TESTQ R14, R14 - JNE LBB14_9 + JNE LBB15_9 XORL R14, R14 TESTQ R12, R12 - JNE LBB14_10 + JNE LBB15_10 -LBB14_6: +LBB15_6: ADDQ $-64, R15 ADDQ $-64, DI ADDQ $64, SI CMPQ R15, $63 - JA LBB14_3 - JMP LBB14_12 + JA LBB15_3 + JMP LBB15_12 -LBB14_7: +LBB15_7: CMPQ R8, $-1 - JNE LBB14_9 + JNE LBB15_9 BSFQ AX, R8 ADDQ SI, R8 MOVQ R8, 0(DX) -LBB14_9: +LBB15_9: MOVQ R14, CX NOTQ CX ANDQ AX, CX @@ -5258,13 +5577,13 @@ LBB14_9: NOTQ BX ANDQ BX, R12 TESTQ R12, R12 - JE LBB14_6 + JE LBB15_6 -LBB14_10: +LBB15_10: BSFQ R12, AX SUBQ DI, AX -LBB14_11: +LBB15_11: BYTE $0x5b // popq %rbx WORD $0x5c41 // popq %r12 WORD $0x5d41 // popq %r13 @@ -5274,50 +5593,50 @@ LBB14_11: WORD $0xf8c5; BYTE $0x77 // vzeroupper RET -LBB14_12: +LBB15_12: ADDQ R9, SI CMPQ R15, $32 - JB LBB14_23 + JB LBB15_23 -LBB14_13: +LBB15_13: LONG $0x066ffec5 // vmovdqu (%rsi), %ymm0 - QUAD $0xfffffe880d74fdc5 // vpcmpeqb $-376(%rip), %ymm0, %ymm1 /* LCPI14_0(%rip) */ + QUAD $0xfffffe880d74fdc5 // vpcmpeqb $-376(%rip), %ymm0, %ymm1 /* LCPI15_0(%rip) */ LONG $0xf9d7fdc5 // vpmovmskb %ymm1, %edi - QUAD $0xfffffe9c0574fdc5 // vpcmpeqb $-356(%rip), %ymm0, %ymm0 /* LCPI14_1(%rip) */ + QUAD $0xfffffe9c0574fdc5 // vpcmpeqb $-356(%rip), %ymm0, %ymm0 /* LCPI15_1(%rip) */ LONG $0xc0d7fdc5 // vpmovmskb %ymm0, %eax TESTL AX, AX - JNE LBB14_19 + JNE LBB15_19 TESTQ R14, R14 - JNE LBB14_21 + JNE LBB15_21 XORL R14, R14 TESTQ DI, DI - JE LBB14_22 + JE LBB15_22 -LBB14_16: +LBB15_16: BSFQ DI, AX SUBQ R9, SI ADDQ SI, AX ADDQ $1, AX - JMP LBB14_11 + JMP LBB15_11 -LBB14_18: +LBB15_18: ADDQ R9, SI MOVQ $-1, R8 XORL R14, R14 CMPQ R15, $32 - JAE LBB14_13 - JMP LBB14_23 + JAE LBB15_13 + JMP LBB15_23 -LBB14_19: +LBB15_19: CMPQ R8, $-1 - JNE LBB14_21 + JNE LBB15_21 MOVQ SI, CX SUBQ R9, CX BSFQ AX, R8 ADDQ CX, R8 MOVQ R8, 0(DX) -LBB14_21: +LBB15_21: MOVL R14, CX NOTL CX ANDL AX, CX @@ -5335,50 +5654,50 @@ LBB14_21: NOTL BX ANDL BX, DI TESTQ DI, DI - JNE LBB14_16 + JNE LBB15_16 -LBB14_22: +LBB15_22: ADDQ $32, SI ADDQ $-32, R15 -LBB14_23: +LBB15_23: TESTQ R14, R14 - JNE LBB14_37 + JNE LBB15_37 TESTQ R15, R15 - JE LBB14_36 + JE LBB15_36 -LBB14_25: +LBB15_25: MOVQ R9, R10 NOTQ R10 ADDQ $1, R10 -LBB14_26: +LBB15_26: XORL AX, AX -LBB14_27: +LBB15_27: MOVQ AX, DI MOVBLZX 0(SI)(AX*1), BX CMPB BX, $34 - JE LBB14_35 + JE LBB15_35 CMPB BX, $92 - JE LBB14_30 + JE LBB15_30 LEAQ 1(DI), AX CMPQ R15, AX - JNE LBB14_27 - JMP LBB14_34 + JNE LBB15_27 + JMP LBB15_34 -LBB14_30: +LBB15_30: LEAQ -1(R15), CX MOVQ $-1, AX CMPQ CX, DI - JE LBB14_11 + JE LBB15_11 CMPQ R8, $-1 - JNE LBB14_33 + JNE LBB15_33 LEAQ 0(R10)(SI*1), R8 ADDQ DI, R8 MOVQ R8, 0(DX) -LBB14_33: +LBB15_33: ADDQ DI, SI ADDQ $2, SI MOVQ R15, CX @@ -5387,58 +5706,58 @@ LBB14_33: ADDQ $-2, R15 CMPQ R15, DI MOVQ CX, R15 - JNE LBB14_26 - JMP LBB14_11 + JNE LBB15_26 + JMP LBB15_11 -LBB14_34: +LBB15_34: MOVQ $-1, AX CMPB BX, $34 - JNE LBB14_11 + JNE LBB15_11 -LBB14_35: +LBB15_35: ADDQ DI, SI ADDQ $1, SI -LBB14_36: +LBB15_36: SUBQ R9, SI MOVQ SI, AX - JMP 
LBB14_11 + JMP LBB15_11 -LBB14_37: +LBB15_37: TESTQ R15, R15 - JE LBB14_17 + JE LBB15_17 CMPQ R8, $-1 - JNE LBB14_40 + JNE LBB15_40 MOVQ R9, R8 NOTQ R8 ADDQ SI, R8 MOVQ R8, 0(DX) -LBB14_40: +LBB15_40: ADDQ $1, SI ADDQ $-1, R15 TESTQ R15, R15 - JNE LBB14_25 - JMP LBB14_36 + JNE LBB15_25 + JMP LBB15_36 -LBB14_17: +LBB15_17: MOVQ $-1, AX - JMP LBB14_11 + JMP LBB15_11 -LCPI15_0: +LCPI16_0: LONG $0x43300000 // .long 1127219200 LONG $0x45300000 // .long 1160773632 LONG $0x00000000 // .long 0 LONG $0x00000000 // .long 0 -LCPI15_1: +LCPI16_1: QUAD $0x4330000000000000 // .quad 0x4330000000000000 QUAD $0x4530000000000000 // .quad 0x4530000000000000 -LCPI15_2: +LCPI16_2: QUAD $0x430c6bf526340000 // .quad 0x430c6bf526340000 -LCPI15_3: +LCPI16_3: QUAD $0xc30c6bf526340000 // .quad 0xc30c6bf526340000 _vnumber: @@ -5463,215 +5782,215 @@ _vnumber: MOVQ 0(SI), CX MOVQ CX, 24(DX) CMPQ AX, R13 - JAE LBB15_52 + JAE LBB16_52 MOVB 0(R15)(AX*1), DI MOVL $1, DX CMPB DI, $45 - JNE LBB15_4 + JNE LBB16_4 ADDQ $1, AX CMPQ AX, R13 - JAE LBB15_52 + JAE LBB16_52 MOVB 0(R15)(AX*1), DI MOVL $-1, DX -LBB15_4: +LBB16_4: LEAL -48(DI), CX CMPB CX, $10 - JB LBB15_6 + JB LBB16_6 -LBB15_5: +LBB16_5: MOVQ AX, 0(R14) MOVQ $-2, 0(BX) - JMP LBB15_53 + JMP LBB16_53 -LBB15_6: +LBB16_6: CMPB DI, $48 - JNE LBB15_10 + JNE LBB16_10 LEAQ 1(AX), R8 CMPQ AX, R13 - JAE LBB15_22 + JAE LBB16_22 MOVB 0(R15)(R8*1), CX ADDB $-46, CX CMPB CX, $55 - JA LBB15_22 + JA LBB16_22 MOVBLZX CX, CX MOVQ $36028797027352577, SI BTQ CX, SI - JAE LBB15_22 + JAE LBB16_22 -LBB15_10: +LBB16_10: MOVL DX, -44(BP) MOVB $1, CX MOVL CX, -56(BP) CMPQ AX, R13 - JAE LBB15_21 + JAE LBB16_21 MOVL $4294967248, R9 ADDQ $1, AX XORL CX, CX XORL R8, R8 XORL R12, R12 -LBB15_12: +LBB16_12: CMPL R8, $18 - JG LBB15_14 + JG LBB16_14 LEAQ 0(R12)(R12*4), DX MOVBLZX DI, DI ADDL R9, DI LEAQ 0(DI)(DX*2), R12 ADDL $1, R8 - JMP LBB15_15 + JMP LBB16_15 -LBB15_14: +LBB16_14: ADDL $1, CX -LBB15_15: +LBB16_15: CMPQ R13, AX - JE LBB15_23 + JE LBB16_23 MOVBLZX 0(R15)(AX*1), DI LEAL -48(DI), DX ADDQ $1, AX CMPB DX, $10 - JB LBB15_12 + JB LBB16_12 CMPB DI, $46 - JNE LBB15_24 + JNE LBB16_24 MOVQ $8, 0(BX) CMPQ AX, R13 - JAE LBB15_52 + JAE LBB16_52 MOVB 0(R15)(AX*1), DX ADDB $-48, DX CMPB DX, $10 - JAE LBB15_5 + JAE LBB16_5 MOVL $0, -56(BP) - JMP LBB15_25 + JMP LBB16_25 -LBB15_21: +LBB16_21: XORL CX, CX XORL R8, R8 XORL R12, R12 - JMP LBB15_25 + JMP LBB16_25 -LBB15_22: +LBB16_22: MOVQ R8, 0(R14) - JMP LBB15_53 + JMP LBB16_53 -LBB15_23: +LBB16_23: MOVQ R13, AX - JMP LBB15_25 + JMP LBB16_25 -LBB15_24: +LBB16_24: ADDQ $-1, AX -LBB15_25: +LBB16_25: XORL DX, DX TESTL CX, CX SETGT DX MOVL DX, -68(BP) TESTQ R12, R12 - JNE LBB15_34 + JNE LBB16_34 TESTL CX, CX - JNE LBB15_34 + JNE LBB16_34 CMPQ AX, R13 - JAE LBB15_32 + JAE LBB16_32 MOVL AX, SI SUBL R13, SI XORL R8, R8 XORL CX, CX -LBB15_29: +LBB16_29: CMPB 0(R15)(AX*1), $48 - JNE LBB15_33 + JNE LBB16_33 ADDQ $1, AX ADDL $-1, CX CMPQ R13, AX - JNE LBB15_29 + JNE LBB16_29 XORL R12, R12 MOVL -56(BP), AX TESTB AX, AX - JNE LBB15_55 - JMP LBB15_60 + JNE LBB16_55 + JMP LBB16_60 -LBB15_32: +LBB16_32: XORL CX, CX XORL R8, R8 -LBB15_33: +LBB16_33: XORL R12, R12 -LBB15_34: +LBB16_34: CMPQ AX, R13 - JAE LBB15_40 + JAE LBB16_40 CMPL R8, $18 - JG LBB15_40 + JG LBB16_40 MOVL $4294967248, R9 -LBB15_37: +LBB16_37: MOVBLZX 0(R15)(AX*1), DI LEAL -48(DI), DX CMPB DX, $9 - JA LBB15_40 + JA LBB16_40 LEAQ 0(R12)(R12*4), DX ADDL R9, DI LEAQ 0(DI)(DX*2), R12 ADDL $-1, CX ADDQ $1, AX CMPQ AX, R13 - JAE LBB15_40 + JAE LBB16_40 LEAL 1(R8), DX CMPL R8, $18 MOVL DX, R8 - JL LBB15_37 + JL 
LBB16_37 -LBB15_40: +LBB16_40: CMPQ AX, R13 - JAE LBB15_54 + JAE LBB16_54 MOVB 0(R15)(AX*1), DX LEAL -48(DX), SI CMPB SI, $9 - JA LBB15_46 + JA LBB16_46 LEAQ -1(R13), SI -LBB15_43: +LBB16_43: CMPQ SI, AX - JE LBB15_59 + JE LBB16_59 MOVBLZX 1(R15)(AX*1), DX LEAL -48(DX), DI ADDQ $1, AX CMPB DI, $9 - JBE LBB15_43 + JBE LBB16_43 MOVL $1, -68(BP) -LBB15_46: +LBB16_46: ORB $32, DX CMPB DX, $101 - JNE LBB15_54 + JNE LBB16_54 LEAQ 1(AX), DI MOVQ $8, 0(BX) CMPQ DI, R13 - JAE LBB15_52 + JAE LBB16_52 MOVB 0(R15)(DI*1), SI CMPB SI, $45 - JE LBB15_50 + JE LBB16_50 MOVL $1, R8 CMPB SI, $43 - JNE LBB15_87 + JNE LBB16_87 -LBB15_50: +LBB16_50: ADDQ $2, AX CMPQ AX, R13 - JAE LBB15_52 + JAE LBB16_52 XORL DX, DX CMPB SI, $43 SETEQ DX LEAL 0(DX)(DX*1), R8 ADDL $-1, R8 MOVB 0(R15)(AX*1), SI - JMP LBB15_88 + JMP LBB16_88 -LBB15_52: +LBB16_52: MOVQ R13, 0(R14) MOVQ $-1, 0(BX) -LBB15_53: +LBB16_53: ADDQ $56, SP BYTE $0x5b // popq %rbx WORD $0x5c41 // popq %r12 @@ -5681,43 +6000,43 @@ LBB15_53: BYTE $0x5d // popq %rbp RET -LBB15_54: +LBB16_54: MOVL CX, SI MOVQ AX, R13 MOVL -56(BP), AX TESTB AX, AX - JE LBB15_60 + JE LBB16_60 -LBB15_55: +LBB16_55: TESTL SI, SI MOVL -44(BP), DX - JNE LBB15_58 + JNE LBB16_58 MOVQ $-9223372036854775808, AX MOVLQSX DX, CX TESTQ R12, R12 - JNS LBB15_69 + JNS LBB16_69 MOVQ R12, DI ANDQ CX, DI CMPQ DI, AX - JE LBB15_69 + JE LBB16_69 -LBB15_58: +LBB16_58: MOVQ $8, 0(BX) - JMP LBB15_61 + JMP LBB16_61 -LBB15_59: +LBB16_59: MOVL $1, -68(BP) MOVL CX, SI MOVL -56(BP), AX TESTB AX, AX - JNE LBB15_55 - JMP LBB15_60 + JNE LBB16_55 + JMP LBB16_60 -LBB15_69: +LBB16_69: LONG $0x6ef9c1c4; BYTE $0xc4 // vmovq %r12, %xmm0 IMULQ CX, R12 - QUAD $0xfffffcd10562f9c5 // vpunpckldq $-815(%rip), %xmm0, %xmm0 /* LCPI15_0(%rip) */ - QUAD $0xfffffcd9055cf9c5 // vsubpd $-807(%rip), %xmm0, %xmm0 /* LCPI15_1(%rip) */ + QUAD $0xfffffcd10562f9c5 // vpunpckldq $-815(%rip), %xmm0, %xmm0 /* LCPI16_0(%rip) */ + QUAD $0xfffffcd9055cf9c5 // vsubpd $-807(%rip), %xmm0, %xmm0 /* LCPI16_1(%rip) */ MOVQ R12, 16(BX) LONG $0x0579e3c4; WORD $0x01c8 // vpermilpd $1, %xmm0, %xmm1 LONG $0xc058f3c5 // vaddsd %xmm0, %xmm1, %xmm0 @@ -5725,21 +6044,21 @@ LBB15_69: LONG $0x7ef9e1c4; BYTE $0xc1 // vmovq %xmm0, %rcx ORQ AX, CX MOVQ CX, 8(BX) - JMP LBB15_86 + JMP LBB16_86 -LBB15_87: +LBB16_87: MOVQ DI, AX -LBB15_88: +LBB16_88: LEAL -48(SI), DI CMPB DI, $9 - JA LBB15_5 + JA LBB16_5 CMPQ AX, R13 - JAE LBB15_93 + JAE LBB16_93 LEAQ -1(R13), R9 XORL DI, DI -LBB15_91: +LBB16_91: MOVL DI, DX MOVBLZX SI, SI CMPL DI, $10000 @@ -5747,41 +6066,41 @@ LBB15_91: LEAL -48(SI)(DI*2), DI WORD $0x4d0f; BYTE $0xfa // cmovgel %edx, %edi CMPQ R9, AX - JE LBB15_94 + JE LBB16_94 MOVBLZX 1(R15)(AX*1), SI LEAL -48(SI), DX ADDQ $1, AX CMPB DX, $10 - JB LBB15_91 - JMP LBB15_95 + JB LBB16_91 + JMP LBB16_95 -LBB15_93: +LBB16_93: XORL DI, DI - JMP LBB15_95 + JMP LBB16_95 -LBB15_94: +LBB16_94: MOVQ R13, AX -LBB15_95: +LBB16_95: MOVQ DI, SI IMULL R8, SI ADDL CX, SI MOVQ AX, R13 -LBB15_60: +LBB16_60: MOVL -44(BP), DX -LBB15_61: +LBB16_61: MOVQ $0, -80(BP) LONG $0x6ef9c1c4; BYTE $0xc4 // vmovq %r12, %xmm0 - QUAD $0xfffffc320562f9c5 // vpunpckldq $-974(%rip), %xmm0, %xmm0 /* LCPI15_0(%rip) */ - QUAD $0xfffffc3a055cf9c5 // vsubpd $-966(%rip), %xmm0, %xmm0 /* LCPI15_1(%rip) */ + QUAD $0xfffffc320562f9c5 // vpunpckldq $-974(%rip), %xmm0, %xmm0 /* LCPI16_0(%rip) */ + QUAD $0xfffffc3a055cf9c5 // vsubpd $-966(%rip), %xmm0, %xmm0 /* LCPI16_1(%rip) */ LONG $0x0579e3c4; WORD $0x01c8 // vpermilpd $1, %xmm0, %xmm1 LONG $0xc058f3c5 // vaddsd %xmm0, %xmm1, %xmm0 LONG $0x4511fbc5; BYTE 
$0xc0 // vmovsd %xmm0, $-64(%rbp) MOVQ R12, AX SHRQ $52, AX - JNE LBB15_74 + JNE LBB16_74 LONG $0x7ef9e1c4; BYTE $0xc1 // vmovq %xmm0, %rcx MOVL DX, AX SHRL $31, AX @@ -5789,47 +6108,47 @@ LBB15_61: ORQ CX, AX MOVQ AX, -64(BP) TESTL SI, SI - JE LBB15_82 + JE LBB16_82 TESTQ R12, R12 - JE LBB15_82 + JE LBB16_82 LONG $0x6ef9e1c4; BYTE $0xc0 // vmovq %rax, %xmm0 LEAL -1(SI), AX CMPL AX, $36 - JA LBB15_67 + JA LBB16_67 CMPL SI, $23 - JL LBB15_70 + JL LBB16_70 LEAL -22(SI), AX - LONG $0x740d8d48; WORD $0x00be; BYTE $0x00 // leaq $48756(%rip), %rcx /* _P10_TAB(%rip) */ + LONG $0x9c0d8d48; WORD $0x00be; BYTE $0x00 // leaq $48796(%rip), %rcx /* _P10_TAB(%rip) */ LONG $0x0459fbc5; BYTE $0xc1 // vmulsd (%rcx,%rax,8), %xmm0, %xmm0 LONG $0x4511fbc5; BYTE $0xc0 // vmovsd %xmm0, $-64(%rbp) MOVL $22, AX - JMP LBB15_71 + JMP LBB16_71 -LBB15_67: +LBB16_67: CMPL SI, $-22 - JB LBB15_74 + JB LBB16_74 NEGL SI - LONG $0x55058d48; WORD $0x00be; BYTE $0x00 // leaq $48725(%rip), %rax /* _P10_TAB(%rip) */ + LONG $0x7d058d48; WORD $0x00be; BYTE $0x00 // leaq $48765(%rip), %rax /* _P10_TAB(%rip) */ LONG $0x045efbc5; BYTE $0xf0 // vdivsd (%rax,%rsi,8), %xmm0, %xmm0 LONG $0x4511fbc5; BYTE $0xc0 // vmovsd %xmm0, $-64(%rbp) - JMP LBB15_78 + JMP LBB16_78 -LBB15_70: +LBB16_70: MOVL SI, AX -LBB15_71: - QUAD $0xfffffbb7052ef9c5 // vucomisd $-1097(%rip), %xmm0 /* LCPI15_2(%rip) */ - JA LBB15_74 - QUAD $0xfffffbb50d10fbc5 // vmovsd $-1099(%rip), %xmm1 /* LCPI15_3(%rip) */ +LBB16_71: + QUAD $0xfffffbb7052ef9c5 // vucomisd $-1097(%rip), %xmm0 /* LCPI16_2(%rip) */ + JA LBB16_74 + QUAD $0xfffffbb50d10fbc5 // vmovsd $-1099(%rip), %xmm1 /* LCPI16_3(%rip) */ LONG $0xc82ef9c5 // vucomisd %xmm0, %xmm1 - JA LBB15_74 + JA LBB16_74 MOVL AX, AX - LONG $0x260d8d48; WORD $0x00be; BYTE $0x00 // leaq $48678(%rip), %rcx /* _P10_TAB(%rip) */ + LONG $0x4e0d8d48; WORD $0x00be; BYTE $0x00 // leaq $48718(%rip), %rcx /* _P10_TAB(%rip) */ LONG $0x0459fbc5; BYTE $0xc1 // vmulsd (%rcx,%rax,8), %xmm0, %xmm0 LONG $0x4511fbc5; BYTE $0xc0 // vmovsd %xmm0, $-64(%rbp) - JMP LBB15_78 + JMP LBB16_78 -LBB15_74: +LBB16_74: MOVQ R11, -96(BP) MOVQ R10, -56(BP) LEAQ -64(BP), CX @@ -5837,28 +6156,28 @@ LBB15_74: MOVQ SI, -88(BP) LONG $0xffe795e8; BYTE $0xff // callq _atof_eisel_lemire64 TESTB AX, AX - JE LBB15_80 + JE LBB16_80 MOVQ -88(BP), SI CMPL -68(BP), $0 - JE LBB15_81 + JE LBB16_81 ADDQ $1, R12 LEAQ -80(BP), CX MOVQ R12, DI MOVL -44(BP), DX LONG $0xffe774e8; BYTE $0xff // callq _atof_eisel_lemire64 TESTB AX, AX - JE LBB15_80 + JE LBB16_80 LONG $0x4d10fbc5; BYTE $0xb0 // vmovsd $-80(%rbp), %xmm1 LONG $0x4510fbc5; BYTE $0xc0 // vmovsd $-64(%rbp), %xmm0 LONG $0xc82ef9c5 // vucomisd %xmm0, %xmm1 - JNE LBB15_80 - JP LBB15_80 + JNE LBB16_80 + JP LBB16_80 -LBB15_78: +LBB16_78: LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq %xmm0, %rax - JMP LBB15_82 + JMP LBB16_82 -LBB15_80: +LBB16_80: MOVQ 0(R14), AX ADDQ AX, R15 MOVQ R13, SI @@ -5869,29 +6188,29 @@ LBB15_80: LONG $0xffed89e8; BYTE $0xff // callq _atof_native LONG $0x4511fbc5; BYTE $0xc0 // vmovsd %xmm0, $-64(%rbp) LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq %xmm0, %rax - JMP LBB15_83 + JMP LBB16_83 -LBB15_81: +LBB16_81: MOVQ -64(BP), AX -LBB15_82: +LBB16_82: LONG $0x6ef9e1c4; BYTE $0xc0 // vmovq %rax, %xmm0 -LBB15_83: +LBB16_83: MOVQ $-9223372036854775808, CX ADDQ $-1, CX ANDQ AX, CX MOVQ $9218868437227405312, AX CMPQ CX, AX - JNE LBB15_85 + JNE LBB16_85 MOVQ $-8, 0(BX) -LBB15_85: +LBB16_85: LONG $0x4311fbc5; BYTE $0x08 // vmovsd %xmm0, $8(%rbx) -LBB15_86: +LBB16_86: MOVQ R13, 0(R14) - JMP LBB15_53 + JMP LBB16_53 _vsigned: BYTE 
$0x55 // pushq %rbp @@ -5906,71 +6225,71 @@ _vsigned: MOVQ 0(SI), CX MOVQ CX, 24(DX) CMPQ AX, R11 - JAE LBB16_1 + JAE LBB17_1 MOVB 0(R8)(AX*1), CX MOVL $1, R9 CMPB CX, $45 - JNE LBB16_5 + JNE LBB17_5 ADDQ $1, AX CMPQ AX, R11 - JAE LBB16_1 + JAE LBB17_1 MOVB 0(R8)(AX*1), CX MOVQ $-1, R9 -LBB16_5: +LBB17_5: LEAL -48(CX), DI CMPB DI, $10 - JB LBB16_7 + JB LBB17_7 MOVQ AX, 0(SI) MOVQ $-2, 0(DX) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB16_1: +LBB17_1: MOVQ R11, 0(SI) MOVQ $-1, 0(DX) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB16_7: +LBB17_7: CMPB CX, $48 - JNE LBB16_12 + JNE LBB17_12 LEAQ 1(AX), DI CMPQ AX, R11 - JAE LBB16_11 + JAE LBB17_11 MOVB 0(R8)(DI*1), CX ADDB $-46, CX CMPB CX, $55 - JA LBB16_11 + JA LBB17_11 MOVBLZX CX, R10 MOVQ $36028797027352577, CX BTQ R10, CX - JAE LBB16_11 + JAE LBB17_11 -LBB16_12: +LBB17_12: CMPQ AX, R11 MOVQ R11, R10 LONG $0xd0470f4c // cmovaq %rax, %r10 XORL DI, DI -LBB16_13: +LBB17_13: CMPQ R10, AX - JE LBB16_23 + JE LBB17_23 MOVBQSX 0(R8)(AX*1), CX LEAL -48(CX), BX CMPB BX, $9 - JA LBB16_18 + JA LBB17_18 IMUL3Q $10, DI, DI - JO LBB16_17 + JO LBB17_17 ADDQ $1, AX ADDL $-48, CX IMULQ R9, CX ADDQ CX, DI - JNO LBB16_13 + JNO LBB17_13 -LBB16_17: +LBB17_17: ADDQ $-1, AX MOVQ AX, 0(SI) MOVQ $-5, 0(DX) @@ -5978,33 +6297,33 @@ LBB16_17: BYTE $0x5d // popq %rbp RET -LBB16_11: +LBB17_11: MOVQ DI, 0(SI) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB16_18: +LBB17_18: CMPQ AX, R11 - JAE LBB16_22 + JAE LBB17_22 CMPB CX, $46 - JE LBB16_25 + JE LBB17_25 CMPB CX, $69 - JE LBB16_25 + JE LBB17_25 CMPB CX, $101 - JNE LBB16_22 + JNE LBB17_22 -LBB16_25: +LBB17_25: MOVQ AX, 0(SI) MOVQ $-6, 0(DX) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB16_22: +LBB17_22: MOVQ AX, R10 -LBB16_23: +LBB17_23: MOVQ R10, 0(SI) MOVQ DI, 16(DX) BYTE $0x5b // popq %rbx @@ -6026,12 +6345,12 @@ _vunsigned: MOVQ 0(SI), AX MOVQ AX, 24(DX) CMPQ CX, R14 - JAE LBB17_1 + JAE LBB18_1 MOVB 0(R9)(CX*1), AX CMPB AX, $45 - JNE LBB17_4 + JNE LBB18_4 -LBB17_3: +LBB18_3: MOVQ CX, 0(SI) MOVQ $-6, 0(R8) BYTE $0x5b // popq %rbx @@ -6039,7 +6358,7 @@ LBB17_3: BYTE $0x5d // popq %rbp RET -LBB17_1: +LBB18_1: MOVQ R14, 0(SI) MOVQ $-1, 0(R8) BYTE $0x5b // popq %rbx @@ -6047,10 +6366,10 @@ LBB17_1: BYTE $0x5d // popq %rbp RET -LBB17_4: +LBB18_4: LEAL -48(AX), DX CMPB DX, $10 - JB LBB17_6 + JB LBB18_6 MOVQ CX, 0(SI) MOVQ $-2, 0(R8) BYTE $0x5b // popq %rbx @@ -6058,34 +6377,34 @@ LBB17_4: BYTE $0x5d // popq %rbp RET -LBB17_6: +LBB18_6: CMPB AX, $48 - JNE LBB17_10 + JNE LBB18_10 MOVB 1(R9)(CX*1), AX ADDB $-46, AX CMPB AX, $55 - JA LBB17_9 + JA LBB18_9 MOVBLZX AX, AX MOVQ $36028797027352577, DX BTQ AX, DX - JAE LBB17_9 + JAE LBB18_9 -LBB17_10: +LBB18_10: CMPQ R14, CX MOVQ CX, R10 LONG $0xd6470f4d // cmovaq %r14, %r10 XORL AX, AX MOVL $10, R11 -LBB17_11: +LBB18_11: CMPQ R10, CX - JE LBB17_22 + JE LBB18_22 MOVBLSX 0(R9)(CX*1), BX LEAL -48(BX), DX CMPB DX, $9 - JA LBB17_17 + JA LBB18_17 MULQ R11 - JO LBB17_16 + JO LBB18_16 ADDQ $1, CX ADDL $-48, BX XORL DI, DI @@ -6094,11 +6413,11 @@ LBB17_11: MOVQ DI, DX NEGQ DX XORQ DX, DI - JNE LBB17_16 + JNE LBB18_16 TESTQ DX, DX - JNS LBB17_11 + JNS LBB18_11 -LBB17_16: +LBB18_16: ADDQ $-1, CX MOVQ CX, 0(SI) MOVQ $-5, 0(R8) @@ -6107,20 +6426,20 @@ LBB17_16: BYTE $0x5d // popq %rbp RET -LBB17_17: +LBB18_17: CMPQ CX, R14 - JAE LBB17_21 + JAE LBB18_21 CMPB BX, $46 - JE LBB17_3 + JE LBB18_3 CMPB BX, $69 - JE LBB17_3 + JE LBB18_3 CMPB BX, $101 - JE LBB17_3 + JE LBB18_3 -LBB17_21: +LBB18_21: MOVQ CX, R10 -LBB17_22: +LBB18_22: MOVQ R10, 0(SI) MOVQ AX, 16(R8) 
BYTE $0x5b // popq %rbx @@ -6128,7 +6447,7 @@ LBB17_22: BYTE $0x5d // popq %rbp RET -LBB17_9: +LBB18_9: ADDQ $1, CX MOVQ CX, 0(SI) BYTE $0x5b // popq %rbx @@ -6159,30 +6478,30 @@ _fsm_exec: SUBQ $40, SP MOVL CX, -60(BP) CMPL 0(DI), $0 - JE LBB19_2 + JE LBB20_2 MOVQ DI, R12 MOVQ DX, -56(BP) MOVQ SI, -48(BP) MOVQ $-1, R14 - JMP LBB19_6 + JMP LBB20_6 -LBB19_2: +LBB20_2: MOVQ $-1, R13 - JMP LBB19_74 + JMP LBB20_74 -LBB19_3: +LBB20_3: LEAQ 3(AX), CX MOVQ CX, 0(BX) TESTQ AX, AX - JLE LBB19_74 + JLE LBB20_74 -LBB19_4: +LBB20_4: MOVL 0(R12), CX MOVQ R14, R13 TESTL CX, CX - JE LBB19_74 + JE LBB20_74 -LBB19_6: +LBB20_6: MOVQ -48(BP), R13 MOVQ R13, DI MOVQ -56(BP), BX @@ -6191,87 +6510,87 @@ LBB19_6: MOVLQSX 0(R12), DX LEAQ -1(DX), CX CMPQ R14, $-1 - JNE LBB19_8 + JNE LBB20_8 MOVQ 0(BX), R14 ADDQ $-1, R14 -LBB19_8: +LBB20_8: MOVL 0(R12)(DX*4), SI ADDL $-1, SI CMPL SI, $5 - JA LBB19_13 - LONG $0x353d8d48; WORD $0x0004; BYTE $0x00 // leaq $1077(%rip), %rdi /* LJTI19_0(%rip) */ + JA LBB20_13 + LONG $0x353d8d48; WORD $0x0004; BYTE $0x00 // leaq $1077(%rip), %rdi /* LJTI20_0(%rip) */ MOVLQSX 0(DI)(SI*4), SI ADDQ DI, SI JMP SI -LBB19_10: +LBB20_10: MOVBLSX AX, AX CMPL AX, $44 - JE LBB19_30 + JE LBB20_30 CMPL AX, $93 - JE LBB19_12 - JMP LBB19_68 + JE LBB20_12 + JMP LBB20_68 -LBB19_13: +LBB20_13: MOVL CX, 0(R12) MOVBLSX AX, AX CMPL AX, $123 - JBE LBB19_25 - JMP LBB19_68 + JBE LBB20_25 + JMP LBB20_68 -LBB19_14: +LBB20_14: MOVBLSX AX, AX CMPL AX, $44 - JNE LBB19_15 + JNE LBB20_15 CMPL DX, $65535 - JG LBB19_73 + JG LBB20_73 LEAL 1(DX), AX MOVL AX, 0(R12) MOVL $3, 4(R12)(DX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_16: +LBB20_16: CMPB AX, $34 - JNE LBB19_68 + JNE LBB20_68 MOVL $4, 0(R12)(DX*4) MOVQ 0(BX), R15 MOVQ R13, DI -LBB19_18: +LBB20_18: MOVQ R15, SI LEAQ -72(BP), DX LONG $0xfff492e8; BYTE $0xff // callq _advance_string MOVQ AX, R13 TESTQ AX, AX - JS LBB19_65 + JS LBB20_65 MOVQ R13, 0(BX) TESTQ R15, R15 - JG LBB19_4 - JMP LBB19_20 + JG LBB20_4 + JMP LBB20_20 -LBB19_21: +LBB20_21: CMPB AX, $58 - JNE LBB19_68 + JNE LBB20_68 MOVL $0, 0(R12)(DX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_23: +LBB20_23: CMPB AX, $93 - JE LBB19_12 + JE LBB20_12 MOVL $1, 0(R12)(DX*4) MOVBLSX AX, AX CMPL AX, $123 - JA LBB19_68 + JA LBB20_68 -LBB19_25: +LBB20_25: MOVQ $-1, R13 - LONG $0x6a0d8d48; WORD $0x0003; BYTE $0x00 // leaq $874(%rip), %rcx /* LJTI19_1(%rip) */ + LONG $0x6a0d8d48; WORD $0x0003; BYTE $0x00 // leaq $874(%rip), %rcx /* LJTI20_1(%rip) */ MOVLQSX 0(CX)(AX*4), AX ADDQ CX, AX JMP AX -LBB19_28: +LBB20_28: MOVQ -56(BP), BX MOVQ 0(BX), R15 LEAQ -1(R15), R13 @@ -6282,51 +6601,51 @@ LBB19_28: SUBQ R13, SI LONG $0x000d76e8; BYTE $0x00 // callq _do_skip_number TESTQ AX, AX - JS LBB19_66 + JS LBB20_66 MOVQ 0(BX), CX ADDQ CX, AX ADDQ $-1, AX MOVQ AX, 0(BX) TESTQ R15, R15 - JG LBB19_4 - JMP LBB19_74 + JG LBB20_4 + JMP LBB20_74 -LBB19_26: +LBB20_26: MOVBLSX AX, AX CMPL AX, $34 - JE LBB19_34 + JE LBB20_34 -LBB19_15: +LBB20_15: CMPL AX, $125 - JNE LBB19_68 + JNE LBB20_68 -LBB19_12: +LBB20_12: MOVL CX, 0(R12) MOVQ R14, R13 TESTL CX, CX - JNE LBB19_6 - JMP LBB19_74 + JNE LBB20_6 + JMP LBB20_74 -LBB19_30: +LBB20_30: CMPL DX, $65535 - JG LBB19_73 + JG LBB20_73 LEAL 1(DX), AX MOVL AX, 0(R12) MOVL $0, 4(R12)(DX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_34: +LBB20_34: MOVL $2, 0(R12)(DX*4) CMPL -60(BP), $0 - JE LBB19_37 + JE LBB20_37 MOVQ R13, DI MOVQ BX, SI LONG $0x000599e8; BYTE $0x00 // callq _validate_string TESTQ AX, AX - JNS LBB19_39 - JMP LBB19_36 + JNS LBB20_39 + JMP LBB20_36 -LBB19_37: +LBB20_37: MOVQ 0(BX), R15 MOVQ R13, DI MOVQ R15, SI @@ 
-6334,31 +6653,31 @@ LBB19_37: LONG $0xfff36de8; BYTE $0xff // callq _advance_string MOVQ AX, R13 TESTQ AX, AX - JS LBB19_65 + JS LBB20_65 MOVQ R13, 0(BX) TESTQ R15, R15 - JLE LBB19_20 + JLE LBB20_20 -LBB19_39: +LBB20_39: MOVLQSX 0(R12), AX CMPQ AX, $65535 - JG LBB19_73 + JG LBB20_73 LEAL 1(AX), CX MOVL CX, 0(R12) MOVL $4, 4(R12)(AX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_41: +LBB20_41: CMPL -60(BP), $0 - JE LBB19_64 + JE LBB20_64 MOVQ -48(BP), DI MOVQ -56(BP), SI LONG $0x000529e8; BYTE $0x00 // callq _validate_string TESTQ AX, AX - JNS LBB19_4 - JMP LBB19_36 + JNS LBB20_4 + JMP LBB20_36 -LBB19_43: +LBB20_43: MOVQ -56(BP), BX MOVQ 0(BX), R13 MOVQ -48(BP), AX @@ -6368,107 +6687,107 @@ LBB19_43: SUBQ R13, SI LONG $0x000c58e8; BYTE $0x00 // callq _do_skip_number TESTQ AX, AX - JS LBB19_67 + JS LBB20_67 ADDQ AX, 0(BX) TESTQ R13, R13 - JG LBB19_4 - JMP LBB19_45 + JG LBB20_4 + JMP LBB20_45 -LBB19_46: +LBB20_46: MOVLQSX 0(R12), AX CMPQ AX, $65535 - JG LBB19_73 + JG LBB20_73 LEAL 1(AX), CX MOVL CX, 0(R12) MOVL $5, 4(R12)(AX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_48: +LBB20_48: MOVQ -56(BP), BX MOVQ 0(BX), AX MOVQ -48(BP), SI MOVQ 8(SI), CX LEAQ -4(CX), DX CMPQ AX, DX - JA LBB19_72 + JA LBB20_72 MOVQ 0(SI), CX MOVL 0(CX)(AX*1), DX CMPL DX, $1702063201 - JNE LBB19_69 + JNE LBB20_69 LEAQ 4(AX), CX MOVQ CX, 0(BX) TESTQ AX, AX - JG LBB19_4 - JMP LBB19_51 + JG LBB20_4 + JMP LBB20_51 -LBB19_52: +LBB20_52: MOVQ -56(BP), BX MOVQ 0(BX), AX MOVQ -48(BP), SI MOVQ 8(SI), CX LEAQ -3(CX), DX CMPQ AX, DX - JA LBB19_72 + JA LBB20_72 MOVQ 0(SI), CX LEAQ -1(AX), R13 CMPL -1(CX)(AX*1), $1819047278 - JE LBB19_3 - JMP LBB19_54 + JE LBB20_3 + JMP LBB20_54 -LBB19_57: +LBB20_57: MOVQ -56(BP), BX MOVQ 0(BX), AX MOVQ -48(BP), SI MOVQ 8(SI), CX LEAQ -3(CX), DX CMPQ AX, DX - JA LBB19_72 + JA LBB20_72 MOVQ 0(SI), CX LEAQ -1(AX), R13 CMPL -1(CX)(AX*1), $1702195828 - JE LBB19_3 - JMP LBB19_59 + JE LBB20_3 + JMP LBB20_59 -LBB19_62: +LBB20_62: MOVLQSX 0(R12), AX CMPQ AX, $65535 - JG LBB19_73 + JG LBB20_73 LEAL 1(AX), CX MOVL CX, 0(R12) MOVL $6, 4(R12)(AX*4) - JMP LBB19_4 + JMP LBB20_4 -LBB19_64: +LBB20_64: MOVQ -56(BP), BX MOVQ 0(BX), R15 MOVQ -48(BP), DI - JMP LBB19_18 + JMP LBB20_18 -LBB19_73: +LBB20_73: MOVQ $-7, R13 - JMP LBB19_74 + JMP LBB20_74 -LBB19_65: +LBB20_65: MOVQ -48(BP), AX MOVQ 8(AX), AX MOVQ AX, 0(BX) - JMP LBB19_74 + JMP LBB20_74 -LBB19_20: +LBB20_20: ADDQ $-1, R15 MOVQ R15, R13 - JMP LBB19_74 + JMP LBB20_74 -LBB19_66: +LBB20_66: MOVQ 0(BX), CX SUBQ AX, CX ADDQ $-2, CX MOVQ CX, 0(BX) -LBB19_68: +LBB20_68: MOVQ $-2, R13 -LBB19_74: +LBB20_74: MOVQ R13, AX ADDQ $40, SP BYTE $0x5b // popq %rbx @@ -6479,226 +6798,226 @@ LBB19_74: BYTE $0x5d // popq %rbp RET -LBB19_36: +LBB20_36: MOVQ AX, R13 - JMP LBB19_74 + JMP LBB20_74 -LBB19_72: +LBB20_72: MOVQ CX, 0(BX) - JMP LBB19_74 + JMP LBB20_74 -LBB19_67: +LBB20_67: NOTQ AX ADDQ AX, 0(BX) - JMP LBB19_68 + JMP LBB20_68 -LBB19_45: +LBB20_45: ADDQ $-1, R13 - JMP LBB19_74 + JMP LBB20_74 -LBB19_69: +LBB20_69: MOVQ $-2, R13 CMPB DX, $97 - JNE LBB19_74 + JNE LBB20_74 ADDQ $1, AX MOVL $1702063201, DX -LBB19_71: +LBB20_71: SHRL $8, DX MOVQ AX, 0(BX) MOVBLSX 0(CX)(AX*1), SI MOVBLZX DX, DI ADDQ $1, AX CMPL DI, SI - JE LBB19_71 - JMP LBB19_74 + JE LBB20_71 + JMP LBB20_74 -LBB19_59: +LBB20_59: MOVQ R13, 0(BX) CMPB 0(CX)(R13*1), $116 - JNE LBB19_68 + JNE LBB20_68 MOVL $1702195828, DX -LBB19_61: +LBB20_61: SHRL $8, DX MOVQ AX, 0(BX) MOVBLSX 0(CX)(AX*1), SI MOVBLZX DX, DI ADDQ $1, AX CMPL DI, SI - JE LBB19_61 - JMP LBB19_68 + JE LBB20_61 + JMP LBB20_68 -LBB19_51: +LBB20_51: ADDQ $-1, AX MOVQ AX, 
R13 - JMP LBB19_74 + JMP LBB20_74 -LBB19_54: +LBB20_54: MOVQ R13, 0(BX) CMPB 0(CX)(R13*1), $110 - JNE LBB19_68 + JNE LBB20_68 MOVL $1819047278, DX -LBB19_56: +LBB20_56: SHRL $8, DX MOVQ AX, 0(BX) MOVBLSX 0(CX)(AX*1), SI MOVBLZX DX, DI ADDQ $1, AX CMPL DI, SI - JE LBB19_56 - JMP LBB19_68 - -// .set L19_0_set_10, LBB19_10-LJTI19_0 -// .set L19_0_set_14, LBB19_14-LJTI19_0 -// .set L19_0_set_16, LBB19_16-LJTI19_0 -// .set L19_0_set_21, LBB19_21-LJTI19_0 -// .set L19_0_set_23, LBB19_23-LJTI19_0 -// .set L19_0_set_26, LBB19_26-LJTI19_0 -LJTI19_0: - LONG $0xfffffbd4 // .long L19_0_set_10 - LONG $0xfffffc03 // .long L19_0_set_14 - LONG $0xfffffc30 // .long L19_0_set_16 - LONG $0xfffffc6f // .long L19_0_set_21 - LONG $0xfffffc84 // .long L19_0_set_23 - LONG $0xfffffcfc // .long L19_0_set_26 - - // .set L19_1_set_74, LBB19_74-LJTI19_1 - // .set L19_1_set_68, LBB19_68-LJTI19_1 - // .set L19_1_set_41, LBB19_41-LJTI19_1 - // .set L19_1_set_43, LBB19_43-LJTI19_1 - // .set L19_1_set_28, LBB19_28-LJTI19_1 - // .set L19_1_set_46, LBB19_46-LJTI19_1 - // .set L19_1_set_48, LBB19_48-LJTI19_1 - // .set L19_1_set_52, LBB19_52-LJTI19_1 - // .set L19_1_set_57, LBB19_57-LJTI19_1 - // .set L19_1_set_62, LBB19_62-LJTI19_1 -LJTI19_1: - LONG $0xffffff30 // .long L19_1_set_74 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xfffffd9c // .long L19_1_set_41 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xfffffdc1 // .long L19_1_set_43 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xfffffc9f // .long L19_1_set_28 - LONG $0xfffffc9f // .long L19_1_set_28 - LONG $0xfffffc9f // .long L19_1_set_28 - LONG $0xfffffc9f // .long L19_1_set_28 - LONG $0xfffffc9f // .long L19_1_set_28 - LONG $0xfffffc9f // .long L19_1_set_28 - LONG $0xfffffc9f // .long L19_1_set_28 - LONG $0xfffffc9f // .long L19_1_set_28 - LONG $0xfffffc9f // .long 
L19_1_set_28 - LONG $0xfffffc9f // .long L19_1_set_28 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xfffffdf8 // .long L19_1_set_46 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xfffffe1d // .long L19_1_set_48 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xfffffe60 // .long L19_1_set_52 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xfffffe96 // .long L19_1_set_57 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xffffff29 // .long L19_1_set_68 - LONG $0xfffffecc // .long L19_1_set_62 + JE LBB20_56 + JMP LBB20_68 + +// .set L20_0_set_10, LBB20_10-LJTI20_0 +// .set L20_0_set_14, LBB20_14-LJTI20_0 +// .set L20_0_set_16, LBB20_16-LJTI20_0 +// .set L20_0_set_21, LBB20_21-LJTI20_0 +// .set L20_0_set_23, LBB20_23-LJTI20_0 +// .set L20_0_set_26, LBB20_26-LJTI20_0 +LJTI20_0: + LONG $0xfffffbd4 // .long L20_0_set_10 + LONG $0xfffffc03 // .long L20_0_set_14 + LONG $0xfffffc30 // .long L20_0_set_16 + LONG $0xfffffc6f // .long L20_0_set_21 + LONG $0xfffffc84 // .long L20_0_set_23 + LONG $0xfffffcfc // .long L20_0_set_26 + + // .set L20_1_set_74, LBB20_74-LJTI20_1 + // .set L20_1_set_68, LBB20_68-LJTI20_1 + // .set L20_1_set_41, LBB20_41-LJTI20_1 + // .set L20_1_set_43, LBB20_43-LJTI20_1 + // .set L20_1_set_28, LBB20_28-LJTI20_1 + // .set L20_1_set_46, LBB20_46-LJTI20_1 + // 
.set L20_1_set_48, LBB20_48-LJTI20_1 + // .set L20_1_set_52, LBB20_52-LJTI20_1 + // .set L20_1_set_57, LBB20_57-LJTI20_1 + // .set L20_1_set_62, LBB20_62-LJTI20_1 +LJTI20_1: + LONG $0xffffff30 // .long L20_1_set_74 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xfffffd9c // .long L20_1_set_41 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xfffffdc1 // .long L20_1_set_43 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xfffffc9f // .long L20_1_set_28 + LONG $0xfffffc9f // .long L20_1_set_28 + LONG $0xfffffc9f // .long L20_1_set_28 + LONG $0xfffffc9f // .long L20_1_set_28 + LONG $0xfffffc9f // .long L20_1_set_28 + LONG $0xfffffc9f // .long L20_1_set_28 + LONG $0xfffffc9f // .long L20_1_set_28 + LONG $0xfffffc9f // .long L20_1_set_28 + LONG $0xfffffc9f // .long L20_1_set_28 + LONG $0xfffffc9f // .long L20_1_set_28 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 
// .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xfffffdf8 // .long L20_1_set_46 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xfffffe1d // .long L20_1_set_48 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xfffffe60 // .long L20_1_set_52 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xfffffe96 // .long L20_1_set_57 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xffffff29 // .long L20_1_set_68 + LONG $0xfffffecc // .long L20_1_set_62 _skip_array: BYTE $0x55 // pushq %rbp @@ -6740,16 +7059,16 @@ _skip_string: MOVQ BX, SI LONG $0xffee73e8; BYTE $0xff // callq _advance_string TESTQ AX, AX - JS LBB22_2 + JS LBB23_2 ADDQ $-1, BX MOVQ AX, CX MOVQ BX, AX - JMP LBB22_3 + JMP LBB23_3 -LBB22_2: +LBB23_2: MOVQ 8(R15), CX -LBB22_3: +LBB23_3: MOVQ CX, 0(R14) ADDQ $8, SP BYTE $0x5b // popq %rbx @@ -6758,15 +7077,15 @@ LBB22_3: BYTE $0x5d // popq %rbp RET -LCPI23_0: +LCPI24_0: QUAD $0x2222222222222222; QUAD $0x2222222222222222 // .space 16, '""""""""""""""""' QUAD $0x2222222222222222; QUAD $0x2222222222222222 // .space 16, '""""""""""""""""' -LCPI23_1: +LCPI24_1: QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' QUAD $0x5c5c5c5c5c5c5c5c; QUAD $0x5c5c5c5c5c5c5c5c // .space 16, '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -LCPI23_2: +LCPI24_2: QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 16, ' ' QUAD $0x2020202020202020; QUAD $0x2020202020202020 // .space 16, ' ' @@ -6783,20 +7102,20 @@ _validate_string: MOVQ 8(DI), R13 MOVQ R13, -48(BP) SUBQ R14, R13 - JE LBB23_54 + JE LBB24_54 MOVQ 0(DI), R11 LEAQ 0(R11)(R14*1), DI MOVQ $-1, AX XORL R12, R12 CMPQ R13, $64 - JB LBB23_2 - QUAD $0xffffff5b056ffec5 // vmovdqu $-165(%rip), %ymm0 /* LCPI23_0(%rip) */ - QUAD $0xffffff730d6ffec5 // vmovdqu $-141(%rip), %ymm1 /* LCPI23_1(%rip) */ - QUAD $0xffffff8b156ffec5 // vmovdqu $-117(%rip), %ymm2 /* LCPI23_2(%rip) */ + JB LBB24_2 + QUAD $0xffffff5b056ffec5 // vmovdqu $-165(%rip), %ymm0 /* LCPI24_0(%rip) */ + QUAD $0xffffff730d6ffec5 // vmovdqu $-141(%rip), %ymm1 /* LCPI24_1(%rip) */ + QUAD $0xffffff8b156ffec5 // vmovdqu $-117(%rip), %ymm2 /* LCPI24_2(%rip) */ LONG $0xdb76e5c5 // vpcmpeqd %ymm3, %ymm3, %ymm3 MOVQ R14, R15 -LBB23_4: +LBB24_4: LONG $0x6f7e81c4; WORD $0x3b24 // vmovdqu (%r11,%r15), %ymm4 LONG $0x6f7e81c4; WORD 
$0x3b6c; BYTE $0x20 // vmovdqu $32(%r11,%r15), %ymm5 LONG $0xf074ddc5 // vpcmpeqb %ymm0, %ymm4, %ymm6 @@ -6816,34 +7135,34 @@ LBB23_4: SHLQ $32, R10 SHLQ $32, R8 ORQ R10, CX - JNE LBB23_5 + JNE LBB24_5 TESTQ R12, R12 - JNE LBB23_7 + JNE LBB24_7 XORL R12, R12 -LBB23_10: +LBB24_10: LONG $0xec64edc5 // vpcmpgtb %ymm4, %ymm2, %ymm5 LONG $0xe364ddc5 // vpcmpgtb %ymm3, %ymm4, %ymm4 LONG $0xe4dbd5c5 // vpand %ymm4, %ymm5, %ymm4 LONG $0xccd7fdc5 // vpmovmskb %ymm4, %ecx ORQ CX, R8 TESTQ BX, BX - JNE LBB23_11 + JNE LBB24_11 TESTQ R8, R8 - JNE LBB23_15 + JNE LBB24_15 ADDQ $-64, R13 ADDQ $64, R15 CMPQ R13, $63 - JA LBB23_4 - JMP LBB23_18 + JA LBB24_4 + JMP LBB24_18 -LBB23_5: +LBB24_5: CMPQ AX, $-1 - JNE LBB23_7 + JNE LBB24_7 BSFQ CX, AX ADDQ R15, AX -LBB23_7: +LBB24_7: MOVQ R12, R9 NOTQ R9 ANDQ CX, R9 @@ -6863,21 +7182,21 @@ LBB23_7: ANDQ DX, R10 NOTQ R10 ANDQ R10, BX - JMP LBB23_10 + JMP LBB24_10 -LBB23_11: +LBB24_11: BSFQ BX, CX LEAQ 0(CX)(R15*1), BX ADDQ $1, BX TESTQ R8, R8 - JE LBB23_52 + JE LBB24_52 BSFQ R8, DX CMPQ DX, CX - JBE LBB23_13 + JBE LBB24_13 -LBB23_52: +LBB24_52: TESTQ BX, BX - JS LBB23_53 + JS LBB24_53 MOVQ SI, R15 MOVQ R14, SI NOTQ SI @@ -6885,30 +7204,30 @@ LBB23_52: WORD $0xf8c5; BYTE $0x77 // vzeroupper LONG $0x000280e8; BYTE $0x00 // callq _utf8_validate TESTQ AX, AX - JS LBB23_56 + JS LBB24_56 MOVQ R15, SI ADDQ 0(R15), AX -LBB23_58: +LBB24_58: MOVQ $-2, BX - JMP LBB23_59 + JMP LBB24_59 -LBB23_53: +LBB24_53: CMPQ BX, $-1 - JNE LBB23_59 + JNE LBB24_59 -LBB23_54: +LBB24_54: MOVQ $-1, BX MOVQ -48(BP), AX - JMP LBB23_59 + JMP LBB24_59 -LBB23_56: +LBB24_56: ADDQ $-1, R14 MOVQ BX, AX MOVQ R14, BX MOVQ R15, SI -LBB23_59: +LBB24_59: MOVQ AX, 0(SI) MOVQ BX, AX ADDQ $8, SP @@ -6921,80 +7240,80 @@ LBB23_59: WORD $0xf8c5; BYTE $0x77 // vzeroupper RET -LBB23_15: +LBB24_15: MOVQ $-2, BX CMPQ AX, $-1 - JNE LBB23_59 + JNE LBB24_59 BSFQ R8, AX ADDQ R15, AX - JMP LBB23_59 + JMP LBB24_59 -LBB23_18: +LBB24_18: ADDQ R11, R15 CMPQ R13, $32 - JB LBB23_34 + JB LBB24_34 -LBB23_20: +LBB24_20: LONG $0x6f7ec1c4; BYTE $0x07 // vmovdqu (%r15), %ymm0 - QUAD $0xfffffda50d74fdc5 // vpcmpeqb $-603(%rip), %ymm0, %ymm1 /* LCPI23_0(%rip) */ + QUAD $0xfffffda50d74fdc5 // vpcmpeqb $-603(%rip), %ymm0, %ymm1 /* LCPI24_0(%rip) */ LONG $0xd9d7fdc5 // vpmovmskb %ymm1, %ebx - QUAD $0xfffffdb90d74fdc5 // vpcmpeqb $-583(%rip), %ymm0, %ymm1 /* LCPI23_1(%rip) */ + QUAD $0xfffffdb90d74fdc5 // vpcmpeqb $-583(%rip), %ymm0, %ymm1 /* LCPI24_1(%rip) */ LONG $0xc9d7fdc5 // vpmovmskb %ymm1, %ecx - QUAD $0xfffffdcd0d6ffec5 // vmovdqu $-563(%rip), %ymm1 /* LCPI23_2(%rip) */ + QUAD $0xfffffdcd0d6ffec5 // vmovdqu $-563(%rip), %ymm1 /* LCPI24_2(%rip) */ LONG $0xc864f5c5 // vpcmpgtb %ymm0, %ymm1, %ymm1 LONG $0xd276edc5 // vpcmpeqd %ymm2, %ymm2, %ymm2 LONG $0xc264fdc5 // vpcmpgtb %ymm2, %ymm0, %ymm0 LONG $0xc0dbf5c5 // vpand %ymm0, %ymm1, %ymm0 LONG $0xc8d77dc5 // vpmovmskb %ymm0, %r9d TESTL CX, CX - JNE LBB23_21 + JNE LBB24_21 TESTQ R12, R12 - JNE LBB23_24 + JNE LBB24_24 XORL R12, R12 TESTQ BX, BX - JE LBB23_30 + JE LBB24_30 -LBB23_27: +LBB24_27: SUBQ R11, R15 BSFQ BX, DX LEAQ 0(R15)(DX*1), BX ADDQ $1, BX TESTQ R9, R9 - JE LBB23_52 + JE LBB24_52 BSFQ R9, CX CMPQ CX, DX - JA LBB23_52 + JA LBB24_52 ADDQ R15, CX CMPQ AX, $-1 LONG $0xc1440f48 // cmoveq %rcx, %rax - JMP LBB23_58 + JMP LBB24_58 -LBB23_2: +LBB24_2: MOVQ DI, R15 CMPQ R13, $32 - JAE LBB23_20 - JMP LBB23_34 + JAE LBB24_20 + JMP LBB24_34 -LBB23_13: +LBB24_13: ADDQ R15, DX CMPQ AX, $-1 LONG $0xc2440f48 // cmoveq %rdx, %rax - JMP LBB23_58 + JMP LBB24_58 -LBB23_21: +LBB24_21: MOVQ SI, R8 
CMPQ AX, $-1 - JNE LBB23_25 + JNE LBB24_25 MOVQ R15, SI SUBQ R11, SI BSFQ CX, AX ADDQ SI, AX - JMP LBB23_25 + JMP LBB24_25 -LBB23_24: +LBB24_24: MOVQ SI, R8 -LBB23_25: +LBB24_25: MOVL R12, R10 NOTL R10 ANDL CX, R10 @@ -7013,45 +7332,45 @@ LBB23_25: ANDL SI, BX MOVQ R8, SI TESTQ BX, BX - JNE LBB23_27 + JNE LBB24_27 -LBB23_30: +LBB24_30: TESTQ R9, R9 - JNE LBB23_31 + JNE LBB24_31 ADDQ $32, R15 ADDQ $-32, R13 -LBB23_34: +LBB24_34: MOVQ R11, DX NOTQ DX TESTQ R12, R12 - JNE LBB23_35 + JNE LBB24_35 TESTQ R13, R13 - JE LBB23_51 + JE LBB24_51 -LBB23_38: +LBB24_38: LEAQ 1(DX), R9 -LBB23_39: +LBB24_39: XORL CX, CX -LBB23_40: +LBB24_40: MOVBLZX 0(R15)(CX*1), BX CMPB BX, $34 - JE LBB23_50 + JE LBB24_50 CMPB BX, $92 - JE LBB23_42 + JE LBB24_42 CMPB BX, $31 - JBE LBB23_45 + JBE LBB24_45 ADDQ $1, CX CMPQ R13, CX - JNE LBB23_40 - JMP LBB23_48 + JNE LBB24_40 + JMP LBB24_48 -LBB23_42: +LBB24_42: LEAQ -1(R13), BX CMPQ BX, CX - JE LBB23_54 + JE LBB24_54 LEAQ 0(R9)(R15*1), BX ADDQ CX, BX CMPQ AX, $-1 @@ -7064,53 +7383,53 @@ LBB23_42: ADDQ $-2, R13 CMPQ R13, CX MOVQ BX, R13 - JNE LBB23_39 - JMP LBB23_54 + JNE LBB24_39 + JMP LBB24_54 -LBB23_50: +LBB24_50: ADDQ CX, R15 ADDQ $1, R15 -LBB23_51: +LBB24_51: SUBQ R11, R15 MOVQ R15, BX - JMP LBB23_52 + JMP LBB24_52 -LBB23_48: +LBB24_48: CMPB BX, $34 - JNE LBB23_54 + JNE LBB24_54 ADDQ R13, R15 - JMP LBB23_51 + JMP LBB24_51 -LBB23_31: +LBB24_31: MOVQ $-2, BX CMPQ AX, $-1 - JNE LBB23_59 + JNE LBB24_59 SUBQ R11, R15 BSFQ R9, AX ADDQ R15, AX - JMP LBB23_59 + JMP LBB24_59 -LBB23_35: +LBB24_35: TESTQ R13, R13 - JE LBB23_54 + JE LBB24_54 LEAQ 0(R15)(DX*1), CX CMPQ AX, $-1 LONG $0xc1440f48 // cmoveq %rcx, %rax ADDQ $1, R15 ADDQ $-1, R13 TESTQ R13, R13 - JNE LBB23_38 - JMP LBB23_51 + JNE LBB24_38 + JMP LBB24_51 -LBB23_45: +LBB24_45: MOVQ $-2, BX CMPQ AX, $-1 - JNE LBB23_59 + JNE LBB24_59 ADDQ R15, DX LEAQ 0(CX)(DX*1), AX ADDQ $1, AX - JMP LBB23_59 + JMP LBB24_59 _utf8_validate: BYTE $0x55 // pushq %rbp @@ -7120,151 +7439,151 @@ _utf8_validate: BYTE $0x53 // pushq %rbx MOVQ $-1, AX TESTQ SI, SI - JLE LBB24_35 - LONG $0xda058d4c; WORD $0x00af; BYTE $0x00 // leaq $45018(%rip), %r8 /* _first(%rip) */ - LONG $0xd30d8d4c; WORD $0x00b0; BYTE $0x00 // leaq $45267(%rip), %r9 /* _ranges(%rip) */ - LONG $0x84158d4c; WORD $0x0001; BYTE $0x00 // leaq $388(%rip), %r10 /* LJTI24_0(%rip) */ + JLE LBB25_35 + LONG $0x02058d4c; WORD $0x00b0; BYTE $0x00 // leaq $45058(%rip), %r8 /* _first(%rip) */ + LONG $0xfb0d8d4c; WORD $0x00b0; BYTE $0x00 // leaq $45307(%rip), %r9 /* _ranges(%rip) */ + LONG $0x84158d4c; WORD $0x0001; BYTE $0x00 // leaq $388(%rip), %r10 /* LJTI25_0(%rip) */ MOVQ DI, R11 -LBB24_2: +LBB25_2: CMPB 0(R11), $0 - JS LBB24_3 + JS LBB25_3 MOVQ SI, R14 MOVQ R11, DX CMPQ SI, $32 - JL LBB24_16 + JL LBB25_16 XORL BX, BX XORL CX, CX -LBB24_6: +LBB25_6: LONG $0x6f7ec1c4; WORD $0x1b04 // vmovdqu (%r11,%rbx), %ymm0 LONG $0xd0d7fdc5 // vpmovmskb %ymm0, %edx TESTL DX, DX - JNE LBB24_7 + JNE LBB25_7 ADDQ $32, BX LEAQ 0(SI)(CX*1), DX ADDQ $-32, DX ADDQ $-32, CX ADDQ $32, DX CMPQ DX, $63 - JG LBB24_6 + JG LBB25_6 MOVQ R11, DX SUBQ CX, DX MOVQ SI, R14 SUBQ BX, R14 -LBB24_16: +LBB25_16: WORD $0xf8c5; BYTE $0x77 // vzeroupper CMPQ R14, $16 - JL LBB24_17 + JL LBB25_17 MOVQ R11, CX SUBQ DX, CX -LBB24_23: +LBB25_23: LONG $0x026ffac5 // vmovdqu (%rdx), %xmm0 LONG $0xd8d7f9c5 // vpmovmskb %xmm0, %ebx TESTL BX, BX - JNE LBB24_24 + JNE LBB25_24 LEAQ -16(R14), BX ADDQ $16, DX ADDQ $-16, CX CMPQ R14, $31 MOVQ BX, R14 - JG LBB24_23 - JMP LBB24_18 + JG LBB25_23 + JMP LBB25_18 -LBB24_3: +LBB25_3: XORL R14, R14 
-LBB24_9: +LBB25_9: CMPQ R14, $-1 - JE LBB24_35 + JE LBB25_35 SUBQ R14, SI - JLE LBB24_35 + JLE LBB25_35 LEAQ 0(R11)(R14*1), R15 MOVBLZX 0(R11)(R14*1), R11 MOVBLZX 0(R11)(R8*1), BX MOVL BX, CX ANDL $7, CX CMPQ SI, CX - JB LBB24_33 + JB LBB25_33 CMPB CX, $4 - JA LBB24_33 + JA LBB25_33 MOVL $1, R14 MOVBLZX CX, DX MOVLQSX 0(R10)(DX*4), DX ADDQ R10, DX JMP DX -LBB24_26: +LBB25_26: MOVB 3(R15), DX TESTB DX, DX - JNS LBB24_33 + JNS LBB25_33 CMPB DX, $-65 - JA LBB24_33 + JA LBB25_33 -LBB24_28: +LBB25_28: MOVB 2(R15), DX TESTB DX, DX - JNS LBB24_33 + JNS LBB25_33 CMPB DX, $-65 - JA LBB24_33 + JA LBB25_33 -LBB24_30: +LBB25_30: SHRQ $4, BX MOVB 1(R15), DX CMPB DX, 0(R9)(BX*2) - JB LBB24_33 + JB LBB25_33 CMPB 1(R9)(BX*2), DX - JB LBB24_33 + JB LBB25_33 MOVQ CX, R14 TESTB R11, R11 - JNS LBB24_33 + JNS LBB25_33 -LBB24_34: +LBB25_34: ADDQ R14, R15 MOVQ R15, R11 SUBQ R14, SI - JG LBB24_2 - JMP LBB24_35 + JG LBB25_2 + JMP LBB25_35 -LBB24_17: +LBB25_17: MOVQ R14, BX -LBB24_18: +LBB25_18: TESTQ BX, BX - JLE LBB24_35 + JLE LBB25_35 ADDQ $1, BX MOVQ DX, R14 SUBQ R11, R14 -LBB24_20: +LBB25_20: CMPB 0(DX), $0 - JS LBB24_9 + JS LBB25_9 ADDQ $1, DX ADDQ $-1, BX ADDQ $1, R14 CMPQ BX, $1 - JG LBB24_20 - JMP LBB24_35 + JG LBB25_20 + JMP LBB25_35 -LBB24_7: +LBB25_7: BSFL DX, R14 - JMP LBB24_8 + JMP LBB25_8 -LBB24_24: +LBB25_24: BSFW BX, DX MOVWLZX DX, R14 -LBB24_8: +LBB25_8: SUBQ CX, R14 - JMP LBB24_9 + JMP LBB25_9 -LBB24_33: +LBB25_33: SUBQ DI, R15 MOVQ R15, AX -LBB24_35: +LBB25_35: BYTE $0x5b // popq %rbx WORD $0x5e41 // popq %r14 WORD $0x5f41 // popq %r15 @@ -7272,17 +7591,17 @@ LBB24_35: WORD $0xf8c5; BYTE $0x77 // vzeroupper RET -// .set L24_0_set_34, LBB24_34-LJTI24_0 -// .set L24_0_set_33, LBB24_33-LJTI24_0 -// .set L24_0_set_30, LBB24_30-LJTI24_0 -// .set L24_0_set_28, LBB24_28-LJTI24_0 -// .set L24_0_set_26, LBB24_26-LJTI24_0 -LJTI24_0: - LONG $0xffffff9a // .long L24_0_set_34 - LONG $0xfffffff0 // .long L24_0_set_33 - LONG $0xffffff7d // .long L24_0_set_30 - LONG $0xffffff70 // .long L24_0_set_28 - LONG $0xffffff5b // .long L24_0_set_26 +// .set L25_0_set_34, LBB25_34-LJTI25_0 +// .set L25_0_set_33, LBB25_33-LJTI25_0 +// .set L25_0_set_30, LBB25_30-LJTI25_0 +// .set L25_0_set_28, LBB25_28-LJTI25_0 +// .set L25_0_set_26, LBB25_26-LJTI25_0 +LJTI25_0: + LONG $0xffffff9a // .long L25_0_set_34 + LONG $0xfffffff0 // .long L25_0_set_33 + LONG $0xffffff7d // .long L25_0_set_30 + LONG $0xffffff70 // .long L25_0_set_28 + LONG $0xffffff5b // .long L25_0_set_26 _skip_negative: BYTE $0x55 // pushq %rbp @@ -7298,70 +7617,70 @@ _skip_negative: MOVQ AX, DI LONG $0x000173e8; BYTE $0x00 // callq _do_skip_number TESTQ AX, AX - JS LBB25_1 + JS LBB26_1 ADDQ AX, 0(R14) ADDQ $-1, BX - JMP LBB25_3 + JMP LBB26_3 -LBB25_1: +LBB26_1: NOTQ AX ADDQ AX, 0(R14) MOVQ $-2, BX -LBB25_3: +LBB26_3: MOVQ BX, AX BYTE $0x5b // popq %rbx WORD $0x5e41 // popq %r14 BYTE $0x5d // popq %rbp RET -LCPI26_0: +LCPI27_0: QUAD $0x2f2f2f2f2f2f2f2f; QUAD $0x2f2f2f2f2f2f2f2f // .space 16, '////////////////' QUAD $0x2f2f2f2f2f2f2f2f; QUAD $0x2f2f2f2f2f2f2f2f // .space 16, '////////////////' -LCPI26_1: +LCPI27_1: QUAD $0x3a3a3a3a3a3a3a3a; QUAD $0x3a3a3a3a3a3a3a3a // .space 16, '::::::::::::::::' QUAD $0x3a3a3a3a3a3a3a3a; QUAD $0x3a3a3a3a3a3a3a3a // .space 16, '::::::::::::::::' -LCPI26_2: +LCPI27_2: QUAD $0x2b2b2b2b2b2b2b2b; QUAD $0x2b2b2b2b2b2b2b2b // .space 16, '++++++++++++++++' QUAD $0x2b2b2b2b2b2b2b2b; QUAD $0x2b2b2b2b2b2b2b2b // .space 16, '++++++++++++++++' -LCPI26_3: +LCPI27_3: QUAD $0x2d2d2d2d2d2d2d2d; QUAD $0x2d2d2d2d2d2d2d2d // .space 16, 
'----------------' QUAD $0x2d2d2d2d2d2d2d2d; QUAD $0x2d2d2d2d2d2d2d2d // .space 16, '----------------' -LCPI26_4: +LCPI27_4: QUAD $0xdfdfdfdfdfdfdfdf; QUAD $0xdfdfdfdfdfdfdfdf // .space 16, '\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf' QUAD $0xdfdfdfdfdfdfdfdf; QUAD $0xdfdfdfdfdfdfdfdf // .space 16, '\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf' -LCPI26_5: +LCPI27_5: QUAD $0x2e2e2e2e2e2e2e2e; QUAD $0x2e2e2e2e2e2e2e2e // .space 16, '................' QUAD $0x2e2e2e2e2e2e2e2e; QUAD $0x2e2e2e2e2e2e2e2e // .space 16, '................' -LCPI26_6: +LCPI27_6: QUAD $0x4545454545454545; QUAD $0x4545454545454545 // .space 16, 'EEEEEEEEEEEEEEEE' QUAD $0x4545454545454545; QUAD $0x4545454545454545 // .space 16, 'EEEEEEEEEEEEEEEE' -LCPI26_7: +LCPI27_7: QUAD $0x2f2f2f2f2f2f2f2f; QUAD $0x2f2f2f2f2f2f2f2f // .space 16, '////////////////' -LCPI26_8: +LCPI27_8: QUAD $0x3a3a3a3a3a3a3a3a; QUAD $0x3a3a3a3a3a3a3a3a // .space 16, '::::::::::::::::' -LCPI26_9: +LCPI27_9: QUAD $0x2b2b2b2b2b2b2b2b; QUAD $0x2b2b2b2b2b2b2b2b // .space 16, '++++++++++++++++' -LCPI26_10: +LCPI27_10: QUAD $0x2d2d2d2d2d2d2d2d; QUAD $0x2d2d2d2d2d2d2d2d // .space 16, '----------------' -LCPI26_11: +LCPI27_11: QUAD $0xdfdfdfdfdfdfdfdf; QUAD $0xdfdfdfdfdfdfdfdf // .space 16, '\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf\xdf' -LCPI26_12: +LCPI27_12: QUAD $0x2e2e2e2e2e2e2e2e; QUAD $0x2e2e2e2e2e2e2e2e // .space 16, '................' -LCPI26_13: +LCPI27_13: QUAD $0x4545454545454545; QUAD $0x4545454545454545 // .space 16, 'EEEEEEEEEEEEEEEE' _do_skip_number: @@ -7372,37 +7691,37 @@ _do_skip_number: WORD $0x5441 // pushq %r12 BYTE $0x53 // pushq %rbx TESTQ SI, SI - JE LBB26_1 + JE LBB27_1 CMPB 0(DI), $48 - JNE LBB26_6 + JNE LBB27_6 MOVL $1, AX CMPQ SI, $1 - JE LBB26_81 + JE LBB27_81 MOVB 1(DI), CX ADDB $-46, CX CMPB CX, $55 - JA LBB26_81 + JA LBB27_81 MOVBLZX CX, CX MOVQ $36028797027352577, DX BTQ CX, DX - JAE LBB26_81 + JAE LBB27_81 -LBB26_6: +LBB27_6: MOVQ $-1, R9 CMPQ SI, $32 - JB LBB26_7 + JB LBB27_7 XORL AX, AX - QUAD $0xfffffe47056ffec5 // vmovdqu $-441(%rip), %ymm0 /* LCPI26_0(%rip) */ - QUAD $0xfffffe5f0d6ffec5 // vmovdqu $-417(%rip), %ymm1 /* LCPI26_1(%rip) */ - QUAD $0xfffffe77156ffec5 // vmovdqu $-393(%rip), %ymm2 /* LCPI26_2(%rip) */ - QUAD $0xfffffe8f1d6ffec5 // vmovdqu $-369(%rip), %ymm3 /* LCPI26_3(%rip) */ - QUAD $0xfffffea7256ffec5 // vmovdqu $-345(%rip), %ymm4 /* LCPI26_4(%rip) */ - QUAD $0xfffffebf2d6ffec5 // vmovdqu $-321(%rip), %ymm5 /* LCPI26_5(%rip) */ - QUAD $0xfffffed7356ffec5 // vmovdqu $-297(%rip), %ymm6 /* LCPI26_6(%rip) */ + QUAD $0xfffffe47056ffec5 // vmovdqu $-441(%rip), %ymm0 /* LCPI27_0(%rip) */ + QUAD $0xfffffe5f0d6ffec5 // vmovdqu $-417(%rip), %ymm1 /* LCPI27_1(%rip) */ + QUAD $0xfffffe77156ffec5 // vmovdqu $-393(%rip), %ymm2 /* LCPI27_2(%rip) */ + QUAD $0xfffffe8f1d6ffec5 // vmovdqu $-369(%rip), %ymm3 /* LCPI27_3(%rip) */ + QUAD $0xfffffea7256ffec5 // vmovdqu $-345(%rip), %ymm4 /* LCPI27_4(%rip) */ + QUAD $0xfffffebf2d6ffec5 // vmovdqu $-321(%rip), %ymm5 /* LCPI27_5(%rip) */ + QUAD $0xfffffed7356ffec5 // vmovdqu $-297(%rip), %ymm6 /* LCPI27_6(%rip) */ MOVQ $-1, R8 MOVQ $-1, R10 -LBB26_9: +LBB27_9: LONG $0x3c6ffec5; BYTE $0x07 // vmovdqu (%rdi,%rax), %ymm7 LONG $0xc06445c5 // vpcmpgtb %ymm0, %ymm7, %ymm8 LONG $0xcf6475c5 // vpcmpgtb %ymm7, %ymm1, %ymm9 @@ -7423,7 +7742,7 @@ LBB26_9: NOTQ CX BSFQ CX, R14 CMPL R14, $32 - JE LBB26_11 + JE LBB27_11 MOVL $-1, BX MOVL R14, CX SHLL CX, BX @@ -7433,68 +7752,68 @@ LBB26_9: ANDL R11, BX MOVL BX, R11 -LBB26_11: 
+LBB27_11: LEAL -1(DX), CX ANDL DX, CX - JNE LBB26_12 + JNE LBB27_12 LEAL -1(R15), CX ANDL R15, CX - JNE LBB26_12 + JNE LBB27_12 LEAL -1(R11), CX ANDL R11, CX - JNE LBB26_12 + JNE LBB27_12 TESTL DX, DX - JE LBB26_19 + JE LBB27_19 BSFL DX, CX CMPQ R10, $-1 - JNE LBB26_82 + JNE LBB27_82 ADDQ AX, CX MOVQ CX, R10 -LBB26_19: +LBB27_19: TESTL R15, R15 - JE LBB26_22 + JE LBB27_22 BSFL R15, CX CMPQ R8, $-1 - JNE LBB26_82 + JNE LBB27_82 ADDQ AX, CX MOVQ CX, R8 -LBB26_22: +LBB27_22: TESTL R11, R11 - JE LBB26_25 + JE LBB27_25 BSFL R11, CX CMPQ R9, $-1 - JNE LBB26_82 + JNE LBB27_82 ADDQ AX, CX MOVQ CX, R9 -LBB26_25: +LBB27_25: CMPL R14, $32 - JNE LBB26_83 + JNE LBB27_83 ADDQ $-32, SI ADDQ $32, AX CMPQ SI, $31 - JA LBB26_9 + JA LBB27_9 WORD $0xf8c5; BYTE $0x77 // vzeroupper ADDQ DI, AX MOVQ AX, R14 CMPQ SI, $16 - JB LBB26_49 + JB LBB27_49 -LBB26_29: +LBB27_29: MOVQ R14, R11 SUBQ DI, R11 XORL AX, AX - QUAD $0xfffffdce056f7ac5 // vmovdqu $-562(%rip), %xmm8 /* LCPI26_7(%rip) */ - QUAD $0xfffffdd60d6f7ac5 // vmovdqu $-554(%rip), %xmm9 /* LCPI26_8(%rip) */ - QUAD $0xfffffdde156f7ac5 // vmovdqu $-546(%rip), %xmm10 /* LCPI26_9(%rip) */ - QUAD $0xfffffde61d6f7ac5 // vmovdqu $-538(%rip), %xmm11 /* LCPI26_10(%rip) */ - QUAD $0xfffffdee256ffac5 // vmovdqu $-530(%rip), %xmm4 /* LCPI26_11(%rip) */ - QUAD $0xfffffdf62d6ffac5 // vmovdqu $-522(%rip), %xmm5 /* LCPI26_12(%rip) */ - QUAD $0xfffffdfe356ffac5 // vmovdqu $-514(%rip), %xmm6 /* LCPI26_13(%rip) */ - -LBB26_30: + QUAD $0xfffffdce056f7ac5 // vmovdqu $-562(%rip), %xmm8 /* LCPI27_7(%rip) */ + QUAD $0xfffffdd60d6f7ac5 // vmovdqu $-554(%rip), %xmm9 /* LCPI27_8(%rip) */ + QUAD $0xfffffdde156f7ac5 // vmovdqu $-546(%rip), %xmm10 /* LCPI27_9(%rip) */ + QUAD $0xfffffde61d6f7ac5 // vmovdqu $-538(%rip), %xmm11 /* LCPI27_10(%rip) */ + QUAD $0xfffffdee256ffac5 // vmovdqu $-530(%rip), %xmm4 /* LCPI27_11(%rip) */ + QUAD $0xfffffdf62d6ffac5 // vmovdqu $-522(%rip), %xmm5 /* LCPI27_12(%rip) */ + QUAD $0xfffffdfe356ffac5 // vmovdqu $-514(%rip), %xmm6 /* LCPI27_13(%rip) */ + +LBB27_30: LONG $0x6f7ac1c4; WORD $0x063c // vmovdqu (%r14,%rax), %xmm7 LONG $0x6441c1c4; BYTE $0xc0 // vpcmpgtb %xmm8, %xmm7, %xmm0 LONG $0xcf64b1c5 // vpcmpgtb %xmm7, %xmm9, %xmm1 @@ -7515,7 +7834,7 @@ LBB26_30: NOTL CX BSFL CX, CX CMPL CX, $16 - JE LBB26_32 + JE LBB27_32 MOVL $-1, BX SHLL CX, BX NOTL BX @@ -7524,166 +7843,166 @@ LBB26_30: ANDL R15, BX MOVL BX, R15 -LBB26_32: +LBB27_32: LEAL -1(DX), BX ANDL DX, BX - JNE LBB26_33 + JNE LBB27_33 LEAL -1(R12), BX ANDL R12, BX - JNE LBB26_33 + JNE LBB27_33 LEAL -1(R15), BX ANDL R15, BX - JNE LBB26_33 + JNE LBB27_33 TESTL DX, DX - JE LBB26_40 + JE LBB27_40 BSFL DX, DX CMPQ R10, $-1 - JNE LBB26_84 + JNE LBB27_84 ADDQ R11, DX ADDQ AX, DX MOVQ DX, R10 -LBB26_40: +LBB27_40: TESTL R12, R12 - JE LBB26_43 + JE LBB27_43 BSFL R12, DX CMPQ R8, $-1 - JNE LBB26_84 + JNE LBB27_84 ADDQ R11, DX ADDQ AX, DX MOVQ DX, R8 -LBB26_43: +LBB27_43: TESTL R15, R15 - JE LBB26_46 + JE LBB27_46 BSFL R15, DX CMPQ R9, $-1 - JNE LBB26_84 + JNE LBB27_84 ADDQ R11, DX ADDQ AX, DX MOVQ DX, R9 -LBB26_46: +LBB27_46: CMPL CX, $16 - JNE LBB26_65 + JNE LBB27_65 ADDQ $-16, SI ADDQ $16, AX CMPQ SI, $15 - JA LBB26_30 + JA LBB27_30 ADDQ AX, R14 -LBB26_49: +LBB27_49: TESTQ SI, SI - JE LBB26_67 + JE LBB27_67 LEAQ 0(R14)(SI*1), R11 MOVQ R14, DX SUBQ DI, DX XORL AX, AX - LONG $0x813d8d4c; WORD $0x0001; BYTE $0x00 // leaq $385(%rip), %r15 /* LJTI26_0(%rip) */ - JMP LBB26_51 + LONG $0x813d8d4c; WORD $0x0001; BYTE $0x00 // leaq $385(%rip), %r15 /* LJTI27_0(%rip) */ + JMP LBB27_51 -LBB26_53: +LBB27_53: CMPL CX, 
$101 - JNE LBB26_66 + JNE LBB27_66 -LBB26_54: +LBB27_54: CMPQ R8, $-1 - JNE LBB26_59 + JNE LBB27_59 LEAQ 0(DX)(AX*1), R8 -LBB26_63: +LBB27_63: ADDQ $1, AX CMPQ SI, AX - JE LBB26_64 + JE LBB27_64 -LBB26_51: +LBB27_51: MOVBLSX 0(R14)(AX*1), CX LEAL -48(CX), BX CMPL BX, $10 - JB LBB26_63 + JB LBB27_63 LEAL -43(CX), BX CMPL BX, $26 - JA LBB26_53 + JA LBB27_53 MOVLQSX 0(R15)(BX*4), CX ADDQ R15, CX JMP CX -LBB26_61: +LBB27_61: CMPQ R9, $-1 - JNE LBB26_59 + JNE LBB27_59 LEAQ 0(DX)(AX*1), R9 - JMP LBB26_63 + JMP LBB27_63 -LBB26_57: +LBB27_57: CMPQ R10, $-1 - JNE LBB26_59 + JNE LBB27_59 LEAQ 0(DX)(AX*1), R10 - JMP LBB26_63 + JMP LBB27_63 -LBB26_1: +LBB27_1: MOVQ $-1, AX - JMP LBB26_81 + JMP LBB27_81 -LBB26_83: +LBB27_83: ADDQ AX, R14 ADDQ DI, R14 WORD $0xf8c5; BYTE $0x77 // vzeroupper MOVQ $-1, AX TESTQ R10, R10 - JNE LBB26_68 - JMP LBB26_81 + JNE LBB27_68 + JMP LBB27_81 -LBB26_65: +LBB27_65: MOVL CX, CX ADDQ CX, R14 -LBB26_66: +LBB27_66: ADDQ AX, R14 -LBB26_67: +LBB27_67: MOVQ $-1, AX TESTQ R10, R10 - JE LBB26_81 + JE LBB27_81 -LBB26_68: +LBB27_68: TESTQ R9, R9 - JE LBB26_81 + JE LBB27_81 TESTQ R8, R8 - JE LBB26_81 + JE LBB27_81 SUBQ DI, R14 LEAQ -1(R14), AX CMPQ R10, AX - JE LBB26_73 + JE LBB27_73 CMPQ R9, AX - JE LBB26_73 + JE LBB27_73 CMPQ R8, AX - JE LBB26_73 + JE LBB27_73 TESTQ R9, R9 - JLE LBB26_77 + JLE LBB27_77 LEAQ -1(R9), AX CMPQ R8, AX - JE LBB26_77 + JE LBB27_77 NOTQ R9 MOVQ R9, AX - JMP LBB26_81 + JMP LBB27_81 -LBB26_73: +LBB27_73: NEGQ R14 MOVQ R14, AX - JMP LBB26_81 + JMP LBB27_81 -LBB26_77: +LBB27_77: MOVQ R10, AX ORQ R8, AX SETPL AX - JS LBB26_80 + JS LBB27_80 CMPQ R10, R8 - JL LBB26_80 + JL LBB27_80 NOTQ R10 MOVQ R10, AX - JMP LBB26_81 + JMP LBB27_81 -LBB26_80: +LBB27_80: LEAQ -1(R8), CX CMPQ R10, CX NOTQ R8 @@ -7691,47 +8010,47 @@ LBB26_80: TESTB AX, AX LONG $0xc6440f4d // cmoveq %r14, %r8 MOVQ R8, AX - JMP LBB26_81 + JMP LBB27_81 -LBB26_64: +LBB27_64: MOVQ R11, R14 MOVQ $-1, AX TESTQ R10, R10 - JNE LBB26_68 - JMP LBB26_81 + JNE LBB27_68 + JMP LBB27_81 -LBB26_12: +LBB27_12: BSFL CX, CX - JMP LBB26_13 + JMP LBB27_13 -LBB26_82: +LBB27_82: MOVL CX, CX -LBB26_13: +LBB27_13: NOTQ AX SUBQ CX, AX - JMP LBB26_81 + JMP LBB27_81 -LBB26_33: +LBB27_33: BSFL BX, CX - JMP LBB26_34 + JMP LBB27_34 -LBB26_59: +LBB27_59: SUBQ R14, DI - JMP LBB26_60 + JMP LBB27_60 -LBB26_84: +LBB27_84: MOVL DX, CX -LBB26_34: +LBB27_34: SUBQ R14, DI SUBQ CX, DI -LBB26_60: +LBB27_60: NOTQ AX ADDQ DI, AX -LBB26_81: +LBB27_81: BYTE $0x5b // popq %rbx WORD $0x5c41 // popq %r12 WORD $0x5e41 // popq %r14 @@ -7740,46 +8059,46 @@ LBB26_81: WORD $0xf8c5; BYTE $0x77 // vzeroupper RET -LBB26_7: +LBB27_7: MOVQ $-1, R8 MOVQ $-1, R10 MOVQ DI, R14 CMPQ SI, $16 - JAE LBB26_29 - JMP LBB26_49 - -// .set L26_0_set_61, LBB26_61-LJTI26_0 -// .set L26_0_set_66, LBB26_66-LJTI26_0 -// .set L26_0_set_57, LBB26_57-LJTI26_0 -// .set L26_0_set_54, LBB26_54-LJTI26_0 -LJTI26_0: - LONG $0xfffffec3 // .long L26_0_set_61 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xfffffec3 // .long L26_0_set_61 - LONG $0xfffffed3 // .long L26_0_set_57 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // 
.long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xffffff0e // .long L26_0_set_66 - LONG $0xfffffe8a // .long L26_0_set_54 + JAE LBB27_29 + JMP LBB27_49 + +// .set L27_0_set_61, LBB27_61-LJTI27_0 +// .set L27_0_set_66, LBB27_66-LJTI27_0 +// .set L27_0_set_57, LBB27_57-LJTI27_0 +// .set L27_0_set_54, LBB27_54-LJTI27_0 +LJTI27_0: + LONG $0xfffffec3 // .long L27_0_set_61 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xfffffec3 // .long L27_0_set_61 + LONG $0xfffffed3 // .long L27_0_set_57 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xffffff0e // .long L27_0_set_66 + LONG $0xfffffe8a // .long L27_0_set_54 _skip_positive: BYTE $0x55 // pushq %rbp @@ -7796,19 +8115,19 @@ _skip_positive: MOVQ AX, DI LONG $0xfffae4e8; BYTE $0xff // callq _do_skip_number TESTQ AX, AX - JS LBB27_1 + JS LBB28_1 MOVQ 0(R14), CX ADDQ AX, CX ADDQ $-1, CX - JMP LBB27_3 + JMP LBB28_3 -LBB27_1: +LBB28_1: MOVQ 0(R14), CX SUBQ AX, CX ADDQ $-2, CX MOVQ $-2, BX -LBB27_3: +LBB28_3: MOVQ CX, 0(R14) MOVQ BX, AX BYTE $0x5b // popq %rbx @@ -7833,22 +8152,22 @@ _skip_number: SETEQ AX ADDQ AX, BX SUBQ AX, SI - JE LBB28_6 + JE LBB29_6 CMPQ R15, SI - JAE LBB28_3 + JAE LBB29_3 MOVB 0(BX), AX ADDB $-48, AX CMPB AX, $9 - JA LBB28_8 + JA LBB29_8 -LBB28_3: +LBB29_3: MOVQ BX, DI LONG $0xfffa74e8; BYTE $0xff // callq _do_skip_number TESTQ AX, AX - JS LBB28_7 + JS LBB29_7 ADDQ AX, BX -LBB28_5: +LBB29_5: SUBQ R12, BX MOVQ BX, 0(R14) MOVQ R15, AX @@ -7859,17 +8178,17 @@ LBB28_5: BYTE $0x5d // popq %rbp RET -LBB28_6: +LBB29_6: MOVQ $-1, R15 - JMP LBB28_5 + JMP LBB29_5 -LBB28_7: +LBB29_7: NOTQ AX ADDQ AX, BX -LBB28_8: +LBB29_8: MOVQ $-2, R15 - JMP LBB28_5 + JMP LBB29_5 _validate_one: BYTE $0x55 // pushq %rbp @@ -7887,89 +8206,89 @@ _find_non_ascii: BYTE $0x55 // pushq %rbp WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp CMPQ SI, $32 - JL LBB30_1 + JL LBB31_1 XORL AX, AX XORL DX, DX -LBB30_3: +LBB31_3: LONG $0x046ffec5; BYTE $0x07 // vmovdqu (%rdi,%rax), %ymm0 LONG $0xc8d7fdc5 // vpmovmskb %ymm0, %ecx TESTL CX, CX - JNE LBB30_4 + JNE LBB31_4 ADDQ $32, AX LEAQ 0(SI)(DX*1), CX ADDQ $-32, CX ADDQ $-32, DX ADDQ $32, CX CMPQ CX, $63 - JG LBB30_3 + JG LBB31_3 MOVQ DI, CX SUBQ DX, CX SUBQ AX, SI WORD $0xf8c5; BYTE $0x77 // vzeroupper CMPQ SI, $16 - JGE LBB30_13 + JGE LBB31_13 -LBB30_8: +LBB31_8: MOVQ SI, DX - JMP LBB30_9 + JMP LBB31_9 -LBB30_1: +LBB31_1: MOVQ DI, CX WORD $0xf8c5; BYTE $0x77 // vzeroupper CMPQ SI, $16 - JL LBB30_8 + JL LBB31_8 
-LBB30_13: +LBB31_13: MOVQ DI, R8 SUBQ CX, R8 -LBB30_14: +LBB31_14: LONG $0x016ffac5 // vmovdqu (%rcx), %xmm0 LONG $0xc0d7f9c5 // vpmovmskb %xmm0, %eax TESTL AX, AX - JNE LBB30_15 + JNE LBB31_15 LEAQ -16(SI), DX ADDQ $16, CX ADDQ $-16, R8 CMPQ SI, $31 MOVQ DX, SI - JG LBB30_14 + JG LBB31_14 -LBB30_9: +LBB31_9: MOVQ $-1, AX TESTQ DX, DX - JLE LBB30_18 + JLE LBB31_18 ADDQ $1, DX MOVQ CX, SI SUBQ DI, SI -LBB30_11: +LBB31_11: CMPB 0(CX), $0 - JS LBB30_12 + JS LBB31_12 ADDQ $1, CX ADDQ $-1, DX ADDQ $1, SI CMPQ DX, $1 - JG LBB30_11 + JG LBB31_11 -LBB30_18: +LBB31_18: BYTE $0x5d // popq %rbp RET -LBB30_12: +LBB31_12: MOVQ SI, AX BYTE $0x5d // popq %rbp RET -LBB30_4: +LBB31_4: BSFL CX, AX SUBQ DX, AX BYTE $0x5d // popq %rbp WORD $0xf8c5; BYTE $0x77 // vzeroupper RET -LBB30_15: +LBB31_15: BSFW AX, AX MOVWLZX AX, AX SUBQ R8, AX @@ -7985,7 +8304,7 @@ _print_mantissa: ADDQ SI, R14 MOVQ DI, AX SHRQ $32, AX - JE LBB31_2 + JE LBB32_2 MOVQ $-6067343680855748867, DX MOVQ DI, AX MULQ DX @@ -8017,7 +8336,7 @@ _print_mantissa: LONG $0x64fa6b41 // imull $100, %r10d, %edi SUBL DI, AX MOVWLZX AX, R11 - LONG $0x753d8d48; WORD $0x0059; BYTE $0x00 // leaq $22901(%rip), %rdi /* _Digits(%rip) */ + LONG $0x8c3d8d48; WORD $0x0059; BYTE $0x00 // leaq $22924(%rip), %rdi /* _Digits(%rip) */ MOVWLZX 0(DI)(R8*2), AX MOVW AX, -2(R14) MOVWLZX 0(DI)(R9*2), AX @@ -8029,13 +8348,13 @@ _print_mantissa: ADDQ $-8, R14 MOVQ DX, DI -LBB31_2: +LBB32_2: CMPL DI, $10000 - JB LBB31_3 + JB LBB32_3 MOVL $3518437209, R8 - LONG $0x2d0d8d4c; WORD $0x0059; BYTE $0x00 // leaq $22829(%rip), %r9 /* _Digits(%rip) */ + LONG $0x440d8d4c; WORD $0x0059; BYTE $0x00 // leaq $22852(%rip), %r9 /* _Digits(%rip) */ -LBB31_5: +LBB32_5: MOVL DI, AX IMULQ R8, AX SHRQ $45, AX @@ -8052,11 +8371,11 @@ LBB31_5: ADDQ $-4, R14 CMPL DI, $99999999 MOVL AX, DI - JA LBB31_5 + JA LBB32_5 CMPL AX, $100 - JB LBB31_8 + JB LBB32_8 -LBB31_7: +LBB32_7: MOVWLZX AX, CX SHRL $2, CX LONG $0x147bc969; WORD $0x0000 // imull $5243, %ecx, %ecx @@ -8064,17 +8383,17 @@ LBB31_7: WORD $0xd16b; BYTE $0x64 // imull $100, %ecx, %edx SUBL DX, AX MOVWLZX AX, AX - LONG $0xc6158d48; WORD $0x0058; BYTE $0x00 // leaq $22726(%rip), %rdx /* _Digits(%rip) */ + LONG $0xdd158d48; WORD $0x0058; BYTE $0x00 // leaq $22749(%rip), %rdx /* _Digits(%rip) */ MOVWLZX 0(DX)(AX*2), AX MOVW AX, -2(R14) ADDQ $-2, R14 MOVL CX, AX -LBB31_8: +LBB32_8: CMPL AX, $10 - JB LBB31_10 + JB LBB32_10 MOVL AX, AX - LONG $0xa90d8d48; WORD $0x0058; BYTE $0x00 // leaq $22697(%rip), %rcx /* _Digits(%rip) */ + LONG $0xc00d8d48; WORD $0x0058; BYTE $0x00 // leaq $22720(%rip), %rcx /* _Digits(%rip) */ MOVWLZX 0(CX)(AX*2), AX MOVW AX, -2(R14) BYTE $0x5b // popq %rbx @@ -8082,13 +8401,13 @@ LBB31_8: BYTE $0x5d // popq %rbp RET -LBB31_3: +LBB32_3: MOVL DI, AX CMPL AX, $100 - JAE LBB31_7 - JMP LBB31_8 + JAE LBB32_7 + JMP LBB32_8 -LBB31_10: +LBB32_10: ADDB $48, AX MOVB AX, 0(SI) BYTE $0x5b // popq %rbx @@ -8096,6 +8415,14 @@ LBB31_10: BYTE $0x5d // popq %rbp RET +_write_syscall: + MOVQ SI, DX + MOVQ DI, SI + MOVQ $1, DI + MOVQ $33554436, AX + SYSCALL + RET + _left_shift: BYTE $0x55 // pushq %rbp WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp @@ -8104,44 +8431,44 @@ _left_shift: BYTE $0x53 // pushq %rbx MOVL SI, CX IMUL3Q $104, CX, R14 - LONG $0xcb158d48; WORD $0x008a; BYTE $0x00 // leaq $35531(%rip), %rdx /* _LSHIFT_TAB(%rip) */ + LONG $0xdc158d48; WORD $0x008a; BYTE $0x00 // leaq $35548(%rip), %rdx /* _LSHIFT_TAB(%rip) */ MOVL 0(R14)(DX*1), R8 MOVQ 0(DI), R11 MOVLQSX 16(DI), R9 MOVL R9, R10 TESTQ R9, R9 - JE LBB32_1 + JE LBB34_1 LEAQ 
0(R14)(DX*1), SI ADDQ $4, SI XORL BX, BX -LBB32_3: +LBB34_3: MOVBLZX 0(SI)(BX*1), AX TESTB AX, AX - JE LBB32_10 + JE LBB34_10 CMPB 0(R11)(BX*1), AX - JNE LBB32_5 + JNE LBB34_5 ADDQ $1, BX CMPQ R9, BX - JNE LBB32_3 + JNE LBB34_3 MOVL R9, SI ADDQ R14, DX CMPB 4(SI)(DX*1), $0 - JNE LBB32_9 - JMP LBB32_10 + JNE LBB34_9 + JMP LBB34_10 -LBB32_1: +LBB34_1: XORL SI, SI ADDQ R14, DX CMPB 4(SI)(DX*1), $0 - JE LBB32_10 + JE LBB34_10 -LBB32_9: +LBB34_9: ADDL $-1, R8 -LBB32_10: +LBB34_10: TESTL R10, R10 - JLE LBB32_25 + JLE LBB34_25 LEAL 0(R8)(R10*1), AX MOVLQSX AX, R15 ADDL $-1, R9 @@ -8149,7 +8476,7 @@ LBB32_10: XORL DX, DX MOVQ $-3689348814741910323, R14 -LBB32_12: +LBB34_12: MOVL R9, AX MOVBQSX 0(R11)(AX*1), SI ADDQ $-48, SI @@ -8163,90 +8490,90 @@ LBB32_12: MOVQ SI, AX SUBQ BX, AX CMPQ 8(DI), R15 - JBE LBB32_18 + JBE LBB34_18 ADDB $48, AX MOVB AX, 0(R11)(R15*1) - JMP LBB32_20 + JMP LBB34_20 -LBB32_18: +LBB34_18: TESTQ AX, AX - JE LBB32_20 + JE LBB34_20 MOVL $1, 28(DI) -LBB32_20: +LBB34_20: CMPQ R10, $2 - JL LBB32_14 + JL LBB34_14 ADDQ $-1, R10 MOVQ 0(DI), R11 ADDL $-1, R9 ADDQ $-1, R15 - JMP LBB32_12 + JMP LBB34_12 -LBB32_14: +LBB34_14: CMPQ SI, $10 - JAE LBB32_15 + JAE LBB34_15 -LBB32_25: +LBB34_25: MOVLQSX 16(DI), CX MOVLQSX R8, AX ADDQ CX, AX MOVL AX, 16(DI) MOVQ 8(DI), CX CMPQ CX, AX - JA LBB32_27 + JA LBB34_27 MOVL CX, 16(DI) MOVL CX, AX -LBB32_27: +LBB34_27: ADDL R8, 20(DI) TESTL AX, AX - JLE LBB32_31 + JLE LBB34_31 MOVQ 0(DI), CX MOVL AX, DX ADDQ $1, DX ADDL $-1, AX -LBB32_29: +LBB34_29: MOVL AX, SI CMPB 0(CX)(SI*1), $48 - JNE LBB32_33 + JNE LBB34_33 MOVL AX, 16(DI) ADDQ $-1, DX ADDL $-1, AX CMPQ DX, $1 - JG LBB32_29 - JMP LBB32_32 + JG LBB34_29 + JMP LBB34_32 -LBB32_31: - JNE LBB32_33 +LBB34_31: + JNE LBB34_33 -LBB32_32: +LBB34_32: MOVL $0, 20(DI) -LBB32_33: +LBB34_33: BYTE $0x5b // popq %rbx WORD $0x5e41 // popq %r14 WORD $0x5f41 // popq %r15 BYTE $0x5d // popq %rbp RET -LBB32_15: +LBB34_15: ADDL R8, R9 MOVLQSX R9, SI ADDQ $-1, SI - JMP LBB32_16 + JMP LBB34_16 -LBB32_17: +LBB34_17: ADDB $48, AX MOVQ 0(DI), BX MOVB AX, 0(BX)(SI*1) -LBB32_24: +LBB34_24: ADDQ $-1, SI CMPQ CX, $9 - JBE LBB32_25 + JBE LBB34_25 -LBB32_16: +LBB34_16: MOVQ DX, CX MOVQ DX, AX MULQ R14 @@ -8256,15 +8583,15 @@ LBB32_16: MOVQ CX, AX SUBQ BX, AX CMPQ 8(DI), SI - JA LBB32_17 + JA LBB34_17 TESTQ AX, AX - JE LBB32_24 + JE LBB34_24 MOVL $1, 28(DI) - JMP LBB32_24 + JMP LBB34_24 -LBB32_5: - JL LBB32_9 - JMP LBB32_10 +LBB34_5: + JL LBB34_9 + JMP LBB34_10 _right_shift: BYTE $0x55 // pushq %rbp @@ -8278,9 +8605,9 @@ _right_shift: LONG $0xd84f0f45 // cmovgl %r8d, %r11d XORL AX, AX -LBB33_1: +LBB35_1: CMPQ R11, DX - JE LBB33_2 + JE LBB35_2 LEAQ 0(AX)(AX*4), AX MOVQ 0(DI), SI MOVBQSX 0(SI)(DX*1), SI @@ -8290,10 +8617,10 @@ LBB33_1: MOVQ AX, SI SHRQ CX, SI TESTQ SI, SI - JE LBB33_1 + JE LBB35_1 MOVL DX, R11 -LBB33_7: +LBB35_7: MOVL 20(DI), DX SUBL R11, DX ADDL $1, DX @@ -8303,12 +8630,12 @@ LBB33_7: NOTQ R9 XORL R10, R10 CMPL R11, R8 - JGE LBB33_10 + JGE LBB35_10 MOVLQSX R11, R8 MOVQ 0(DI), SI XORL R10, R10 -LBB33_9: +LBB35_9: MOVQ AX, DX SHRQ CX, DX ANDQ R9, AX @@ -8324,87 +8651,87 @@ LBB33_9: ADDQ $-48, AX MOVLQSX 16(DI), DX CMPQ BX, DX - JL LBB33_9 - JMP LBB33_10 + JL LBB35_9 + JMP LBB35_10 -LBB33_12: +LBB35_12: ADDB $48, SI MOVQ 0(DI), BX MOVB SI, 0(BX)(DX*1) ADDL $1, DX MOVL DX, R10 -LBB33_15: +LBB35_15: ADDQ AX, AX LEAQ 0(AX)(AX*4), AX -LBB33_10: +LBB35_10: TESTQ AX, AX - JE LBB33_16 + JE LBB35_16 MOVQ AX, SI SHRQ CX, SI ANDQ R9, AX MOVLQSX R10, DX CMPQ 8(DI), DX - JA LBB33_12 + JA LBB35_12 TESTQ SI, SI - JE LBB33_15 + JE 
LBB35_15 MOVL $1, 28(DI) - JMP LBB33_15 + JMP LBB35_15 -LBB33_16: +LBB35_16: MOVL R10, 16(DI) TESTL R10, R10 - JLE LBB33_20 + JLE LBB35_20 MOVQ 0(DI), AX MOVL R10, CX ADDQ $1, CX ADDL $-1, R10 -LBB33_18: +LBB35_18: MOVL R10, DX CMPB 0(AX)(DX*1), $48 - JNE LBB33_22 + JNE LBB35_22 MOVL R10, 16(DI) ADDQ $-1, CX ADDL $-1, R10 CMPQ CX, $1 - JG LBB33_18 - JMP LBB33_21 + JG LBB35_18 + JMP LBB35_21 -LBB33_2: +LBB35_2: TESTQ AX, AX - JE LBB33_23 + JE LBB35_23 MOVQ AX, DX SHRQ CX, DX TESTQ DX, DX - JNE LBB33_7 + JNE LBB35_7 -LBB33_4: +LBB35_4: ADDQ AX, AX LEAQ 0(AX)(AX*4), AX ADDL $1, R11 MOVQ AX, DX SHRQ CX, DX TESTQ DX, DX - JE LBB33_4 - JMP LBB33_7 + JE LBB35_4 + JMP LBB35_7 -LBB33_20: - JE LBB33_21 +LBB35_20: + JE LBB35_21 -LBB33_22: +LBB35_22: BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB33_21: +LBB35_21: MOVL $0, 20(DI) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp RET -LBB33_23: +LBB35_23: MOVL $0, 16(DI) BYTE $0x5b // popq %rbx BYTE $0x5d // popq %rbp @@ -11187,6 +11514,10 @@ _VecShiftShuffles: QUAD $0x0e0d0c0b0a090807; QUAD $0xffffffffffffff0f // .ascii 16, '\x07\x08\t\n\x0b\x0c\r\x0e\x0f\xff\xff\xff\xff\xff\xff\xff' QUAD $0x0f0e0d0c0b0a0908; QUAD $0xffffffffffffffff // .ascii 16, '\x08\t\n\x0b\x0c\r\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff' +_LB_5d98b3b5: // _printhex.tab + QUAD $0x3736353433323130; QUAD $0x6665646362613938 // .asciz 16, '0123456789abcdef' + BYTE $0x00 // .asciz 1, '\x00' + __SingleQuoteTab: QUAD $0x0000000000000006 // .quad 6 QUAD $0x000030303030755c // .asciz 8, '\\u0000\x00\x00' @@ -12631,7 +12962,7 @@ _html_escape: MOVQ nb+8(FP), SI MOVQ dp+16(FP), DX MOVQ dn+24(FP), CX - CALL ·__native_entry__+10491(SB) // _html_escape + CALL ·__native_entry__+11513(SB) // _html_escape MOVQ AX, ret+32(FP) RET @@ -12715,7 +13046,7 @@ _quote: MOVQ dp+16(FP), DX MOVQ dn+24(FP), CX MOVQ flags+32(FP), R8 - CALL ·__native_entry__+4842(SB) // _quote + CALL ·__native_entry__+5827(SB) // _quote MOVQ AX, ret+40(FP) RET @@ -12736,7 +13067,7 @@ _skip_array: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ m+16(FP), DX - CALL ·__native_entry__+21045(SB) // _skip_array + CALL ·__native_entry__+22067(SB) // _skip_array MOVQ AX, ret+24(FP) RET @@ -12756,7 +13087,7 @@ _entry: _skip_number: MOVQ s+0(FP), DI MOVQ p+8(FP), SI - CALL ·__native_entry__+24516(SB) // _skip_number + CALL ·__native_entry__+25538(SB) // _skip_number MOVQ AX, ret+16(FP) RET @@ -12777,7 +13108,7 @@ _skip_object: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ m+16(FP), DX - CALL ·__native_entry__+21082(SB) // _skip_object + CALL ·__native_entry__+22104(SB) // _skip_object MOVQ AX, ret+24(FP) RET @@ -12798,7 +13129,7 @@ _skip_one: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ m+16(FP), DX - CALL ·__native_entry__+19273(SB) // _skip_one + CALL ·__native_entry__+20295(SB) // _skip_one MOVQ AX, ret+24(FP) RET @@ -12841,7 +13172,7 @@ _unquote: MOVQ dp+16(FP), DX MOVQ ep+24(FP), CX MOVQ flags+32(FP), R8 - CALL ·__native_entry__+7241(SB) // _unquote + CALL ·__native_entry__+8263(SB) // _unquote MOVQ AX, ret+40(FP) RET @@ -12862,7 +13193,7 @@ _validate_one: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ m+16(FP), DX - CALL ·__native_entry__+24633(SB) // _validate_one + CALL ·__native_entry__+25655(SB) // _validate_one MOVQ AX, ret+24(FP) RET @@ -12885,7 +13216,7 @@ _value: MOVQ p+16(FP), DX MOVQ v+24(FP), CX MOVQ allow_control+32(FP), R8 - CALL ·__native_entry__+14322(SB) // _value + CALL ·__native_entry__+15344(SB) // _value MOVQ AX, ret+40(FP) RET @@ -12906,7 +13237,7 @@ _vnumber: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ v+16(FP), DX - LEAQ 
·__native_entry__+17386(SB), AX // _vnumber + LEAQ ·__native_entry__+18408(SB), AX // _vnumber JMP AX _stack_grow: @@ -12926,7 +13257,7 @@ _vsigned: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ v+16(FP), DX - LEAQ ·__native_entry__+18691(SB), AX // _vsigned + LEAQ ·__native_entry__+19713(SB), AX // _vsigned JMP AX _stack_grow: @@ -12946,7 +13277,7 @@ _vstring: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ v+16(FP), DX - LEAQ ·__native_entry__+16482(SB), AX // _vstring + LEAQ ·__native_entry__+17504(SB), AX // _vstring JMP AX _stack_grow: @@ -12966,7 +13297,7 @@ _vunsigned: MOVQ s+0(FP), DI MOVQ p+8(FP), SI MOVQ v+16(FP), DX - LEAQ ·__native_entry__+18971(SB), AX // _vunsigned + LEAQ ·__native_entry__+19993(SB), AX // _vunsigned JMP AX _stack_grow: diff --git a/internal/native/avx2/native_subr_amd64.go b/internal/native/avx2/native_subr_amd64.go index e8136aa07..bda72c262 100644 --- a/internal/native/avx2/native_subr_amd64.go +++ b/internal/native/avx2/native_subr_amd64.go @@ -10,23 +10,23 @@ func __native_entry__() uintptr var ( _subr__f64toa = __native_entry__() + 814 - _subr__html_escape = __native_entry__() + 10491 + _subr__html_escape = __native_entry__() + 11513 _subr__i64toa = __native_entry__() + 3449 _subr__lspace = __native_entry__() + 379 _subr__lzero = __native_entry__() + 13 - _subr__quote = __native_entry__() + 4842 - _subr__skip_array = __native_entry__() + 21045 - _subr__skip_number = __native_entry__() + 24516 - _subr__skip_object = __native_entry__() + 21082 - _subr__skip_one = __native_entry__() + 19273 + _subr__quote = __native_entry__() + 5827 + _subr__skip_array = __native_entry__() + 22067 + _subr__skip_number = __native_entry__() + 25538 + _subr__skip_object = __native_entry__() + 22104 + _subr__skip_one = __native_entry__() + 20295 _subr__u64toa = __native_entry__() + 3544 - _subr__unquote = __native_entry__() + 7241 - _subr__validate_one = __native_entry__() + 24633 - _subr__value = __native_entry__() + 14322 - _subr__vnumber = __native_entry__() + 17386 - _subr__vsigned = __native_entry__() + 18691 - _subr__vstring = __native_entry__() + 16482 - _subr__vunsigned = __native_entry__() + 18971 + _subr__unquote = __native_entry__() + 8263 + _subr__validate_one = __native_entry__() + 25655 + _subr__value = __native_entry__() + 15344 + _subr__vnumber = __native_entry__() + 18408 + _subr__vsigned = __native_entry__() + 19713 + _subr__vstring = __native_entry__() + 17504 + _subr__vunsigned = __native_entry__() + 19993 ) const ( diff --git a/native/parsing.c b/native/parsing.c index 197a9ec82..f171cf5a2 100644 --- a/native/parsing.c +++ b/native/parsing.c @@ -16,12 +16,13 @@ #include "native.h" #include +#include "xprintf.c" /** String Quoting **/ - +#define MAX_ESCAPED_BYTES 8 typedef struct { const long n; - const char s[8]; + const char s[MAX_ESCAPED_BYTES]; } quoted_t; static const quoted_t _SingleQuoteTab[256] = { @@ -258,7 +259,6 @@ static inline ssize_t memcchr_quote_unsafe(const char *sp, ssize_t nb, char *dp, size_t cn = 0; simd_copy: - #if USE_AVX2 /* 32-byte loop, full store */ while (nb >= 32) { @@ -330,7 +330,10 @@ static inline ssize_t memcchr_quote_unsafe(const char *sp, ssize_t nb, char *dp, goto simd_copy; } - /* copy the quoted value */ + /* copy the quoted value. + * Note: dp always has at least 8 bytes (MAX_ESCAPED_BYTES) here. 
+     * so we can store a full 8-byte word here instead of calling memcpy_p8(dp, tab[ch].s, nc).
+     */
     *(uint64_t *)dp = *(const uint64_t *)tab[ch].s;
     sp++;
     nb--;
@@ -344,18 +347,15 @@ ssize_t quote(const char *sp, ssize_t nb, char *dp, ssize_t *dn, uint64_t flags)
     const char * ds = dp;
     const char * ss = sp;
     const quoted_t * tab;
-    size_t max_size = 0;
 
     /* select quoting table */
     if (!(flags & F_DBLUNQ)) {
         tab = _SingleQuoteTab;
-        max_size = nb * 6;
     } else {
         tab = _DoubleQuoteTab;
-        max_size = nb * 7;
    }
 
-    if (dn >= max_size) {
+    if (*dn >= nb * MAX_ESCAPED_BYTES) {
         *dn = memcchr_quote_unsafe(sp, nb, dp, tab);
         return nb;
     }
@@ -924,4 +924,6 @@ ssize_t html_escape(const char *sp, ssize_t nb, char *dp, ssize_t *dn) {
     /* all done */
     *dn = dp - ds;
     return sp - ss;
-}
\ No newline at end of file
+}
+
+#undef MAX_ESCAPED_BYTES
\ No newline at end of file
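
A note for reviewers, with a minimal stand-alone sketch of the fast-path idea in the parsing.c hunks above: the escape-table entries are padded to MAX_ESCAPED_BYTES, and quote() only takes the unsafe path after checking `*dn >= nb * MAX_ESCAPED_BYTES`, so every destination position is guaranteed at least 8 bytes of headroom and the per-escape copy can be one unconditional 8-byte store. The function name copy_escape_unsafe and the two demo table entries below are hypothetical, and memcpy with a constant size of 8 is assumed to compile to the same single store as the `*(uint64_t *)dp` line while avoiding the type-punned pointer.

    #include <stdio.h>
    #include <string.h>

    #define MAX_ESCAPED_BYTES 8

    /* Mirrors the quoted_t layout in parsing.c: n is the escape length,
     * s is the escape text zero-padded to MAX_ESCAPED_BYTES. */
    typedef struct {
        const long n;
        const char s[MAX_ESCAPED_BYTES];
    } quoted_t;

    /* Store all 8 padded bytes unconditionally, then advance by the real
     * length; the padding bytes are overwritten by the next store. The
     * caller must reserve MAX_ESCAPED_BYTES of headroom per input byte. */
    static char *copy_escape_unsafe(char *dp, const quoted_t *q) {
        memcpy(dp, q->s, MAX_ESCAPED_BYTES); /* fixed size: no length branch */
        return dp + q->n;                    /* consume only the real length */
    }

    int main(void) {
        static const quoted_t demo_quote = { 2, "\\\"" };    /* '"'  -> \"     */
        static const quoted_t demo_nul   = { 6, "\\u0000" }; /* '\0' -> \u0000 */
        char buf[3 * MAX_ESCAPED_BYTES] = { 0 };
        char *dp = buf;
        dp = copy_escape_unsafe(dp, &demo_quote);
        dp = copy_escape_unsafe(dp, &demo_nul);
        *dp = '\0';
        printf("%s\n", buf); /* prints \"\u0000 */
        return 0;
    }

Without the padding guarantee, the copy would have to dispatch on the escape length (1 to 7 bytes); the fixed-width store trades that per-escape branch for extra output headroom, which is why quote() falls back to the bounds-checked slow path when the destination buffer is smaller than nb * MAX_ESCAPED_BYTES.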