;
; jdsample.asm - upsampling (64-bit AVX2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, 2016, D. R. Commander.
; Copyright (C) 2015, Intel Corporation.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208

%include "jsimdext.inc"

; --------------------------------------------------------------------------
SECTION SEG_CONST

alignz 32
GLOBAL_DATA(jconst_fancy_upsample_avx2)

EXTN(jconst_fancy_upsample_avx2):

PW_ONE times 16 dw 1
PW_TWO times 16 dw 2
PW_THREE times 16 dw 3
PW_SEVEN times 16 dw 7
PW_EIGHT times 16 dw 8

alignz 32

; --------------------------------------------------------------------------
SECTION SEG_TEXT
BITS 64
;
; Fancy processing for the common case of 2:1 horizontal and 1:1 vertical.
;
; The upsampling algorithm is linear interpolation between pixel centers,
; also known as a "triangle filter". This is a good compromise between
; speed and visual quality. The centers of the output pixels are 1/4 and 3/4
; of the way between input pixel centers.
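;
; In scalar terms, the loop below computes (a sketch of what the SIMD code
; implements with PW_ONE/PW_TWO/PW_THREE and the final right shift by 2):
;   out[2*i]   = (3 * in[i] + in[i-1] + 1) >> 2
;   out[2*i+1] = (3 * in[i] + in[i+1] + 2) >> 2
; where the first and last input samples are replicated at the row edges.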
;
; GLOBAL(void)
; jsimd_h2v1_fancy_upsample_avx2(int max_v_samp_factor,
; JDIMENSION downsampled_width,
; JSAMPARRAY input_data,
; JSAMPARRAY *output_data_ptr);
;

; r10 = int max_v_samp_factor
; r11d = JDIMENSION downsampled_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY *output_data_ptr

align 32
GLOBAL_FUNCTION(jsimd_h2v1_fancy_upsample_avx2)

EXTN(jsimd_h2v1_fancy_upsample_avx2):
push rbp
mov rax, rsp
mov rbp, rsp
push_xmm 3
collect_args 4

mov eax, r11d ; colctr
test rax, rax
jz near .return

mov rcx, r10 ; rowctr
test rcx, rcx
jz near .return

mov rsi, r12 ; input_data
mov rdi, r13
mov rdi, JSAMPARRAY [rdi] ; output_data

vpxor ymm0, ymm0, ymm0 ; ymm0=(all 0's)
vpcmpeqb xmm9, xmm9, xmm9
vpsrldq xmm10, xmm9, (SIZEOF_XMMWORD-1) ; (ff -- -- -- ... -- --) LSB is ff

vpslldq xmm9, xmm9, (SIZEOF_XMMWORD-1)
vperm2i128 ymm9, ymm9, ymm9, 1 ; (---- ---- ... ---- ---- ff) MSB is ff
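; ymm10 keeps only the first byte of a block and ymm9 only the last byte; they
; are used below to splice the replicated edge sample into the "previous" and
; "next" neighbor vectors at the left and right ends of each row.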

.rowloop:
push rax ; colctr
push rdi
push rsi

mov rsi, JSAMPROW [rsi] ; inptr
mov rdi, JSAMPROW [rdi] ; outptr

test rax, SIZEOF_YMMWORD-1
jz short .skip
mov dl, JSAMPLE [rsi+(rax-1)*SIZEOF_JSAMPLE]
mov JSAMPLE [rsi+rax*SIZEOF_JSAMPLE], dl ; insert a dummy sample
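; (when the width is not a multiple of SIZEOF_YMMWORD, the last block reads one
; sample past the end as its "next" neighbor, so the final sample is replicated
; there)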
.skip:
vpand ymm7, ymm10, YMMWORD [rsi+0*SIZEOF_YMMWORD]

add rax, byte SIZEOF_YMMWORD-1
and rax, byte -SIZEOF_YMMWORD
cmp rax, byte SIZEOF_YMMWORD
ja short .columnloop

.columnloop_last:
vpand ymm6, ymm9, YMMWORD [rsi+0*SIZEOF_YMMWORD]
jmp short .upsample

.columnloop:
vmovdqu ymm6, YMMWORD [rsi+1*SIZEOF_YMMWORD]
vperm2i128 ymm6, ymm0, ymm6, 0x20
vpslldq ymm6, ymm6, 15

.upsample:
vmovdqu ymm1, YMMWORD [rsi+0*SIZEOF_YMMWORD] ; ymm1=( 0 1 2 ... 29 30 31)
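; vpslldq/vpsrldq shift only within each 128-bit lane, so the byte-shifted
; neighbor vectors below are assembled with vperm2i128 + vpalignr, which can
; move bytes across the lane boundary.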

vperm2i128 ymm2, ymm0, ymm1, 0x20
vpalignr ymm2, ymm1, ymm2, 15 ; ymm2=(-- 0 1 ... 28 29 30)
vperm2i128 ymm4, ymm0, ymm1, 0x03
vpalignr ymm3, ymm4, ymm1, 1 ; ymm3=( 1 2 3 ... 30 31 --)

vpor ymm2, ymm2, ymm7 ; ymm2=(-1 0 1 ... 28 29 30)
vpor ymm3, ymm3, ymm6 ; ymm3=( 1 2 3 ... 30 31 32)

vpsrldq ymm7, ymm4, (SIZEOF_XMMWORD-1) ; ymm7=(31 -- -- ... -- -- --)

vpunpckhbw ymm4, ymm1, ymm0 ; ymm4=( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
vpunpcklbw ymm5, ymm1, ymm0 ; ymm5=( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
vperm2i128 ymm1, ymm5, ymm4, 0x20 ; ymm1=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vperm2i128 ymm4, ymm5, ymm4, 0x31 ; ymm4=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vpunpckhbw ymm5, ymm2, ymm0 ; ymm5=( 7 8 9 10 11 12 13 14 23 24 25 26 27 28 29 30)
vpunpcklbw ymm6, ymm2, ymm0 ; ymm6=(-1 0 1 2 3 4 5 6 15 16 17 18 19 20 21 22)
vperm2i128 ymm2, ymm6, ymm5, 0x20 ; ymm2=(-1 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14)
vperm2i128 ymm5, ymm6, ymm5, 0x31 ; ymm5=(15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30)

vpunpckhbw ymm6, ymm3, ymm0 ; ymm6=( 9 10 11 12 13 14 15 16 25 26 27 28 29 30 31 32)
vpunpcklbw ymm8, ymm3, ymm0 ; ymm8=( 1 2 3 4 5 6 7 8 17 18 19 20 21 22 23 24)
vperm2i128 ymm3, ymm8, ymm6, 0x20 ; ymm3=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16)
vperm2i128 ymm6, ymm8, ymm6, 0x31 ; ymm6=(17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32)

vpmullw ymm1, ymm1, [rel PW_THREE]
vpmullw ymm4, ymm4, [rel PW_THREE]
vpaddw ymm2, ymm2, [rel PW_ONE]
vpaddw ymm5, ymm5, [rel PW_ONE]
vpaddw ymm3, ymm3, [rel PW_TWO]
vpaddw ymm6, ymm6, [rel PW_TWO]

vpaddw ymm2, ymm2, ymm1
vpaddw ymm5, ymm5, ymm4
vpsrlw ymm2, ymm2, 2 ; ymm2=OutLE=( 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30)
vpsrlw ymm5, ymm5, 2 ; ymm5=OutHE=(32 34 36 38 40 42 44 46 48 50 52 54 56 58 60 62)
vpaddw ymm3, ymm3, ymm1
vpaddw ymm6, ymm6, ymm4
vpsrlw ymm3, ymm3, 2 ; ymm3=OutLO=( 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31)
vpsrlw ymm6, ymm6, 2 ; ymm6=OutHO=(33 35 37 39 41 43 45 47 49 51 53 55 57 59 61 63)

vpsllw ymm3, ymm3, BYTE_BIT
vpsllw ymm6, ymm6, BYTE_BIT
vpor ymm2, ymm2, ymm3 ; ymm2=OutL=( 0 1 2 ... 29 30 31)
vpor ymm5, ymm5, ymm6 ; ymm5=OutH=(32 33 34 ... 61 62 63)

vmovdqu YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm2
vmovdqu YMMWORD [rdi+1*SIZEOF_YMMWORD], ymm5

sub rax, byte SIZEOF_YMMWORD
add rsi, byte 1*SIZEOF_YMMWORD ; inptr
add rdi, byte 2*SIZEOF_YMMWORD ; outptr
cmp rax, byte SIZEOF_YMMWORD
ja near .columnloop
test eax, eax
jnz near .columnloop_last

pop rsi
pop rdi
pop rax

add rsi, byte SIZEOF_JSAMPROW ; input_data
add rdi, byte SIZEOF_JSAMPROW ; output_data
dec rcx ; rowctr
jg near .rowloop

.return:
vzeroupper
uncollect_args 4
pop_xmm 3
pop rbp
ret

; --------------------------------------------------------------------------
;
; Fancy processing for the common case of 2:1 horizontal and 2:1 vertical.
; Again a triangle filter; see comments for h2v1 case, above.
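;
; In scalar terms (a sketch of the two passes implemented below): for each
; output row a vertical sum v[i] = 3 * cur_row[i] + adj_row[i] is formed,
; where adj_row is the input row above (for the upper output row) or below
; (for the lower output row); then horizontally
;   out[2*i]   = (3 * v[i] + v[i-1] + 8) >> 4
;   out[2*i+1] = (3 * v[i] + v[i+1] + 7) >> 4
; with edge samples replicated as in the h2v1 case.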
;
; GLOBAL(void)
; jsimd_h2v2_fancy_upsample_avx2(int max_v_samp_factor,
; JDIMENSION downsampled_width,
; JSAMPARRAY input_data,
; JSAMPARRAY *output_data_ptr);
;

; r10 = int max_v_samp_factor
; r11d = JDIMENSION downsampled_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY *output_data_ptr

%define wk(i) rbp - (WK_NUM - (i)) * SIZEOF_YMMWORD ; ymmword wk[WK_NUM]
%define WK_NUM 4
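; wk(0)/wk(1) hold the left-neighbor word and wk(2)/wk(3) the right-neighbor
; word of the current column block (for the upper and lower output rows,
; respectively), carried across iterations of the column loop.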

align 32
GLOBAL_FUNCTION(jsimd_h2v2_fancy_upsample_avx2)

EXTN(jsimd_h2v2_fancy_upsample_avx2):
push rbp
mov rax, rsp ; rax = original rbp
sub rsp, byte 4
and rsp, byte (-SIZEOF_YMMWORD) ; align to 256 bits
mov [rsp], rax
mov rbp, rsp ; rbp = aligned rbp
lea rsp, [wk(0)]
push_xmm 3
collect_args 4
push rbx

mov eax, r11d ; colctr
test rax, rax
jz near .return

mov rcx, r10 ; rowctr
test rcx, rcx
jz near .return

mov rsi, r12 ; input_data
mov rdi, r13
mov rdi, JSAMPARRAY [rdi] ; output_data
.rowloop:
push rax ; colctr
push rcx
push rdi
push rsi

mov rcx, JSAMPROW [rsi-1*SIZEOF_JSAMPROW] ; inptr1(above)
mov rbx, JSAMPROW [rsi+0*SIZEOF_JSAMPROW] ; inptr0
mov rsi, JSAMPROW [rsi+1*SIZEOF_JSAMPROW] ; inptr1(below)
mov rdx, JSAMPROW [rdi+0*SIZEOF_JSAMPROW] ; outptr0
mov rdi, JSAMPROW [rdi+1*SIZEOF_JSAMPROW] ; outptr1

vpxor ymm8, ymm8, ymm8 ; ymm8=(all 0's)
vpcmpeqb xmm9, xmm9, xmm9
vpsrldq xmm10, xmm9, (SIZEOF_XMMWORD-2) ; (ffff ---- ---- ... ---- ----) LSB is ffff
vpslldq xmm9, xmm9, (SIZEOF_XMMWORD-2)
vperm2i128 ymm9, ymm9, ymm9, 1 ; (---- ---- ... ---- ---- ffff) MSB is ffff

test rax, SIZEOF_YMMWORD-1
jz short .skip
push rdx
mov dl, JSAMPLE [rcx+(rax-1)*SIZEOF_JSAMPLE]
mov JSAMPLE [rcx+rax*SIZEOF_JSAMPLE], dl
mov dl, JSAMPLE [rbx+(rax-1)*SIZEOF_JSAMPLE]
mov JSAMPLE [rbx+rax*SIZEOF_JSAMPLE], dl
mov dl, JSAMPLE [rsi+(rax-1)*SIZEOF_JSAMPLE]
mov JSAMPLE [rsi+rax*SIZEOF_JSAMPLE], dl ; insert a dummy sample
pop rdx
.skip:
; -- process the first column block

vmovdqu ymm0, YMMWORD [rbx+0*SIZEOF_YMMWORD] ; ymm0=row[ 0][0]
vmovdqu ymm1, YMMWORD [rcx+0*SIZEOF_YMMWORD] ; ymm1=row[-1][0]
vmovdqu ymm2, YMMWORD [rsi+0*SIZEOF_YMMWORD] ; ymm2=row[+1][0]

vpunpckhbw ymm4, ymm0, ymm8 ; ymm4=row[ 0]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
vpunpcklbw ymm5, ymm0, ymm8 ; ymm5=row[ 0]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
vperm2i128 ymm0, ymm5, ymm4, 0x20 ; ymm0=row[ 0]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vperm2i128 ymm4, ymm5, ymm4, 0x31 ; ymm4=row[ 0](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vpunpckhbw ymm5, ymm1, ymm8 ; ymm5=row[-1]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
vpunpcklbw ymm6, ymm1, ymm8 ; ymm6=row[-1]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
vperm2i128 ymm1, ymm6, ymm5, 0x20 ; ymm1=row[-1]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vperm2i128 ymm5, ymm6, ymm5, 0x31 ; ymm5=row[-1](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vpunpckhbw ymm6, ymm2, ymm8 ; ymm6=row[+1]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
vpunpcklbw ymm3, ymm2, ymm8 ; ymm3=row[+1]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
vperm2i128 ymm2, ymm3, ymm6, 0x20 ; ymm2=row[+1]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vperm2i128 ymm6, ymm3, ymm6, 0x31 ; ymm6=row[+1](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vpmullw ymm0, ymm0, [rel PW_THREE]
vpmullw ymm4, ymm4, [rel PW_THREE]

vpaddw ymm1, ymm1, ymm0 ; ymm1=Int0L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vpaddw ymm5, ymm5, ymm4 ; ymm5=Int0H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)
vpaddw ymm2, ymm2, ymm0 ; ymm2=Int1L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vpaddw ymm6, ymm6, ymm4 ; ymm6=Int1H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vmovdqu YMMWORD [rdx+0*SIZEOF_YMMWORD], ymm1 ; temporarily save
vmovdqu YMMWORD [rdx+1*SIZEOF_YMMWORD], ymm5 ; the intermediate data
vmovdqu YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm2
vmovdqu YMMWORD [rdi+1*SIZEOF_YMMWORD], ymm6
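; (the 16-bit vertical sums are staged in the two output rows and re-read in
; .upsample below, which overwrites them with the final 8-bit samples)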

vpand ymm1, ymm1, ymm10 ; ymm1=( 0 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)
vpand ymm2, ymm2, ymm10 ; ymm2=( 0 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)

vmovdqa YMMWORD [wk(0)], ymm1
vmovdqa YMMWORD [wk(1)], ymm2

add rax, byte SIZEOF_YMMWORD-1
and rax, byte -SIZEOF_YMMWORD
cmp rax, byte SIZEOF_YMMWORD
ja short .columnloop

.columnloop_last:
; -- process the last column block

vpand ymm1, ymm9, YMMWORD [rdx+1*SIZEOF_YMMWORD]
vpand ymm2, ymm9, YMMWORD [rdi+1*SIZEOF_YMMWORD]

vmovdqa YMMWORD [wk(2)], ymm1 ; ymm1=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 31)
vmovdqa YMMWORD [wk(3)], ymm2 ; ymm2=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 31)

jmp near .upsample

.columnloop:
; -- process the next column block

vmovdqu ymm0, YMMWORD [rbx+1*SIZEOF_YMMWORD] ; ymm0=row[ 0][1]
vmovdqu ymm1, YMMWORD [rcx+1*SIZEOF_YMMWORD] ; ymm1=row[-1][1]
vmovdqu ymm2, YMMWORD [rsi+1*SIZEOF_YMMWORD] ; ymm2=row[+1][1]

vpunpckhbw ymm4, ymm0, ymm8 ; ymm4=row[ 0]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
vpunpcklbw ymm5, ymm0, ymm8 ; ymm5=row[ 0]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
vperm2i128 ymm0, ymm5, ymm4, 0x20 ; ymm0=row[ 0]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vperm2i128 ymm4, ymm5, ymm4, 0x31 ; ymm4=row[ 0](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vpunpckhbw ymm5, ymm1, ymm8 ; ymm5=row[-1]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
vpunpcklbw ymm6, ymm1, ymm8 ; ymm6=row[-1]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
vperm2i128 ymm1, ymm6, ymm5, 0x20 ; ymm1=row[-1]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vperm2i128 ymm5, ymm6, ymm5, 0x31 ; ymm5=row[-1](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vpunpckhbw ymm6, ymm2, ymm8 ; ymm6=row[+1]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
vpunpcklbw ymm7, ymm2, ymm8 ; ymm7=row[+1]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
vperm2i128 ymm2, ymm7, ymm6, 0x20 ; ymm2=row[+1]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vperm2i128 ymm6, ymm7, ymm6, 0x31 ; ymm6=row[+1](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vpmullw ymm0, ymm0, [rel PW_THREE]
vpmullw ymm4, ymm4, [rel PW_THREE]

vpaddw ymm1, ymm1, ymm0 ; ymm1=Int0L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vpaddw ymm5, ymm5, ymm4 ; ymm5=Int0H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)
vpaddw ymm2, ymm2, ymm0 ; ymm2=Int1L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vpaddw ymm6, ymm6, ymm4 ; ymm6=Int1H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vmovdqu YMMWORD [rdx+2*SIZEOF_YMMWORD], ymm1 ; temporarily save
vmovdqu YMMWORD [rdx+3*SIZEOF_YMMWORD], ymm5 ; the intermediate data
vmovdqu YMMWORD [rdi+2*SIZEOF_YMMWORD], ymm2
vmovdqu YMMWORD [rdi+3*SIZEOF_YMMWORD], ymm6

vperm2i128 ymm1, ymm8, ymm1, 0x20
vpslldq ymm1, ymm1, 14 ; ymm1=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 0)
vperm2i128 ymm2, ymm8, ymm2, 0x20
vpslldq ymm2, ymm2, 14 ; ymm2=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 0)

vmovdqa YMMWORD [wk(2)], ymm1
vmovdqa YMMWORD [wk(3)], ymm2

.upsample:
; -- process the upper row

vmovdqu ymm7, YMMWORD [rdx+0*SIZEOF_YMMWORD] ; ymm7=Int0L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vmovdqu ymm3, YMMWORD [rdx+1*SIZEOF_YMMWORD] ; ymm3=Int0H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vperm2i128 ymm0, ymm8, ymm7, 0x03
vpalignr ymm0, ymm0, ymm7, 2 ; ymm0=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 --)
vperm2i128 ymm4, ymm8, ymm3, 0x20
vpslldq ymm4, ymm4, 14 ; ymm4=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 16)

vperm2i128 ymm5, ymm8, ymm7, 0x03
vpsrldq ymm5, ymm5, 14 ; ymm5=(15 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)
vperm2i128 ymm6, ymm8, ymm3, 0x20
vpalignr ymm6, ymm3, ymm6, 14 ; ymm6=(-- 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30)

vpor ymm0, ymm0, ymm4 ; ymm0=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16)
vpor ymm5, ymm5, ymm6 ; ymm5=(15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30)

vperm2i128 ymm2, ymm8, ymm3, 0x03
vpalignr ymm2, ymm2, ymm3, 2 ; ymm2=(17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 --)
vperm2i128 ymm4, ymm8, ymm3, 0x03
vpsrldq ymm4, ymm4, 14 ; ymm4=(31 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)
vperm2i128 ymm1, ymm8, ymm7, 0x20
vpalignr ymm1, ymm7, ymm1, 14 ; ymm1=(-- 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14)

vpor ymm1, ymm1, YMMWORD [wk(0)] ; ymm1=(-1 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14)
vpor ymm2, ymm2, YMMWORD [wk(2)] ; ymm2=(17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32)

vmovdqa YMMWORD [wk(0)], ymm4

vpmullw ymm7, ymm7, [rel PW_THREE]
vpmullw ymm3, ymm3, [rel PW_THREE]
vpaddw ymm1, ymm1, [rel PW_EIGHT]
vpaddw ymm5, ymm5, [rel PW_EIGHT]
vpaddw ymm0, ymm0, [rel PW_SEVEN]
vpaddw ymm2, ymm2, [rel PW_SEVEN]

vpaddw ymm1, ymm1, ymm7
vpaddw ymm5, ymm5, ymm3
vpsrlw ymm1, ymm1, 4 ; ymm1=Out0LE=( 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30)
vpsrlw ymm5, ymm5, 4 ; ymm5=Out0HE=(32 34 36 38 40 42 44 46 48 50 52 54 56 58 60 62)
vpaddw ymm0, ymm0, ymm7
vpaddw ymm2, ymm2, ymm3
vpsrlw ymm0, ymm0, 4 ; ymm0=Out0LO=( 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31)
vpsrlw ymm2, ymm2, 4 ; ymm2=Out0HO=(33 35 37 39 41 43 45 47 49 51 53 55 57 59 61 63)

vpsllw ymm0, ymm0, BYTE_BIT
vpsllw ymm2, ymm2, BYTE_BIT
vpor ymm1, ymm1, ymm0 ; ymm1=Out0L=( 0 1 2 ... 29 30 31)
vpor ymm5, ymm5, ymm2 ; ymm5=Out0H=(32 33 34 ... 61 62 63)

vmovdqu YMMWORD [rdx+0*SIZEOF_YMMWORD], ymm1
vmovdqu YMMWORD [rdx+1*SIZEOF_YMMWORD], ymm5

; -- process the lower row

vmovdqu ymm6, YMMWORD [rdi+0*SIZEOF_YMMWORD] ; ymm6=Int1L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
vmovdqu ymm4, YMMWORD [rdi+1*SIZEOF_YMMWORD] ; ymm4=Int1H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

vperm2i128 ymm7, ymm8, ymm6, 0x03
vpalignr ymm7, ymm7, ymm6, 2 ; ymm7=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 --)
vperm2i128 ymm3, ymm8, ymm4, 0x20
vpslldq ymm3, ymm3, 14 ; ymm3=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 16)

vperm2i128 ymm0, ymm8, ymm6, 0x03
vpsrldq ymm0, ymm0, 14 ; ymm0=(15 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)
vperm2i128 ymm2, ymm8, ymm4, 0x20
vpalignr ymm2, ymm4, ymm2, 14 ; ymm2=(-- 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30)

vpor ymm7, ymm7, ymm3 ; ymm7=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16)
vpor ymm0, ymm0, ymm2 ; ymm0=(15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30)

vperm2i128 ymm5, ymm8, ymm4, 0x03
vpalignr ymm5, ymm5, ymm4, 2 ; ymm5=(17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 --)
vperm2i128 ymm3, ymm8, ymm4, 0x03
vpsrldq ymm3, ymm3, 14 ; ymm3=(31 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)
vperm2i128 ymm1, ymm8, ymm6, 0x20
vpalignr ymm1, ymm6, ymm1, 14 ; ymm1=(-- 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14)

vpor ymm1, ymm1, YMMWORD [wk(1)] ; ymm1=(-1 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14)
vpor ymm5, ymm5, YMMWORD [wk(3)] ; ymm5=(17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32)

vmovdqa YMMWORD [wk(1)], ymm3

vpmullw ymm6, ymm6, [rel PW_THREE]
vpmullw ymm4, ymm4, [rel PW_THREE]
vpaddw ymm1, ymm1, [rel PW_EIGHT]
vpaddw ymm0, ymm0, [rel PW_EIGHT]
vpaddw ymm7, ymm7, [rel PW_SEVEN]
vpaddw ymm5, ymm5, [rel PW_SEVEN]

vpaddw ymm1, ymm1, ymm6
vpaddw ymm0, ymm0, ymm4
vpsrlw ymm1, ymm1, 4 ; ymm1=Out1LE=( 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30)
vpsrlw ymm0, ymm0, 4 ; ymm0=Out1HE=(32 34 36 38 40 42 44 46 48 50 52 54 56 58 60 62)
vpaddw ymm7, ymm7, ymm6
vpaddw ymm5, ymm5, ymm4
vpsrlw ymm7, ymm7, 4 ; ymm7=Out1LO=( 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31)
vpsrlw ymm5, ymm5, 4 ; ymm5=Out1HO=(33 35 37 39 41 43 45 47 49 51 53 55 57 59 61 63)

vpsllw ymm7, ymm7, BYTE_BIT
vpsllw ymm5, ymm5, BYTE_BIT
vpor ymm1, ymm1, ymm7 ; ymm1=Out1L=( 0 1 2 ... 29 30 31)
vpor ymm0, ymm0, ymm5 ; ymm0=Out1H=(32 33 34 ... 61 62 63)

vmovdqu YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm1
vmovdqu YMMWORD [rdi+1*SIZEOF_YMMWORD], ymm0

sub rax, byte SIZEOF_YMMWORD
add rcx, byte 1*SIZEOF_YMMWORD ; inptr1(above)
add rbx, byte 1*SIZEOF_YMMWORD ; inptr0
add rsi, byte 1*SIZEOF_YMMWORD ; inptr1(below)
add rdx, byte 2*SIZEOF_YMMWORD ; outptr0
add rdi, byte 2*SIZEOF_YMMWORD ; outptr1
cmp rax, byte SIZEOF_YMMWORD
ja near .columnloop
test rax, rax
jnz near .columnloop_last

pop rsi
pop rdi
pop rcx
pop rax

add rsi, byte 1*SIZEOF_JSAMPROW ; input_data
add rdi, byte 2*SIZEOF_JSAMPROW ; output_data
sub rcx, byte 2 ; rowctr
jg near .rowloop

.return:
pop rbx
vzeroupper
uncollect_args 4
pop_xmm 3
mov rsp, rbp ; rsp <- aligned rbp
pop rsp ; rsp <- original rbp
pop rbp
ret

; --------------------------------------------------------------------------
;
; Fast processing for the common case of 2:1 horizontal and 1:1 vertical.
; It's still a box filter.
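; Each input sample is simply duplicated: out[2*i] = out[2*i+1] = in[i].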
;
; GLOBAL(void)
; jsimd_h2v1_upsample_avx2(int max_v_samp_factor, JDIMENSION output_width,
; JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);
;

; r10 = int max_v_samp_factor
; r11d = JDIMENSION output_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY *output_data_ptr

align 32
GLOBAL_FUNCTION(jsimd_h2v1_upsample_avx2)

EXTN(jsimd_h2v1_upsample_avx2):
push rbp
mov rax, rsp
mov rbp, rsp
collect_args 4

mov edx, r11d
add rdx, byte (SIZEOF_YMMWORD-1)
and rdx, -SIZEOF_YMMWORD
jz near .return

mov rcx, r10 ; rowctr
test rcx, rcx
jz short .return

mov rsi, r12 ; input_data
mov rdi, r13
mov rdi, JSAMPARRAY [rdi] ; output_data
.rowloop:
push rdi
push rsi

mov rsi, JSAMPROW [rsi] ; inptr
mov rdi, JSAMPROW [rdi] ; outptr
mov rax, rdx ; colctr
.columnloop:

cmp rax, byte SIZEOF_YMMWORD
ja near .above_16

vmovdqu xmm0, XMMWORD [rsi+0*SIZEOF_YMMWORD]
vpunpckhbw xmm1, xmm0, xmm0
vpunpcklbw xmm0, xmm0, xmm0

vmovdqu XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm0
vmovdqu XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm1

jmp short .nextrow

.above_16:
vmovdqu ymm0, YMMWORD [rsi+0*SIZEOF_YMMWORD]

vpermq ymm0, ymm0, 0xd8
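; 0xd8 reorders the qwords to (0, 2, 1, 3) so that the in-lane unpacks below
; leave the duplicated samples in sequential order across the 64-byte result.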
vpunpckhbw ymm1, ymm0, ymm0
vpunpcklbw ymm0, ymm0, ymm0

vmovdqu YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm0
vmovdqu YMMWORD [rdi+1*SIZEOF_YMMWORD], ymm1

sub rax, byte 2*SIZEOF_YMMWORD
jz short .nextrow

add rsi, byte SIZEOF_YMMWORD ; inptr
add rdi, byte 2*SIZEOF_YMMWORD ; outptr
jmp short .columnloop

.nextrow:
pop rsi
pop rdi

add rsi, byte SIZEOF_JSAMPROW ; input_data
add rdi, byte SIZEOF_JSAMPROW ; output_data
dec rcx ; rowctr
jg short .rowloop

.return:
vzeroupper
uncollect_args 4
pop rbp
ret

; --------------------------------------------------------------------------
;
; Fast processing for the common case of 2:1 horizontal and 2:1 vertical.
; It's still a box filter.
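; Each input sample is duplicated into a 2x2 block:
;   out0[2*i] = out0[2*i+1] = out1[2*i] = out1[2*i+1] = in[i].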
;
; GLOBAL(void)
; jsimd_h2v2_upsample_avx2(int max_v_samp_factor, JDIMENSION output_width,
; JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);
;

; r10 = int max_v_samp_factor
; r11d = JDIMENSION output_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY *output_data_ptr

align 32
GLOBAL_FUNCTION(jsimd_h2v2_upsample_avx2)

EXTN(jsimd_h2v2_upsample_avx2):
push rbp
mov rax, rsp
mov rbp, rsp
collect_args 4
push rbx

mov edx, r11d
add rdx, byte (SIZEOF_YMMWORD-1)
and rdx, -SIZEOF_YMMWORD
jz near .return

mov rcx, r10 ; rowctr
test rcx, rcx
jz near .return

mov rsi, r12 ; input_data
mov rdi, r13
mov rdi, JSAMPARRAY [rdi] ; output_data
.rowloop:
push rdi
push rsi

mov rsi, JSAMPROW [rsi] ; inptr
mov rbx, JSAMPROW [rdi+0*SIZEOF_JSAMPROW] ; outptr0
mov rdi, JSAMPROW [rdi+1*SIZEOF_JSAMPROW] ; outptr1
mov rax, rdx ; colctr
.columnloop:

cmp rax, byte SIZEOF_YMMWORD
ja short .above_16

vmovdqu xmm0, XMMWORD [rsi+0*SIZEOF_XMMWORD]
vpunpckhbw xmm1, xmm0, xmm0
vpunpcklbw xmm0, xmm0, xmm0

vmovdqu XMMWORD [rbx+0*SIZEOF_XMMWORD], xmm0
vmovdqu XMMWORD [rbx+1*SIZEOF_XMMWORD], xmm1
vmovdqu XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm0
vmovdqu XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm1

jmp near .nextrow

.above_16:
vmovdqu ymm0, YMMWORD [rsi+0*SIZEOF_YMMWORD]

vpermq ymm0, ymm0, 0xd8
vpunpckhbw ymm1, ymm0, ymm0
vpunpcklbw ymm0, ymm0, ymm0

vmovdqu YMMWORD [rbx+0*SIZEOF_YMMWORD], ymm0
vmovdqu YMMWORD [rbx+1*SIZEOF_YMMWORD], ymm1
vmovdqu YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm0
vmovdqu YMMWORD [rdi+1*SIZEOF_YMMWORD], ymm1

sub rax, byte 2*SIZEOF_YMMWORD
jz short .nextrow

add rsi, byte SIZEOF_YMMWORD ; inptr
add rbx, 2*SIZEOF_YMMWORD ; outptr0
add rdi, 2*SIZEOF_YMMWORD ; outptr1
jmp short .columnloop

.nextrow:
pop rsi
pop rdi

add rsi, byte 1*SIZEOF_JSAMPROW ; input_data
add rdi, byte 2*SIZEOF_JSAMPROW ; output_data
sub rcx, byte 2 ; rowctr
jg near .rowloop

.return:
pop rbx
vzeroupper
uncollect_args 4
pop rbp
ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
align 32