1 ;******************************************************************************
2 ;* VP8 MMXEXT optimizations
3 ;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
4 ;* Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
6 ;* This file is part of FFmpeg.
8 ;* FFmpeg is free software; you can redistribute it and/or
9 ;* modify it under the terms of the GNU Lesser General Public
10 ;* License as published by the Free Software Foundation; either
11 ;* version 2.1 of the License, or (at your option) any later version.
13 ;* FFmpeg is distributed in the hope that it will be useful,
14 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 ;* Lesser General Public License for more details.
18 ;* You should have received a copy of the GNU Lesser General Public
19 ;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 ;******************************************************************************
24 %include "x86util.asm"
28 fourtap_filter_hw_m: times 4 dw -6, 123
37 sixtap_filter_hw_m: times 4 dw 2, -11
47 fourtap_filter_hb_m: times 8 db -6, 123
56 sixtap_filter_hb_m: times 8 db 2, 1
66 fourtap_filter_v_m: times 8 dw -6
83 sixtap_filter_v_m: times 8 dw 2
102 bilinear_filter_vw_m: times 8 dw 1
110 bilinear_filter_vb_m: times 8 db 7, 1
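; the *_hw/*_vw tables store the taps as words for the pmaddwd/pmullw code
; paths (MMXEXT/SSE2); the *_hb/*_vb tables hold byte-pair versions of the
; same filters for the SSSE3 pmaddubsw paths (for the bilinear byte table
; each pair is (8-frac, frac), e.g. "db 7, 1" for frac==1)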
119 %define fourtap_filter_hw r11
120 %define sixtap_filter_hw r11
121 %define fourtap_filter_hb r11
122 %define sixtap_filter_hb r11
123 %define fourtap_filter_v r11
124 %define sixtap_filter_v r11
125 %define bilinear_filter_vw r11
126 %define bilinear_filter_vb r11
128 %define fourtap_filter_hw fourtap_filter_hw_m
129 %define sixtap_filter_hw sixtap_filter_hw_m
130 %define fourtap_filter_hb fourtap_filter_hb_m
131 %define sixtap_filter_hb sixtap_filter_hb_m
132 %define fourtap_filter_v fourtap_filter_v_m
133 %define sixtap_filter_v sixtap_filter_v_m
134 %define bilinear_filter_vw bilinear_filter_vw_m
135 %define bilinear_filter_vb bilinear_filter_vb_m
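; on PIC builds the tables cannot be addressed as absolute symbols, so they
; are reached through r11 (loaded with a lea before each use, see the
; "lea r11, [..._m]" lines below); non-PIC builds use the plain symbols,
; which is what the %define block above selects between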
138 filter_h2_shuf: db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
139 filter_h4_shuf: db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
141 filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
142 filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
143 filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
145 pw_20091: times 4 dw 20091
146 pw_17734: times 4 dw 17734
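; 20091 and 35468 (= 2*17734) are the VP8 idct constants, roughly
; sqrt(2)*cos(pi/8)-1 and sqrt(2)*sin(pi/8) in 16-bit fixed point; 35468 does
; not fit in a signed word, so the code stores its half and doubles the input
; before the pmulhw instead (see VP8_MULTIPLY_SUMSUB below)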
164 ;-----------------------------------------------------------------------------
165 ; subpel MC functions:
167 ; void put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, int deststride,
168 ; uint8_t *src, int srcstride,
169 ; int height, int mx, int my);
170 ;-----------------------------------------------------------------------------
172 %macro FILTER_SSSE3 3
173 cglobal put_vp8_epel%1_h6_ssse3, 6, 6, %2
175 mova m3, [filter_h6_shuf2]
176 mova m4, [filter_h6_shuf3]
178 lea r11, [sixtap_filter_hb_m]
180 mova m5, [sixtap_filter_hb+r5*8-48] ; set up 6tap filter in bytes
181 mova m6, [sixtap_filter_hb+r5*8-32]
182 mova m7, [sixtap_filter_hb+r5*8-16]
189 ; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
190 ; shuffle with a memory operand
193 pshufb m0, [filter_h6_shuf1]
205 movh [r0], m0 ; store
214 cglobal put_vp8_epel%1_h4_ssse3, 6, 6, %3
217 mova m3, [filter_h2_shuf]
218 mova m4, [filter_h4_shuf]
220 lea r11, [fourtap_filter_hb_m]
222 mova m5, [fourtap_filter_hb+r5-16] ; set up 4tap filter in bytes
223 mova m6, [fourtap_filter_hb+r5]
236 movh [r0], m0 ; store
245 cglobal put_vp8_epel%1_v4_ssse3, 7, 7, %2
248 lea r11, [fourtap_filter_hb_m]
250 mova m5, [fourtap_filter_hb+r6-16]
251 mova m6, [fourtap_filter_hb+r6]
262 movh m3, [r2+2*r3] ; read new row
284 cglobal put_vp8_epel%1_v6_ssse3, 7, 7, %2
287 lea r11, [sixtap_filter_hb_m]
289 lea r6, [sixtap_filter_hb+r6*8]
303 movh m5, [r2+2*r3] ; read new row
310 pmaddubsw m6, [r6-48]
311 pmaddubsw m1, [r6-32]
312 pmaddubsw m7, [r6-16]
337 ; 4x4 block, H-only 4-tap filter
338 cglobal put_vp8_epel4_h4_mmxext, 6, 6
341 lea r11, [fourtap_filter_hw_m]
343 movq mm4, [fourtap_filter_hw+r5-16] ; set up 4tap filter in words
344 movq mm5, [fourtap_filter_hw+r5]
349 movq mm1, [r2-1] ; (ABCDEFGH) load 8 horizontal pixels
351 ; first set of 2 pixels
352 movq mm2, mm1 ; byte ABCD..
353 punpcklbw mm1, mm6 ; byte->word ABCD
354 pshufw mm0, mm2, 9 ; byte CDEF..
355 punpcklbw mm0, mm6 ; byte->word CDEF
356 pshufw mm3, mm1, 0x94 ; word ABBC
357 pshufw mm1, mm0, 0x94 ; word CDDE
358 pmaddwd mm3, mm4 ; multiply 2px with F0/F1
359 movq mm0, mm1 ; backup for second set of pixels
360 pmaddwd mm1, mm5 ; multiply 2px with F2/F3
361 paddd mm3, mm1 ; finish 1st 2px
363 ; second set of 2 pixels, use backup of above
364 punpckhbw mm2, mm6 ; byte->word EFGH
365 pmaddwd mm0, mm4 ; multiply backed up 2px with F0/F1
366 pshufw mm1, mm2, 0x94 ; word EFFG
367 pmaddwd mm1, mm5 ; multiply 2px with F2/F3
368 paddd mm0, mm1 ; finish 2nd 2px
370 ; merge two sets of 2 pixels into one set of 4, round/clip/store
371 packssdw mm3, mm0 ; merge dword->word (4px)
372 paddsw mm3, mm7 ; rounding
374 packuswb mm3, mm6 ; clip and word->bytes
375 movd [r0], mm3 ; store
384 ; 4x4 block, H-only 6-tap filter
385 cglobal put_vp8_epel4_h6_mmxext, 6, 6
388 lea r11, [sixtap_filter_hw_m]
movq mm4, [sixtap_filter_hw+r5*8-48] ; set up 6tap filter in words
391 movq mm5, [sixtap_filter_hw+r5*8-32]
392 movq mm6, [sixtap_filter_hw+r5*8-16]
397 movq mm1, [r2-2] ; (ABCDEFGH) load 8 horizontal pixels
399 ; first set of 2 pixels
400 movq mm2, mm1 ; byte ABCD..
401 punpcklbw mm1, mm3 ; byte->word ABCD
402 pshufw mm0, mm2, 0x9 ; byte CDEF..
403 punpckhbw mm2, mm3 ; byte->word EFGH
404 punpcklbw mm0, mm3 ; byte->word CDEF
405 pshufw mm1, mm1, 0x94 ; word ABBC
406 pshufw mm2, mm2, 0x94 ; word EFFG
407 pmaddwd mm1, mm4 ; multiply 2px with F0/F1
408 pshufw mm3, mm0, 0x94 ; word CDDE
409 movq mm0, mm3 ; backup for second set of pixels
410 pmaddwd mm3, mm5 ; multiply 2px with F2/F3
411 paddd mm1, mm3 ; add to 1st 2px cache
412 movq mm3, mm2 ; backup for second set of pixels
413 pmaddwd mm2, mm6 ; multiply 2px with F4/F5
414 paddd mm1, mm2 ; finish 1st 2px
416 ; second set of 2 pixels, use backup of above
417 movd mm2, [r2+3] ; byte FGHI (prevent overreads)
418 pmaddwd mm0, mm4 ; multiply 1st backed up 2px with F0/F1
419 pmaddwd mm3, mm5 ; multiply 2nd backed up 2px with F2/F3
420 paddd mm0, mm3 ; add to 2nd 2px cache
422 punpcklbw mm2, mm3 ; byte->word FGHI
423 pshufw mm2, mm2, 0xE9 ; word GHHI
424 pmaddwd mm2, mm6 ; multiply 2px with F4/F5
425 paddd mm0, mm2 ; finish 2nd 2px
427 ; merge two sets of 2 pixels into one set of 4, round/clip/store
428 packssdw mm1, mm0 ; merge dword->word (4px)
429 paddsw mm1, mm7 ; rounding
431 packuswb mm1, mm3 ; clip and word->bytes
432 movd [r0], mm1 ; store
441 ; 4x4 block, H-only 4-tap filter
443 cglobal put_vp8_epel8_h4_sse2, 6, 6, 8
446 lea r11, [fourtap_filter_hw_m]
448 mova m5, [fourtap_filter_hw+r5-16] ; set up 4tap filter in words
449 mova m6, [fourtap_filter_hw+r5]
454 punpcklbw m0, m7 ; ABCDEFGH
458 psrldq m1, 2 ; BCDEFGH
459 psrldq m2, 4 ; CDEFGH
461 punpcklwd m0, m1 ; ABBCCDDE
462 punpcklwd m2, m3 ; CDDEEFFG
468 punpcklbw m1, m7 ; ABCDEFGH
472 psrldq m2, 2 ; BCDEFGH
473 psrldq m3, 4 ; CDEFGH
475 punpcklwd m1, m2 ; ABBCCDDE
476 punpcklwd m3, m4 ; CDDEEFFG
485 movh [r0], m0 ; store
494 cglobal put_vp8_epel8_h6_sse2, 6, 6, 8
497 lea r11, [sixtap_filter_hw_m]
499 lea r5, [sixtap_filter_hw+r5*8]
punpcklbw m0, m7 ; ABCDEFGH
510 psrldq m1, 2 ; BCDEFGH
511 psrldq m2, 4 ; CDEFGH
514 punpcklbw m4, m7 ; EFGH
517 punpcklwd m0, m1 ; ABBCCDDE
518 punpcklwd m2, m3 ; CDDEEFFG
519 punpcklwd m4, m5 ; EFFGGHHI
punpcklbw m6, m7 ; ABCDEFGH
532 psrldq m1, 2 ; BCDEFGH
533 psrldq m2, 4 ; CDEFGH
536 punpcklbw m4, m7 ; EFGH
539 punpcklwd m6, m1 ; ABBCCDDE
540 punpcklwd m2, m3 ; CDDEEFFG
541 punpcklwd m4, m5 ; EFFGGHHI
552 movh [r0], m0 ; store
562 ; 4x4 block, V-only 4-tap filter
563 cglobal put_vp8_epel%2_v4_%1, 7, 7, %3
566 lea r11, [fourtap_filter_v_m]
568 lea r6, [fourtap_filter_v+r6-32]
584 ; first calculate negative taps (to prevent losing positive overflows)
585 movh m4, [r2+2*r3] ; read new row
592 ; then calculate positive taps
615 ; 4x4 block, V-only 6-tap filter
616 cglobal put_vp8_epel%2_v6_%1, 7, 7, %3
620 lea r11, [sixtap_filter_v_m]
622 lea r6, [sixtap_filter_v+r6-96]
642 ; first calculate negative taps (to prevent losing positive overflows)
649 ; then calculate positive taps
650 movh m5, [r2+2*r3] ; read new row
681 FILTER_V mmxext, 4, 0
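; the bilinear functions blend two source pixels per output:
;   dst[x] = (a*(8-frac) + b*frac + 4) >> 3
; where frac is mx (h) or my (v) and a/b are the two neighbouring pixels;
; the bilinear_filter_vw/vb tables hold these weights per frac value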
685 %macro FILTER_BILINEAR 3
686 cglobal put_vp8_bilinear%2_v_%1, 7,7,%3
691 lea r11, [bilinear_filter_vw_m]
694 mova m4, [bilinear_filter_vw+r5-16]
695 mova m5, [bilinear_filter_vw+r6-16]
731 cglobal put_vp8_bilinear%2_h_%1, 7,7,%3
736 lea r11, [bilinear_filter_vw_m]
739 mova m4, [bilinear_filter_vw+r6-16]
740 mova m5, [bilinear_filter_vw+r5-16]
779 FILTER_BILINEAR mmxext, 4, 0
781 FILTER_BILINEAR sse2, 8, 7
783 %macro FILTER_BILINEAR_SSSE3 1
784 cglobal put_vp8_bilinear%1_v_ssse3, 7,7
787 lea r11, [bilinear_filter_vb_m]
790 mova m3, [bilinear_filter_vb+r6-16]
820 cglobal put_vp8_bilinear%1_h_ssse3, 7,7
823 lea r11, [bilinear_filter_vb_m]
826 mova m2, [filter_h2_shuf]
827 mova m3, [bilinear_filter_vb+r5-16]
858 FILTER_BILINEAR_SSSE3 4
860 FILTER_BILINEAR_SSSE3 8
862 cglobal put_vp8_pixels8_mmx, 5,5
874 cglobal put_vp8_pixels16_mmx, 5,5
876 movq mm0, [r2+r3*0+0]
877 movq mm1, [r2+r3*0+8]
878 movq mm2, [r2+r3*1+0]
879 movq mm3, [r2+r3*1+8]
881 movq [r0+r1*0+0], mm0
882 movq [r0+r1*0+8], mm1
883 movq [r0+r1*1+0], mm2
884 movq [r0+r1*1+8], mm3
890 cglobal put_vp8_pixels16_sse, 5,5,2
892 movups xmm0, [r2+r3*0]
893 movups xmm1, [r2+r3*1]
895 movaps [r0+r1*0], xmm0
896 movaps [r0+r1*1], xmm1
902 ;-----------------------------------------------------------------------------
903 ; void vp8_idct_dc_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
904 ;-----------------------------------------------------------------------------
926 cglobal vp8_idct_dc_add_mmx, 3, 3
945 ADD_DC m0, m1, 0, movh
949 cglobal vp8_idct_dc_add_sse4, 3, 3, 6
973 pextrd [r0+r2], m2, 1
975 pextrd [r1+r2], m2, 3
978 ;-----------------------------------------------------------------------------
979 ; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
980 ;-----------------------------------------------------------------------------
983 cglobal vp8_idct_dc_add4y_mmx, 3, 3
985 movd m0, [r1+32*0] ; A
986 movd m1, [r1+32*2] ; C
987 punpcklwd m0, [r1+32*1] ; A B
988 punpcklwd m1, [r1+32*3] ; C D
989 punpckldq m0, m1 ; A B C D
1002 punpcklbw m0, m0 ; AABBCCDD
1003 punpcklbw m6, m6 ; AABBCCDD
1006 punpcklbw m0, m0 ; AAAABBBB
1007 punpckhbw m1, m1 ; CCCCDDDD
1008 punpcklbw m6, m6 ; AAAABBBB
1009 punpckhbw m7, m7 ; CCCCDDDD
1013 ADD_DC m0, m6, 0, mova
1014 ADD_DC m1, m7, 8, mova
1018 cglobal vp8_idct_dc_add4y_sse2, 3, 3, 6
1020 movd m0, [r1+32*0] ; A
1021 movd m1, [r1+32*2] ; C
1022 punpcklwd m0, [r1+32*1] ; A B
1023 punpcklwd m1, [r1+32*3] ; C D
1024 punpckldq m0, m1 ; A B C D
1044 ADD_DC m0, m1, 0, mova
1047 ;-----------------------------------------------------------------------------
1048 ; void vp8_idct_dc_add4uv_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
1049 ;-----------------------------------------------------------------------------
1052 cglobal vp8_idct_dc_add4uv_mmx, 3, 3
1054 movd m0, [r1+32*0] ; A
1055 movd m1, [r1+32*2] ; C
1056 punpcklwd m0, [r1+32*1] ; A B
1057 punpcklwd m1, [r1+32*3] ; C D
1058 punpckldq m0, m1 ; A B C D
1071 punpcklbw m0, m0 ; AABBCCDD
1072 punpcklbw m6, m6 ; AABBCCDD
1075 punpcklbw m0, m0 ; AAAABBBB
1076 punpckhbw m1, m1 ; CCCCDDDD
1077 punpcklbw m6, m6 ; AAAABBBB
1078 punpckhbw m7, m7 ; CCCCDDDD
1082 ADD_DC m0, m6, 0, mova
1085 ADD_DC m1, m7, 0, mova
1088 ;-----------------------------------------------------------------------------
1089 ; void vp8_idct_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
1090 ;-----------------------------------------------------------------------------
1092 ; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
1093 ; this macro assumes that m6/m7 have words for 20091/17734 loaded
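; i.e. mul_20091(x) = x + ((x*20091) >> 16), which is the pmulhw by m6 plus an
; add of the original value, and mul_35468(x) = (x*35468) >> 16; since 35468
; does not fit in a signed word, the input is doubled and multiplied by
; m7 = 17734 instead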
1094 %macro VP8_MULTIPLY_SUMSUB 4
1097 pmulhw %3, m6 ;20091(1)
1098 pmulhw %4, m6 ;20091(2)
1103 pmulhw %1, m7 ;35468(1)
1104 pmulhw %2, m7 ;35468(2)
1109 ; calculate x0=%1+%3; x1=%1-%3
1110 ; x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
1111 ; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
1112 ; %5/%6 are temporary registers
1113 ; we assume m6/m7 have constant words 20091/17734 loaded in them
1114 %macro VP8_IDCT_TRANSFORM4x4_1D 6
1115 SUMSUB_BA m%3, m%1, m%5 ;t0, t1
1116 VP8_MULTIPLY_SUMSUB m%2, m%4, m%5,m%6 ;t2, t3
1117 SUMSUB_BA m%4, m%3, m%5 ;tmp0, tmp3
1118 SUMSUB_BA m%2, m%1, m%5 ;tmp1, tmp2
1124 %macro VP8_IDCT_ADD 1
1125 cglobal vp8_idct_add_%1, 3, 3
1135 movaps [r1+ 0], xmm0
1136 movaps [r1+16], xmm0
1146 VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
1147 TRANSPOSE4x4W 0, 1, 2, 3, 4
1149 VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
1150 TRANSPOSE4x4W 0, 1, 2, 3, 4
1155 STORE_DIFFx2 m0, m1, m6, m7, m4, 3, r0, r2
1156 STORE_DIFFx2 m2, m3, m6, m7, m4, 3, r1, r2
1164 ;-----------------------------------------------------------------------------
; void vp8_luma_dc_wht_mmx(DCTELEM block[4][4][16], DCTELEM dc[16])
1166 ;-----------------------------------------------------------------------------
1168 %macro SCATTER_WHT 3
1171 mov [r0+2*16*(0+%3)], r1w
1172 mov [r0+2*16*(1+%3)], r2w
1177 mov [r0+2*16*(4+%3)], r1w
1178 mov [r0+2*16*(5+%3)], r2w
1181 mov [r0+2*16*(8+%3)], r1w
1182 mov [r0+2*16*(9+%3)], r2w
1185 mov [r0+2*16*(12+%3)], r1w
1186 mov [r0+2*16*(13+%3)], r2w
1189 %macro HADAMARD4_1D 4
1190 SUMSUB_BADC m%2, m%1, m%4, m%3
1191 SUMSUB_BADC m%4, m%2, m%3, m%1
1196 cglobal vp8_luma_dc_wht_mmx, 2,3
1201 HADAMARD4_1D 0, 1, 2, 3
1202 TRANSPOSE4x4W 0, 1, 2, 3, 4
1204 HADAMARD4_1D 0, 1, 2, 3
1213 ;-----------------------------------------------------------------------------
1214 ; void vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, int stride, int flim);
1215 ;-----------------------------------------------------------------------------
; macro called with 7 mm register indexes as arguments, and 4 regular registers
; first 4 mm registers will carry the transposed pixel data
; the other three are scratch space (one would be sufficient, but this allows
; for more spreading/pipelining and thus faster execution on out-of-order CPUs)
1223 ; first two regular registers are buf+4*stride and buf+5*stride
1224 ; third is -stride, fourth is +stride
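; (after the word-level transpose that follows this read, the first 4 mm
; registers hold the p1/p0/q0/q1 columns of all 8 rows, matching the register
; layout of the vertical code path)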
1225 %macro READ_8x4_INTERLEAVED 11
1226 ; interleave 8 (A-H) rows of 4 pixels each
1227 movd m%1, [%8+%10*4] ; A0-3
1228 movd m%5, [%9+%10*4] ; B0-3
1229 movd m%2, [%8+%10*2] ; C0-3
1230 movd m%6, [%8+%10] ; D0-3
1231 movd m%3, [%8] ; E0-3
1232 movd m%7, [%9] ; F0-3
1233 movd m%4, [%9+%11] ; G0-3
1234 punpcklbw m%1, m%5 ; A/B interleaved
1235 movd m%5, [%9+%11*2] ; H0-3
1236 punpcklbw m%2, m%6 ; C/D interleaved
1237 punpcklbw m%3, m%7 ; E/F interleaved
1238 punpcklbw m%4, m%5 ; G/H interleaved
; macro called with 7 mm register indexes as arguments, and 5 regular registers
; first 11 mean the same as in READ_8x4_INTERLEAVED above
; fifth regular register is scratch space to reach the bottom 8 rows, it
; will be set to second regular register + 8*stride at the end
1245 %macro READ_16x4_INTERLEAVED 12
1246 ; transpose 16 (A-P) rows of 4 pixels each
1249 ; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
1250 movd m%1, [%8+%10*4] ; A0-3
1251 movd m%3, [%12+%10*4] ; I0-3
1252 movd m%2, [%8+%10*2] ; C0-3
1253 movd m%4, [%12+%10*2] ; K0-3
1254 movd m%6, [%8+%10] ; D0-3
1255 movd m%5, [%12+%10] ; L0-3
1256 movd m%7, [%12] ; M0-3
1258 punpcklbw m%1, m%3 ; A/I
1259 movd m%3, [%8] ; E0-3
1260 punpcklbw m%2, m%4 ; C/K
1261 punpcklbw m%6, m%5 ; D/L
1262 punpcklbw m%3, m%7 ; E/M
1263 punpcklbw m%2, m%6 ; C/D/K/L interleaved
1265 ; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
1266 movd m%5, [%9+%10*4] ; B0-3
1267 movd m%4, [%12+%10*4] ; J0-3
1268 movd m%7, [%9] ; F0-3
1269 movd m%6, [%12] ; N0-3
1270 punpcklbw m%5, m%4 ; B/J
1271 punpcklbw m%7, m%6 ; F/N
1272 punpcklbw m%1, m%5 ; A/B/I/J interleaved
1273 punpcklbw m%3, m%7 ; E/F/M/N interleaved
1274 movd m%4, [%9+%11] ; G0-3
1275 movd m%6, [%12+%11] ; O0-3
1276 movd m%5, [%9+%11*2] ; H0-3
1277 movd m%7, [%12+%11*2] ; P0-3
1278 punpcklbw m%4, m%6 ; G/O
1279 punpcklbw m%5, m%7 ; H/P
1280 punpcklbw m%4, m%5 ; G/H/O/P interleaved
1283 ; write 4 mm registers of 2 dwords each
1284 ; first four arguments are mm register indexes containing source data
1285 ; last four are registers containing buf+4*stride, buf+5*stride,
1286 ; -stride and +stride
1288 ; write out (2 dwords per register)
1303 ; write 4 xmm registers of 4 dwords each
; arguments same as WRITE_4x2D, but with an extra register, so that the 5 regular
1305 ; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
; we add 1*stride to the third regular register in the process
1307 ; the 10th argument is 16 if it's a Y filter (i.e. all regular registers cover the
1308 ; same memory region), or 8 if they cover two separate buffers (third one points to
; a different memory region than the first two), allowing for more optimal code for
; the single-buffer (width-16) case
1311 %macro WRITE_4x4D 10
1312 ; write out (4 dwords per register), start with dwords zero
1363 %macro SPLATB_REG_MMX 2-3
1370 %macro SPLATB_REG_MMXEXT 2-3
1376 %macro SPLATB_REG_SSE2 2-3
1383 %macro SPLATB_REG_SSSE3 3
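; the SPLATB_REG_* variants above broadcast the low byte of a GPR (the filter
; limits are passed as plain ints) into every byte of an mm/xmm register; the
; SSSE3 version can do this with a single pshufb against the zeroed scratch
; register passed as the third argument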
1388 %macro SIMPLE_LOOPFILTER 3
1389 cglobal vp8_%2_loop_filter_simple_%1, 3, %3
1391 mov r5, rsp ; backup stack pointer
1392 and rsp, ~(mmsize-1) ; align stack
1394 %if mmsize == 8 ; mmx/mmxext
%ifnidn %1, sse2
%if mmsize == 16
1400 SPLATB_REG m7, r2, m0 ; splat "flim" into register
1402 ; set up indexes to address 4 rows
1407 sub rsp, mmsize*2 ; (aligned) storage space for saving p1/q1
1410 %if mmsize == 8 ; mmx / mmxext
1414 ; read 4 half/full rows of pixels
1415 mova m0, [r0+r1*2] ; p1
1416 mova m1, [r0+r1] ; p0
1418 mova m3, [r0+r2] ; q1
1422 %if mmsize == 8 ; mmx/mmxext
1423 READ_8x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, r0, r4, r1, r2
1425 READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, r0, r4, r1, r2, r3
1427 TRANSPOSE4x4W 0, 1, 2, 3, 4
1429 mova [rsp], m0 ; store p1
1430 mova [rsp+mmsize], m3 ; store q1
1434 mova m5, m2 ; m5=backup of q0
1435 mova m6, m1 ; m6=backup of p0
1436 psubusb m1, m2 ; p0-q0
1437 psubusb m2, m6 ; q0-p0
1438 por m1, m2 ; FFABS(p0-q0)
1439 paddusb m1, m1 ; m1=FFABS(p0-q0)*2
1443 psubusb m3, m0 ; q1-p1
1444 psubusb m0, m4 ; p1-q1
1445 por m3, m0 ; FFABS(p1-q1)
1449 psubsb m2, m4 ; m2=p1-q1 (signed) backup for below
1451 psrlq m3, 1 ; m3=FFABS(p1-q1)/2, this can be used signed
1455 pcmpeqb m3, m1 ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)
1457 ; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)
1461 psubsb m5, m0 ; q0-p0 (signed)
1464 paddsb m2, m5 ; a=(p1-q1) + 3*(q0-p0)
1465 pand m2, m3 ; apply filter mask (m3)
1469 paddsb m2, [pb_4] ; f1<<3=a+4
1470 paddsb m1, [pb_3] ; f2<<3=a+3
1472 pand m1, m3 ; cache f2<<3
1476 pcmpgtb m0, m2 ; which values are <0?
1477 psubb m3, m2 ; -f1<<3
1483 paddusb m4, m3 ; q0-f1
1487 pcmpgtb m0, m1 ; which values are <0?
1488 psubb m3, m1 ; -f2<<3
1494 psubusb m6, m3 ; p0+f2
1504 mova m3, [rsp+mmsize] ; q1
1506 TRANSPOSE4x4B 0, 1, 2, 3, 4
1507 %if mmsize == 16 ; sse2
1508 add r3, r1 ; change from r4*8*stride to r0+8*stride
1509 WRITE_4x4D 0, 1, 2, 3, r0, r4, r3, r1, r2, 16
1511 WRITE_4x2D 0, 1, 2, 3, r0, r4, r1, r2
1515 %if mmsize == 8 ; mmx/mmxext
1518 add r0, 8 ; advance 8 cols = pixels
1520 lea r0, [r0+r2*8] ; advance 8 rows = lines
1527 mov rsp, r5 ; restore stack pointer
1532 mov rsp, r5 ; restore stack pointer
1539 %define SPLATB_REG SPLATB_REG_MMX
1540 SIMPLE_LOOPFILTER mmx, v, 4
1541 SIMPLE_LOOPFILTER mmx, h, 6
1542 %define SPLATB_REG SPLATB_REG_MMXEXT
1543 SIMPLE_LOOPFILTER mmxext, v, 4
1544 SIMPLE_LOOPFILTER mmxext, h, 6
1546 %define SPLATB_REG SPLATB_REG_SSE2
1547 SIMPLE_LOOPFILTER sse2, v, 3
1548 SIMPLE_LOOPFILTER sse2, h, 6
1549 %define SPLATB_REG SPLATB_REG_SSSE3
1550 SIMPLE_LOOPFILTER ssse3, v, 3
1551 SIMPLE_LOOPFILTER ssse3, h, 6
1553 ;-----------------------------------------------------------------------------
1554 ; void vp8_h/v_loop_filter<size>_inner_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
1555 ; int flimE, int flimI, int hev_thr);
1556 ;-----------------------------------------------------------------------------
1558 %macro INNER_LOOPFILTER 5
1559 %if %4 == 8 ; chroma
1560 cglobal vp8_%2_loop_filter8uv_inner_%1, 6, %3, %5
1562 %define mstride_reg r2
1565 %define hev_thr_reg r5
1567 cglobal vp8_%2_loop_filter16y_inner_%1, 5, %3, %5
1568 %define mstride_reg r1
1571 %define hev_thr_reg r4
1572 %ifdef m8 ; x86-64, sse2
1574 %elif mmsize == 16 ; x86-32, sse2
1576 %else ; x86-32, mmx/mmxext
1581 %define stride_reg E_reg
1582 %define dst2_reg I_reg
1584 %define stack_reg hev_thr_reg
%ifnidn %1, sse2
%if mmsize == 16
1591 %ifndef m8 ; mmx/mmxext or sse2 on x86-32
1592 ; splat function arguments
1593 SPLATB_REG m0, E_reg, m7 ; E
1594 SPLATB_REG m1, I_reg, m7 ; I
1595 SPLATB_REG m2, hev_thr_reg, m7 ; hev_thresh
1598 mov stack_reg, rsp ; backup stack pointer
1599 and rsp, ~(mmsize-1) ; align stack
1601 sub rsp, mmsize * 4 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
1604 sub rsp, mmsize * 5 ; extra storage space for transposes
1607 %define flim_E [rsp]
1608 %define flim_I [rsp+mmsize]
1609 %define hev_thr [rsp+mmsize*2]
1610 %define mask_res [rsp+mmsize*3]
1611 %define p0backup [rsp+mmsize*3]
1612 %define q0backup [rsp+mmsize*4]
1618 %else ; sse2 on x86-64
1623 %define mask_res m12
1624 %define p0backup m12
1627 ; splat function arguments
1628 SPLATB_REG flim_E, E_reg, m7 ; E
1629 SPLATB_REG flim_I, I_reg, m7 ; I
1630 SPLATB_REG hev_thr, hev_thr_reg, m7 ; hev_thresh
1633 %if mmsize == 8 && %4 == 16 ; mmx/mmxext
1636 mov stride_reg, mstride_reg
1639 lea dst_reg, [dst_reg + stride_reg*4-4]
1641 lea dst8_reg, [dst8_reg+ stride_reg*4-4]
1649 lea dst2_reg, [dst_reg + stride_reg]
1651 %if %4 == 8 && mmsize == 16
1656 movrow m0, [dst_reg +mstride_reg*4] ; p3
1657 movrow m1, [dst2_reg+mstride_reg*4] ; p2
1658 movrow m2, [dst_reg +mstride_reg*2] ; p1
1659 movrow m5, [dst2_reg] ; q1
1660 movrow m6, [dst2_reg+ stride_reg] ; q2
1661 movrow m7, [dst2_reg+ stride_reg*2] ; q3
1662 %if mmsize == 16 && %4 == 8
1663 movhps m0, [dst8_reg+mstride_reg*4]
1664 movhps m2, [dst8_reg+mstride_reg*2]
1665 add dst8_reg, stride_reg
1666 movhps m1, [dst8_reg+mstride_reg*4]
1667 movhps m5, [dst8_reg]
1668 movhps m6, [dst8_reg+ stride_reg]
1669 movhps m7, [dst8_reg+ stride_reg*2]
1670 add dst8_reg, mstride_reg
1672 %elif mmsize == 8 ; mmx/mmxext (h)
1673 ; read 8 rows of 8px each
1674 movu m0, [dst_reg +mstride_reg*4]
1675 movu m1, [dst2_reg+mstride_reg*4]
1676 movu m2, [dst_reg +mstride_reg*2]
1677 movu m3, [dst_reg +mstride_reg]
1680 movu m6, [dst2_reg+ stride_reg]
1683 TRANSPOSE4x4B 0, 1, 2, 3, 7
1685 movu m7, [dst2_reg+ stride_reg*2]
1686 TRANSPOSE4x4B 4, 5, 6, 7, 1
1687 SBUTTERFLY dq, 0, 4, 1 ; p3/p2
1688 SBUTTERFLY dq, 2, 6, 1 ; q0/q1
1689 SBUTTERFLY dq, 3, 7, 1 ; q2/q3
1691 mova q0backup, m2 ; store q0
1692 SBUTTERFLY dq, 1, 5, 2 ; p1/p0
1693 mova p0backup, m5 ; store p0
1700 lea dst8_reg, [dst_reg + stride_reg*8]
1703 ; read 16 rows of 8px each, interleave
1704 movh m0, [dst_reg +mstride_reg*4]
1705 movh m1, [dst8_reg+mstride_reg*4]
1706 movh m2, [dst_reg +mstride_reg*2]
1707 movh m5, [dst8_reg+mstride_reg*2]
1708 movh m3, [dst_reg +mstride_reg]
1709 movh m6, [dst8_reg+mstride_reg]
1712 punpcklbw m0, m1 ; A/I
1713 punpcklbw m2, m5 ; C/K
1714 punpcklbw m3, m6 ; D/L
1715 punpcklbw m4, m7 ; E/M
1717 add dst8_reg, stride_reg
1718 movh m1, [dst2_reg+mstride_reg*4]
1719 movh m6, [dst8_reg+mstride_reg*4]
1722 punpcklbw m1, m6 ; B/J
1723 punpcklbw m5, m7 ; F/N
1724 movh m6, [dst2_reg+ stride_reg]
1725 movh m7, [dst8_reg+ stride_reg]
1726 punpcklbw m6, m7 ; G/O
1729 TRANSPOSE4x4B 0, 1, 2, 3, 7
1735 movh m7, [dst2_reg+ stride_reg*2]
1736 movh m1, [dst8_reg+ stride_reg*2]
1737 punpcklbw m7, m1 ; H/P
1738 TRANSPOSE4x4B 4, 5, 6, 7, 1
1739 SBUTTERFLY dq, 0, 4, 1 ; p3/p2
1740 SBUTTERFLY dq, 2, 6, 1 ; q0/q1
1741 SBUTTERFLY dq, 3, 7, 1 ; q2/q3
1747 mova q0backup, m2 ; store q0
1749 SBUTTERFLY dq, 1, 5, 2 ; p1/p0
1753 mova p0backup, m5 ; store p0
1761 ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
1764 psubusb m4, m0 ; p2-p3
1765 psubusb m0, m1 ; p3-p2
1766 por m0, m4 ; abs(p3-p2)
1770 psubusb m4, m1 ; p1-p2
1771 psubusb m1, m2 ; p2-p1
1772 por m1, m4 ; abs(p2-p1)
1776 psubusb m4, m7 ; q2-q3
1777 psubusb m7, m6 ; q3-q2
1778 por m7, m4 ; abs(q3-q2)
1782 psubusb m4, m6 ; q1-q2
1783 psubusb m6, m5 ; q2-q1
1784 por m6, m4 ; abs(q2-q1)
1793 pcmpeqb m0, m3 ; abs(p3-p2) <= I
1794 pcmpeqb m1, m3 ; abs(p2-p1) <= I
1795 pcmpeqb m7, m3 ; abs(q3-q2) <= I
1796 pcmpeqb m6, m3 ; abs(q2-q1) <= I
1806 ; normal_limit and high_edge_variance for p1-p0, q1-q0
1807 SWAP 7, 3 ; now m7 is zero
1809 movrow m3, [dst_reg +mstride_reg] ; p0
1810 %if mmsize == 16 && %4 == 8
1811 movhps m3, [dst8_reg+mstride_reg]
1823 psubusb m1, m3 ; p1-p0
1824 psubusb m6, m2 ; p0-p1
1825 por m1, m6 ; abs(p1-p0)
1830 pcmpeqb m1, m7 ; abs(p1-p0) <= I
1831 pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
1835 pmaxub m0, m1 ; max_I
1836 SWAP 1, 4 ; max_hev_thresh
1839 SWAP 6, 4 ; now m6 is I
1841 movrow m4, [dst_reg] ; q0
1842 %if mmsize == 16 && %4 == 8
1843 movhps m4, [dst8_reg]
1854 psubusb m1, m5 ; q0-q1
1855 psubusb m7, m4 ; q1-q0
1856 por m1, m7 ; abs(q1-q0)
1862 pcmpeqb m1, m6 ; abs(q1-q0) <= I
1863 pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
1865 pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
1873 pcmpeqb m0, m7 ; max(abs(..)) <= I
1874 pcmpeqb m6, m7 ; !(max(abs..) > thresh)
1879 mova mask_res, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
1885 mova m6, m4 ; keep copies of p0/q0 around for later use
1887 psubusb m1, m4 ; p0-q0
1888 psubusb m6, m3 ; q0-p0
1889 por m1, m6 ; abs(q0-p0)
1890 paddusb m1, m1 ; m1=2*abs(q0-p0)
1896 psubusb m7, m5 ; p1-q1
1897 psubusb m6, m2 ; q1-p1
1898 por m7, m6 ; abs(q1-p1)
1901 psrlq m7, 1 ; abs(q1-p1)/2
1902 paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
1904 pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
1905 pand m0, m7 ; normal_limit result
1907 ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
1908 %ifdef m8 ; x86-64 && sse2
1910 %define pb_80_var m8
1911 %else ; x86-32 or mmx/mmxext
1912 %define pb_80_var [pb_80]
1918 psubsb m1, m7 ; (signed) q0-p0
1923 psubsb m6, m7 ; (signed) p1-q1
1928 paddsb m7, m1 ; 3*(q0-p0)+is4tap?(p1-q1)
1947 paddusb m3, m1 ; p0+f2
1958 paddusb m4, m1 ; q0-f1
1986 paddusb m5, m1 ; q1-a
1987 paddusb m2, m0 ; p1+a
1991 movrow [dst_reg +mstride_reg*2], m2
1992 movrow [dst_reg +mstride_reg ], m3
1993 movrow [dst_reg], m4
1994 movrow [dst_reg + stride_reg ], m5
1995 %if mmsize == 16 && %4 == 8
1996 movhps [dst8_reg+mstride_reg*2], m2
1997 movhps [dst8_reg+mstride_reg ], m3
1998 movhps [dst8_reg], m4
1999 movhps [dst8_reg+ stride_reg ], m5
2006 TRANSPOSE4x4B 2, 3, 4, 5, 6
2008 %if mmsize == 8 ; mmx/mmxext (h)
2009 WRITE_4x2D 2, 3, 4, 5, dst_reg, dst2_reg, mstride_reg, stride_reg
2011 lea dst8_reg, [dst8_reg+mstride_reg+2]
2012 WRITE_4x4D 2, 3, 4, 5, dst_reg, dst2_reg, dst8_reg, mstride_reg, stride_reg, %4
2017 %if %4 == 8 ; chroma
2021 cmp dst_reg, dst8_reg
2022 mov dst_reg, dst8_reg
2026 lea dst_reg, [dst_reg + stride_reg*8-2]
2035 %ifndef m8 ; sse2 on x86-32 or mmx/mmxext
2036 mov rsp, stack_reg ; restore stack pointer
2042 %define SPLATB_REG SPLATB_REG_MMX
2043 INNER_LOOPFILTER mmx, v, 6, 16, 0
2044 INNER_LOOPFILTER mmx, h, 6, 16, 0
2045 INNER_LOOPFILTER mmx, v, 6, 8, 0
2046 INNER_LOOPFILTER mmx, h, 6, 8, 0
2048 %define SPLATB_REG SPLATB_REG_MMXEXT
2049 INNER_LOOPFILTER mmxext, v, 6, 16, 0
2050 INNER_LOOPFILTER mmxext, h, 6, 16, 0
2051 INNER_LOOPFILTER mmxext, v, 6, 8, 0
2052 INNER_LOOPFILTER mmxext, h, 6, 8, 0
2055 %define SPLATB_REG SPLATB_REG_SSE2
2056 INNER_LOOPFILTER sse2, v, 5, 16, 13
2058 INNER_LOOPFILTER sse2, h, 5, 16, 13
2060 INNER_LOOPFILTER sse2, h, 6, 16, 13
2062 INNER_LOOPFILTER sse2, v, 6, 8, 13
2063 INNER_LOOPFILTER sse2, h, 6, 8, 13
2065 %define SPLATB_REG SPLATB_REG_SSSE3
2066 INNER_LOOPFILTER ssse3, v, 5, 16, 13
2068 INNER_LOOPFILTER ssse3, h, 5, 16, 13
2070 INNER_LOOPFILTER ssse3, h, 6, 16, 13
2072 INNER_LOOPFILTER ssse3, v, 6, 8, 13
2073 INNER_LOOPFILTER ssse3, h, 6, 8, 13
2075 ;-----------------------------------------------------------------------------
2076 ; void vp8_h/v_loop_filter<size>_mbedge_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
2077 ; int flimE, int flimI, int hev_thr);
2078 ;-----------------------------------------------------------------------------
2080 ; write 4 or 8 words in the mmx/xmm registers as 8 lines
2081 ; 1 and 2 are the registers to write, this can be the same (for SSE2)
; for pre-SSE4: 3 is a general-purpose register that we will clobber
; for SSE4:     3 is a pointer to the destination's 5th line
2086 ; 4 is a pointer to the destination's 4th line
2087 ; 5/6 is -stride and +stride
2088 ; 7 is optimization string
2091 pextrw [%4+%5*4], %1, 0
2092 pextrw [%3+%5*4], %1, 1
2093 pextrw [%4+%5*2], %1, 2
2094 pextrw [%4+%5 ], %1, 3
2097 pextrw [%3+%6 ], %1, 6
2098 pextrw [%3+%6*2], %1, 7
2141 %macro MBEDGE_LOOPFILTER 5
2142 %if %4 == 8 ; chroma
2143 cglobal vp8_%2_loop_filter8uv_mbedge_%1, 6, %3, %5
2145 %define mstride_reg r2
2148 %define hev_thr_reg r5
2150 cglobal vp8_%2_loop_filter16y_mbedge_%1, 5, %3, %5
2151 %define mstride_reg r1
2154 %define hev_thr_reg r4
2155 %ifdef m8 ; x86-64, sse2
2157 %elif mmsize == 16 ; x86-32, sse2
2159 %else ; x86-32, mmx/mmxext
2164 %define stride_reg E_reg
2165 %define dst2_reg I_reg
2167 %define stack_reg hev_thr_reg
%ifnidn %1, sse2
%if mmsize == 16
2174 %ifndef m8 ; mmx/mmxext or sse2 on x86-32
2175 ; splat function arguments
2176 SPLATB_REG m0, E_reg, m7 ; E
2177 SPLATB_REG m1, I_reg, m7 ; I
2178 SPLATB_REG m2, hev_thr_reg, m7 ; hev_thresh
2181 mov stack_reg, rsp ; backup stack pointer
2182 and rsp, ~(mmsize-1) ; align stack
2183 sub rsp, mmsize * 8 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
2185 ; [4]=filter tmp result
2186 ; [5]/[6] = p2/q2 backup
2187 ; [7]=lim_res sign result
2189 %define flim_E [rsp]
2190 %define flim_I [rsp+mmsize]
2191 %define hev_thr [rsp+mmsize*2]
2192 %define mask_res [rsp+mmsize*3]
2193 %define lim_res [rsp+mmsize*4]
2194 %define p0backup [rsp+mmsize*3]
2195 %define q0backup [rsp+mmsize*4]
2196 %define p2backup [rsp+mmsize*5]
2197 %define q2backup [rsp+mmsize*6]
2198 %define lim_sign [rsp+mmsize*7]
2204 %else ; sse2 on x86-64
2209 %define mask_res m12
2211 %define p0backup m12
2213 %define p2backup m13
2214 %define q2backup m14
2215 %define lim_sign m15
2217 ; splat function arguments
2218 SPLATB_REG flim_E, E_reg, m7 ; E
2219 SPLATB_REG flim_I, I_reg, m7 ; I
2220 SPLATB_REG hev_thr, hev_thr_reg, m7 ; hev_thresh
2223 %if mmsize == 8 && %4 == 16 ; mmx/mmxext
2226 mov stride_reg, mstride_reg
2229 lea dst_reg, [dst_reg + stride_reg*4-4]
2231 lea dst8_reg, [dst8_reg+ stride_reg*4-4]
2239 lea dst2_reg, [dst_reg + stride_reg]
2241 %if %4 == 8 && mmsize == 16
2246 movrow m0, [dst_reg +mstride_reg*4] ; p3
2247 movrow m1, [dst2_reg+mstride_reg*4] ; p2
2248 movrow m2, [dst_reg +mstride_reg*2] ; p1
2249 movrow m5, [dst2_reg] ; q1
2250 movrow m6, [dst2_reg+ stride_reg] ; q2
2251 movrow m7, [dst2_reg+ stride_reg*2] ; q3
2252 %if mmsize == 16 && %4 == 8
2253 movhps m0, [dst8_reg+mstride_reg*4]
2254 movhps m2, [dst8_reg+mstride_reg*2]
2255 add dst8_reg, stride_reg
2256 movhps m1, [dst8_reg+mstride_reg*4]
2257 movhps m5, [dst8_reg]
2258 movhps m6, [dst8_reg+ stride_reg]
2259 movhps m7, [dst8_reg+ stride_reg*2]
2260 add dst8_reg, mstride_reg
2262 %elif mmsize == 8 ; mmx/mmxext (h)
2263 ; read 8 rows of 8px each
2264 movu m0, [dst_reg +mstride_reg*4]
2265 movu m1, [dst2_reg+mstride_reg*4]
2266 movu m2, [dst_reg +mstride_reg*2]
2267 movu m3, [dst_reg +mstride_reg]
2270 movu m6, [dst2_reg+ stride_reg]
2273 TRANSPOSE4x4B 0, 1, 2, 3, 7
2275 movu m7, [dst2_reg+ stride_reg*2]
2276 TRANSPOSE4x4B 4, 5, 6, 7, 1
2277 SBUTTERFLY dq, 0, 4, 1 ; p3/p2
2278 SBUTTERFLY dq, 2, 6, 1 ; q0/q1
2279 SBUTTERFLY dq, 3, 7, 1 ; q2/q3
2281 mova q0backup, m2 ; store q0
2282 SBUTTERFLY dq, 1, 5, 2 ; p1/p0
2283 mova p0backup, m5 ; store p0
2290 lea dst8_reg, [dst_reg + stride_reg*8]
2293 ; read 16 rows of 8px each, interleave
2294 movh m0, [dst_reg +mstride_reg*4]
2295 movh m1, [dst8_reg+mstride_reg*4]
2296 movh m2, [dst_reg +mstride_reg*2]
2297 movh m5, [dst8_reg+mstride_reg*2]
2298 movh m3, [dst_reg +mstride_reg]
2299 movh m6, [dst8_reg+mstride_reg]
2302 punpcklbw m0, m1 ; A/I
2303 punpcklbw m2, m5 ; C/K
2304 punpcklbw m3, m6 ; D/L
2305 punpcklbw m4, m7 ; E/M
2307 add dst8_reg, stride_reg
2308 movh m1, [dst2_reg+mstride_reg*4]
2309 movh m6, [dst8_reg+mstride_reg*4]
2312 punpcklbw m1, m6 ; B/J
2313 punpcklbw m5, m7 ; F/N
2314 movh m6, [dst2_reg+ stride_reg]
2315 movh m7, [dst8_reg+ stride_reg]
2316 punpcklbw m6, m7 ; G/O
2319 TRANSPOSE4x4B 0, 1, 2, 3, 7
2325 movh m7, [dst2_reg+ stride_reg*2]
2326 movh m1, [dst8_reg+ stride_reg*2]
2327 punpcklbw m7, m1 ; H/P
2328 TRANSPOSE4x4B 4, 5, 6, 7, 1
2329 SBUTTERFLY dq, 0, 4, 1 ; p3/p2
2330 SBUTTERFLY dq, 2, 6, 1 ; q0/q1
2331 SBUTTERFLY dq, 3, 7, 1 ; q2/q3
2337 mova q0backup, m2 ; store q0
2339 SBUTTERFLY dq, 1, 5, 2 ; p1/p0
2343 mova p0backup, m5 ; store p0
2351 ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
2354 psubusb m4, m0 ; p2-p3
2355 psubusb m0, m1 ; p3-p2
2356 por m0, m4 ; abs(p3-p2)
2360 psubusb m4, m1 ; p1-p2
2362 psubusb m1, m2 ; p2-p1
2363 por m1, m4 ; abs(p2-p1)
2367 psubusb m4, m7 ; q2-q3
2368 psubusb m7, m6 ; q3-q2
2369 por m7, m4 ; abs(q3-q2)
2373 psubusb m4, m6 ; q1-q2
2375 psubusb m6, m5 ; q2-q1
2376 por m6, m4 ; abs(q2-q1)
2385 pcmpeqb m0, m3 ; abs(p3-p2) <= I
2386 pcmpeqb m1, m3 ; abs(p2-p1) <= I
2387 pcmpeqb m7, m3 ; abs(q3-q2) <= I
2388 pcmpeqb m6, m3 ; abs(q2-q1) <= I
2398 ; normal_limit and high_edge_variance for p1-p0, q1-q0
2399 SWAP 7, 3 ; now m7 is zero
2401 movrow m3, [dst_reg +mstride_reg] ; p0
2402 %if mmsize == 16 && %4 == 8
2403 movhps m3, [dst8_reg+mstride_reg]
2415 psubusb m1, m3 ; p1-p0
2416 psubusb m6, m2 ; p0-p1
2417 por m1, m6 ; abs(p1-p0)
2422 pcmpeqb m1, m7 ; abs(p1-p0) <= I
2423 pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
2427 pmaxub m0, m1 ; max_I
2428 SWAP 1, 4 ; max_hev_thresh
2431 SWAP 6, 4 ; now m6 is I
2433 movrow m4, [dst_reg] ; q0
2434 %if mmsize == 16 && %4 == 8
2435 movhps m4, [dst8_reg]
2446 psubusb m1, m5 ; q0-q1
2447 psubusb m7, m4 ; q1-q0
2448 por m1, m7 ; abs(q1-q0)
2454 pcmpeqb m1, m6 ; abs(q1-q0) <= I
2455 pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
2457 pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
2465 pcmpeqb m0, m7 ; max(abs(..)) <= I
2466 pcmpeqb m6, m7 ; !(max(abs..) > thresh)
2471 mova mask_res, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
2477 mova m6, m4 ; keep copies of p0/q0 around for later use
2479 psubusb m1, m4 ; p0-q0
2480 psubusb m6, m3 ; q0-p0
2481 por m1, m6 ; abs(q0-p0)
2482 paddusb m1, m1 ; m1=2*abs(q0-p0)
2488 psubusb m7, m5 ; p1-q1
2489 psubusb m6, m2 ; q1-p1
2490 por m7, m6 ; abs(q1-p1)
2493 psrlq m7, 1 ; abs(q1-p1)/2
2494 paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
2496 pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
2497 pand m0, m7 ; normal_limit result
2499 ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
2500 %ifdef m8 ; x86-64 && sse2
2502 %define pb_80_var m8
2503 %else ; x86-32 or mmx/mmxext
2504 %define pb_80_var [pb_80]
2510 psubsb m1, m7 ; (signed) q0-p0
2515 psubsb m6, m7 ; (signed) p1-q1
mova lim_res, m6 ; 3*(q0-p0)+(p1-q1) masked for filter_mbedge
2529 pandn m7, m6 ; 3*(q0-p0)+(p1-q1) masked for filter_common
2547 paddusb m3, m1 ; p0+f2
2558 paddusb m4, m1 ; q0-f1
2560 ; filter_mbedge (m2-m5 = p1-q1; lim_res carries w)
2569 pcmpgtb m0, m1 ; which are negative
2570 punpcklbw m6, m0 ; signed byte->word
2573 mova mask_res, m6 ; backup for later in filter
2581 packsswb m6, m1 ; a0
2588 paddusb m3, m0 ; p0+a0
2589 psubusb m4, m0 ; q0-a0
2600 packsswb m6, m1 ; a1
2607 paddusb m2, m0 ; p1+a1
2608 psubusb m5, m0 ; q1-a1
packsswb m6, m1 ; a2
paddusb m1, m7 ; p2+a2
psubusb m6, m7 ; q2-a2
2647 movrow [dst2_reg+mstride_reg*4], m1
2648 movrow [dst_reg +mstride_reg*2], m2
2649 movrow [dst_reg +mstride_reg ], m3
2650 movrow [dst_reg], m4
2651 movrow [dst2_reg], m5
2652 movrow [dst2_reg+ stride_reg ], m6
2653 %if mmsize == 16 && %4 == 8
2654 add dst8_reg, mstride_reg
2655 movhps [dst8_reg+mstride_reg*2], m1
2656 movhps [dst8_reg+mstride_reg ], m2
2657 movhps [dst8_reg], m3
2658 add dst8_reg, stride_reg
2659 movhps [dst8_reg], m4
2660 movhps [dst8_reg+ stride_reg ], m5
2661 movhps [dst8_reg+ stride_reg*2], m6
2668 TRANSPOSE4x4B 1, 2, 3, 4, 0
2669 SBUTTERFLY bw, 5, 6, 0
2671 %if mmsize == 8 ; mmx/mmxext (h)
2672 WRITE_4x2D 1, 2, 3, 4, dst_reg, dst2_reg, mstride_reg, stride_reg
2674 WRITE_8W m5, m6, dst2_reg, dst_reg, mstride_reg, stride_reg, %4
2676 lea dst8_reg, [dst8_reg+mstride_reg+1]
2677 WRITE_4x4D 1, 2, 3, 4, dst_reg, dst2_reg, dst8_reg, mstride_reg, stride_reg, %4
2678 lea dst_reg, [dst2_reg+mstride_reg+4]
2679 lea dst8_reg, [dst8_reg+mstride_reg+4]
2680 WRITE_8W m5, m5, dst2_reg, dst_reg, mstride_reg, stride_reg, %2
2682 lea dst2_reg, [dst8_reg+ stride_reg]
2684 WRITE_8W m6, m6, dst2_reg, dst8_reg, mstride_reg, stride_reg, %2
2689 %if %4 == 8 ; chroma
2693 cmp dst_reg, dst8_reg
2694 mov dst_reg, dst8_reg
2698 lea dst_reg, [dst_reg + stride_reg*8-5]
2707 %ifndef m8 ; sse2 on x86-32 or mmx/mmxext
2708 mov rsp, stack_reg ; restore stack pointer
2714 %define SPLATB_REG SPLATB_REG_MMX
2715 MBEDGE_LOOPFILTER mmx, v, 6, 16, 0
2716 MBEDGE_LOOPFILTER mmx, h, 6, 16, 0
2717 MBEDGE_LOOPFILTER mmx, v, 6, 8, 0
2718 MBEDGE_LOOPFILTER mmx, h, 6, 8, 0
2720 %define SPLATB_REG SPLATB_REG_MMXEXT
2721 MBEDGE_LOOPFILTER mmxext, v, 6, 16, 0
2722 MBEDGE_LOOPFILTER mmxext, h, 6, 16, 0
2723 MBEDGE_LOOPFILTER mmxext, v, 6, 8, 0
2724 MBEDGE_LOOPFILTER mmxext, h, 6, 8, 0
2727 %define SPLATB_REG SPLATB_REG_SSE2
2728 MBEDGE_LOOPFILTER sse2, v, 5, 16, 16
2730 MBEDGE_LOOPFILTER sse2, h, 5, 16, 16
2732 MBEDGE_LOOPFILTER sse2, h, 6, 16, 16
2734 MBEDGE_LOOPFILTER sse2, v, 6, 8, 16
2735 MBEDGE_LOOPFILTER sse2, h, 6, 8, 16
2737 %define SPLATB_REG SPLATB_REG_SSSE3
2738 MBEDGE_LOOPFILTER ssse3, v, 5, 16, 16
2740 MBEDGE_LOOPFILTER ssse3, h, 5, 16, 16
2742 MBEDGE_LOOPFILTER ssse3, h, 6, 16, 16
2744 MBEDGE_LOOPFILTER ssse3, v, 6, 8, 16
2745 MBEDGE_LOOPFILTER ssse3, h, 6, 8, 16
2748 MBEDGE_LOOPFILTER sse4, h, 5, 16, 16
2750 MBEDGE_LOOPFILTER sse4, h, 6, 16, 16
2752 MBEDGE_LOOPFILTER sse4, h, 6, 8, 16