/*****************************************************************************
 * mc-c.c: x86 motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2017 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "mc.h"

#define DECL_SUF( func, args )\
    void func##_mmx2 args;\
    void func##_sse2 args;\
    void func##_ssse3 args;\
    void func##_avx2 args;\
    void func##_avx512 args;

DECL_SUF( x264_pixel_avg_16x16, ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_16x8,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x2,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
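/* DECL_SUF declares one prototype per SIMD generation (e.g. x264_pixel_avg_16x16_sse2).
 * As with PIXEL_AVG_WALL below, not every declared variant necessarily exists in mc-a.asm;
 * declaring functions that are never referenced is harmless. */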

#define MC_WEIGHT(w,type) \
    void x264_mc_weight_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int );

#define MC_WEIGHT_OFFSET(w,type) \
    void x264_mc_offsetadd_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    void x264_mc_offsetsub_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    MC_WEIGHT(w,type)

MC_WEIGHT_OFFSET( 4, mmx2 )
MC_WEIGHT_OFFSET( 8, mmx2 )
MC_WEIGHT_OFFSET( 12, mmx2 )
MC_WEIGHT_OFFSET( 16, mmx2 )
MC_WEIGHT_OFFSET( 20, mmx2 )
MC_WEIGHT_OFFSET( 12, sse2 )
MC_WEIGHT_OFFSET( 16, sse2 )
MC_WEIGHT_OFFSET( 20, sse2 )
#if HIGH_BIT_DEPTH
MC_WEIGHT_OFFSET( 8, sse2 )
#endif
MC_WEIGHT( 8, sse2 )
MC_WEIGHT( 4, ssse3 )
MC_WEIGHT( 8, ssse3 )
MC_WEIGHT( 12, ssse3 )
MC_WEIGHT( 16, ssse3 )
MC_WEIGHT( 20, ssse3 )
MC_WEIGHT( 8, avx2 )
MC_WEIGHT( 16, avx2 )
MC_WEIGHT( 20, avx2 )
#undef MC_WEIGHT_OFFSET
#undef MC_WEIGHT

void x264_mc_copy_w4_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_sse ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_mmx( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_sse( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_aligned_sse( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
void x264_mc_copy_w16_aligned_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
void x264_prefetch_fenc_420_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_fenc_422_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_ref_mmx2( pixel *, intptr_t, int );
void x264_plane_copy_core_sse( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_core_avx( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_swap_core_ssse3( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_swap_core_avx2 ( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_interleave_core_mmx2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_sse2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_avx( pixel *dst,  intptr_t i_dst,
                                          pixel *srcu, intptr_t i_srcu,
                                          pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_deinterleave_sse2( pixel *dsta, intptr_t i_dsta,
                                        pixel *dstb, intptr_t i_dstb,
                                        pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_ssse3( uint8_t *dsta, intptr_t i_dsta,
                                         uint8_t *dstb, intptr_t i_dstb,
                                         uint8_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_avx( uint16_t *dsta, intptr_t i_dsta,
                                       uint16_t *dstb, intptr_t i_dstb,
                                       uint16_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_avx2( pixel *dsta, intptr_t i_dsta,
                                        pixel *dstb, intptr_t i_dstb,
                                        pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_rgb_sse2 ( pixel *dsta, intptr_t i_dsta,
                                             pixel *dstb, intptr_t i_dstb,
                                             pixel *dstc, intptr_t i_dstc,
                                             pixel *src,  intptr_t i_src, int pw, int w, int h );
void x264_plane_copy_deinterleave_rgb_ssse3( pixel *dsta, intptr_t i_dsta,
                                             pixel *dstb, intptr_t i_dstb,
                                             pixel *dstc, intptr_t i_dstc,
                                             pixel *src,  intptr_t i_src, int pw, int w, int h );
void x264_plane_copy_deinterleave_rgb_avx2 ( pixel *dsta, intptr_t i_dsta,
                                             pixel *dstb, intptr_t i_dstb,
                                             pixel *dstc, intptr_t i_dstc,
                                             pixel *src,  intptr_t i_src, int pw, int w, int h );
void x264_plane_copy_deinterleave_v210_ssse3 ( uint16_t *dstu, intptr_t i_dstu,
                                               uint16_t *dstv, intptr_t i_dstv,
                                               uint32_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_v210_avx   ( uint16_t *dstu, intptr_t i_dstu,
                                               uint16_t *dstv, intptr_t i_dstv,
                                               uint32_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_v210_avx2  ( uint16_t *dstu, intptr_t i_dstu,
                                               uint16_t *dstv, intptr_t i_dstv,
                                               uint32_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_v210_avx512( uint16_t *dstu, intptr_t i_dstu,
                                               uint16_t *dstv, intptr_t i_dstv,
                                               uint32_t *src,  intptr_t i_src, int w, int h );
void x264_store_interleave_chroma_mmx2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_sse2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_avx ( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_load_deinterleave_chroma_fenc_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_avx2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_avx2( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void *x264_memcpy_aligned_sse   ( void *dst, const void *src, size_t n );
void *x264_memcpy_aligned_avx   ( void *dst, const void *src, size_t n );
void *x264_memcpy_aligned_avx512( void *dst, const void *src, size_t n );
void x264_memzero_aligned_sse   ( void *dst, size_t n );
void x264_memzero_aligned_avx   ( void *dst, size_t n );
void x264_memzero_aligned_avx512( void *dst, size_t n );
void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx ( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4v_mmx  ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_sse2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_avx2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init8v_mmx ( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_sse2( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_avx2( uint16_t *sum8, intptr_t stride );
void x264_mbtree_propagate_cost_sse2  ( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                        uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx   ( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                        uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_fma4  ( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                        uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx2  ( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                        uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx512( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                        uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_fix8_pack_ssse3( uint16_t *dst, float *src, int count );
void x264_mbtree_fix8_pack_avx2 ( uint16_t *dst, float *src, int count );
void x264_mbtree_fix8_unpack_ssse3( float *dst, uint16_t *src, int count );
void x264_mbtree_fix8_unpack_avx2 ( float *dst, uint16_t *src, int count );

#define MC_CHROMA(cpu)\
void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,\
                           int dx, int dy, int i_width, int i_height );
MC_CHROMA(mmx2)
MC_CHROMA(sse2)
MC_CHROMA(ssse3)
MC_CHROMA(cache64_ssse3)
MC_CHROMA(avx)
MC_CHROMA(avx2)

#define LOWRES(cpu)\
void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
                                        intptr_t src_stride, intptr_t dst_stride, int width, int height );
LOWRES(mmx2)
LOWRES(cache32_mmx2)
LOWRES(sse2)
LOWRES(ssse3)
LOWRES(avx)
LOWRES(xop)
LOWRES(avx2)

#define PIXEL_AVG_W(width,cpu)\
void x264_pixel_avg2_w##width##_##cpu( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t );
/* This declares some functions that don't exist, but that isn't a problem. */
#define PIXEL_AVG_WALL(cpu)\
PIXEL_AVG_W(4,cpu); PIXEL_AVG_W(8,cpu); PIXEL_AVG_W(10,cpu); PIXEL_AVG_W(12,cpu); PIXEL_AVG_W(16,cpu); PIXEL_AVG_W(18,cpu); PIXEL_AVG_W(20,cpu);

PIXEL_AVG_WALL(mmx2)
PIXEL_AVG_WALL(cache32_mmx2)
PIXEL_AVG_WALL(cache64_mmx2)
PIXEL_AVG_WALL(cache64_sse2)
PIXEL_AVG_WALL(sse2)
PIXEL_AVG_WALL(cache64_ssse3)
PIXEL_AVG_WALL(avx2)

#define PIXEL_AVG_WTAB(instr, name1, name2, name3, name4, name5)\
static void (* const x264_pixel_avg_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t ) =\
{\
    NULL,\
    x264_pixel_avg2_w4_##name1,\
    x264_pixel_avg2_w8_##name2,\
    x264_pixel_avg2_w12_##name3,\
    x264_pixel_avg2_w16_##name4,\
    x264_pixel_avg2_w20_##name5,\
};
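/* The wtab arrays are indexed by width>>2: slots 1..5 map to widths 4, 8, 12, 16 and 20;
 * slot 0 is unused here. */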

#if HIGH_BIT_DEPTH
/* we can replace w12/w20 with w10/w18 as only 9/17 pixels in fact are important */
#define x264_pixel_avg2_w12_mmx2          x264_pixel_avg2_w10_mmx2
#define x264_pixel_avg2_w20_mmx2          x264_pixel_avg2_w18_mmx2
#define x264_pixel_avg2_w12_sse2          x264_pixel_avg2_w10_sse2
#define x264_pixel_avg2_w20_sse2          x264_pixel_avg2_w18_sse2
#define x264_pixel_avg2_w12_avx2          x264_pixel_avg2_w16_avx2
#define x264_pixel_avg2_w20_avx2          x264_pixel_avg2_w18_avx2
#else
/* w16 sse2 is faster than w12 mmx as long as the cacheline issue is resolved */
#define x264_pixel_avg2_w12_cache64_ssse3 x264_pixel_avg2_w16_cache64_ssse3
#define x264_pixel_avg2_w12_cache64_sse2  x264_pixel_avg2_w16_cache64_sse2
#define x264_pixel_avg2_w12_sse3          x264_pixel_avg2_w16_sse3
#define x264_pixel_avg2_w12_sse2          x264_pixel_avg2_w16_sse2
#endif // HIGH_BIT_DEPTH

PIXEL_AVG_WTAB(mmx2, mmx2, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
PIXEL_AVG_WTAB(sse2, mmx2, sse2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(avx2, mmx2, sse2, avx2, avx2, avx2)
#else // !HIGH_BIT_DEPTH
#if ARCH_X86
PIXEL_AVG_WTAB(cache32_mmx2, mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2)
PIXEL_AVG_WTAB(cache64_mmx2, mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2)
#endif
PIXEL_AVG_WTAB(sse2, mmx2, mmx2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(cache64_sse2, mmx2, cache64_mmx2, cache64_sse2, cache64_sse2, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3, mmx2, cache64_mmx2, cache64_ssse3, cache64_ssse3, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3_atom, mmx2, mmx2, cache64_ssse3, cache64_ssse3, sse2)
PIXEL_AVG_WTAB(avx2, mmx2, mmx2, sse2, sse2, avx2)
#endif // HIGH_BIT_DEPTH

#define MC_COPY_WTAB(instr, name1, name2, name3)\
static void (* const x264_mc_copy_wtab_##instr[5])( pixel *, intptr_t, pixel *, intptr_t, int ) =\
{\
    NULL,\
    x264_mc_copy_w4_##name1,\
    x264_mc_copy_w8_##name2,\
    NULL,\
    x264_mc_copy_w16_##name3,\
};
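/* The NULL slots (widths 0 and 12) are never used: luma partitions only produce
 * copy widths of 4, 8 and 16. */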

MC_COPY_WTAB(mmx,mmx,mmx,mmx)
#if HIGH_BIT_DEPTH
MC_COPY_WTAB(sse,mmx,sse,sse)
MC_COPY_WTAB(avx,mmx,sse,avx)
#else
MC_COPY_WTAB(sse,mmx,mmx,sse)
#endif

#define MC_WEIGHT_WTAB(function, instr, name1, name2, w12version)\
    static void (* x264_mc_##function##_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ) =\
{\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w8_##name2,\
    x264_mc_##function##_w##w12version##_##instr,\
    x264_mc_##function##_w16_##instr,\
    x264_mc_##function##_w20_##instr,\
};
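/* Slots 0 and 1 both point at the w4 kernel, so any width below 8 resolves to it;
 * w12version lets a configuration substitute a w16 kernel where no w12 version exists. */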

#if HIGH_BIT_DEPTH
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,12)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,sse2,16)

static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        for( int i = 0; i < 8; i++ )
            w->cachea[i] = abs(w->i_offset<<(BIT_DEPTH-8));
        return;
    }
    w->weightfn = h->mc.weight;
    int den1 = 1<<w->i_denom;
    int den2 = w->i_scale<<1;
    int den3 = 1+(w->i_offset<<(BIT_DEPTH-8+1));
    for( int i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = i&1 ? den3 : den2;
    }
}
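/* H.264 weighted prediction is clip( ((in*scale + (1<<(denom-1))) >> denom) + offset ).
 * When scale == 1<<denom the multiply is a no-op and only the (bit-depth-scaled) offset is
 * applied, hence the cheaper offsetadd/offsetsub paths. The den1/den2/den3 interleave packs
 * the rounding and offset constants in the operand order the asm kernel expects (an
 * assumption about the layout used by the kernels in mc-a.asm). */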
#else
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(weight,ssse3,ssse3,ssse3,16)
MC_WEIGHT_WTAB(weight,avx2,ssse3,avx2,16)

static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    int i;
    int16_t den1;

    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        memset( w->cachea, abs(w->i_offset), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = 1 << (w->i_denom - 1) | w->i_offset << w->i_denom;
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = w->i_scale;
        w->cacheb[i] = den1;
    }
}
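/* den1 merges the rounding term and the pre-shifted offset: offset<<denom has its low denom
 * bits clear, so the | is equivalent to +, and the kernel can compute
 * (in*scale + den1) >> denom == ((in*scale + (1<<(denom-1))) >> denom) + offset in one add. */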

static void x264_weight_cache_ssse3( x264_t *h, x264_weight_t *w )
{
    int i, den1;
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;

        memset( w->cachea, abs( w->i_offset ), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = w->i_scale << (8 - w->i_denom);
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = w->i_offset;
    }
}
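/* Pre-shifting scale by (8-denom) turns the variable shift into a constant one:
 * (in*scale) >> denom == (in*(scale << (8-denom))) >> 8, so the SSSE3 kernel can always
 * shift by 8 regardless of denom. */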
#endif // !HIGH_BIT_DEPTH

#define MC_LUMA(name,instr1,instr2)\
static void mc_luma_##name( pixel *dst,    intptr_t i_dst_stride,\
                            pixel *src[4], intptr_t i_src_stride,\
                            int mvx, int mvy,\
                            int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##instr1[i_width>>2](\
                dst, i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );\
    }\
    else if( weight->weightfn )\
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );\
    else\
        x264_mc_copy_wtab_##instr2[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, i_height );\
}
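/* qpel_idx packs the fractional MV as ((mvy&3)<<2) | (mvx&3). src[] holds the four filtered
 * planes (fpel, h, v, c); x264_hpel_ref0/ref1 pick which pair to average for each position.
 * qpel_idx & 5 tests the low bit of each component: if either fractional part is odd, the
 * position lies between two hpel planes and must be interpolated by averaging them. */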

MC_LUMA(mmx2,mmx2,mmx)
MC_LUMA(sse2,sse2,sse)
#if HIGH_BIT_DEPTH
MC_LUMA(avx2,avx2,avx)
#else
#if ARCH_X86
MC_LUMA(cache32_mmx2,cache32_mmx2,mmx)
MC_LUMA(cache64_mmx2,cache64_mmx2,mmx)
#endif
MC_LUMA(cache64_sse2,cache64_sse2,sse)
MC_LUMA(cache64_ssse3,cache64_ssse3,sse)
MC_LUMA(cache64_ssse3_atom,cache64_ssse3_atom,sse)
#endif // !HIGH_BIT_DEPTH

#define GET_REF(name)\
static pixel *get_ref_##name( pixel *dst,   intptr_t *i_dst_stride,\
                              pixel *src[4], intptr_t i_src_stride,\
                              int mvx, int mvy,\
                              int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##name[i_width>>2](\
                dst, *i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );\
        return dst;\
    }\
    else if( weight->weightfn )\
    {\
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );\
        return dst;\
    }\
    else\
    {\
        *i_dst_stride = i_src_stride;\
        return src1;\
    }\
}
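/* get_ref avoids a copy in the plain fullpel case: instead of writing into dst it returns a
 * pointer straight into the reference plane and reports that plane's stride via *i_dst_stride. */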

GET_REF(mmx2)
GET_REF(sse2)
GET_REF(avx2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
GET_REF(cache32_mmx2)
GET_REF(cache64_mmx2)
#endif
GET_REF(cache64_sse2)
GET_REF(cache64_ssse3)
GET_REF(cache64_ssse3_atom)
#endif // !HIGH_BIT_DEPTH

#define HPEL(align, cpu, cpuv, cpuc, cpuh)\
void x264_hpel_filter_v_##cpuv( pixel *dst, pixel *src, int16_t *buf, intptr_t stride, intptr_t width );\
void x264_hpel_filter_c_##cpuc( pixel *dst, int16_t *buf, intptr_t width );\
void x264_hpel_filter_h_##cpuh( pixel *dst, pixel *src, intptr_t width );\
static void x264_hpel_filter_##cpu( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,\
                                    intptr_t stride, int width, int height, int16_t *buf )\
{\
    intptr_t realign = (intptr_t)src & (align-1);\
    src -= realign;\
    dstv -= realign;\
    dstc -= realign;\
    dsth -= realign;\
    width += realign;\
    while( height-- )\
    {\
        x264_hpel_filter_v_##cpuv( dstv, src, buf+16, stride, width );\
        x264_hpel_filter_c_##cpuc( dstc, buf+16, width );\
        x264_hpel_filter_h_##cpuh( dsth, src, width );\
        dsth += stride;\
        dstv += stride;\
        dstc += stride;\
        src  += stride;\
    }\
    x264_sfence();\
}
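/* The realign step rounds src down to the vector alignment and widens each row to match, so
 * the kernels can use aligned accesses throughout; frame planes are padded, so filtering a few
 * extra pixels on the left is harmless. The asm kernels use non-temporal stores, hence the
 * trailing x264_sfence(). */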

HPEL(8, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
HPEL(16, sse2, sse2, sse2, sse2)
#else // !HIGH_BIT_DEPTH
HPEL(16, sse2_amd, mmx2, mmx2, sse2)
#if ARCH_X86_64
void x264_hpel_filter_sse2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_ssse3( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx  ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
#else
HPEL(16, sse2, sse2, sse2, sse2)
HPEL(16, ssse3, ssse3, ssse3, ssse3)
HPEL(16, avx, avx, avx, avx)
HPEL(32, avx2, avx2, avx2, avx2)
#endif
#endif // HIGH_BIT_DEPTH

PLANE_COPY(16, sse)
PLANE_COPY(32, avx)

PLANE_COPY_SWAP(16, ssse3)
PLANE_COPY_SWAP(32, avx2)

#if HIGH_BIT_DEPTH
PLANE_COPY_YUYV(64, sse2)
PLANE_COPY_YUYV(64, avx)
#else
PLANE_COPY_YUYV(32, sse2)
PLANE_COPY_YUYV(32, ssse3)
#endif
PLANE_COPY_YUYV(64, avx2)

PLANE_INTERLEAVE(mmx2)
PLANE_INTERLEAVE(sse2)
#if HIGH_BIT_DEPTH
PLANE_INTERLEAVE(avx)
#endif

#if HAVE_X86_INLINE_ASM
#undef MC_CLIP_ADD
#define MC_CLIP_ADD(s,x)\
do\
{\
    int temp;\
    asm("movd       %0, %%xmm0     \n"\
        "movd       %2, %%xmm1     \n"\
        "paddsw %%xmm1, %%xmm0     \n"\
        "movd   %%xmm0, %1         \n"\
        :"+m"(s), "=&r"(temp)\
        :"m"(x)\
    );\
    s = temp;\
} while( 0 )

#undef MC_CLIP_ADD2
#define MC_CLIP_ADD2(s,x)\
do\
{\
    asm("movd       %0, %%xmm0     \n"\
        "movd       %1, %%xmm1     \n"\
        "paddsw %%xmm1, %%xmm0     \n"\
        "movd   %%xmm0, %0         \n"\
        :"+m"(M32(s))\
        :"m"(M32(x))\
    );\
} while( 0 )
#endif
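/* These overrides replace the scalar MC_CLIP_ADD/MC_CLIP_ADD2 used by the PROPAGATE_LIST
 * template (mc.h): paddsw provides the signed 16-bit saturating add directly, and the
 * two-lane variant adds a pair of int16s packed in one 32-bit word per operation. */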

PROPAGATE_LIST(ssse3)
PROPAGATE_LIST(avx)
PROPAGATE_LIST(avx2)

#if ARCH_X86_64
void x264_mbtree_propagate_list_internal_avx512( size_t len, uint16_t *ref_costs, int16_t (*mvs)[2], int16_t *propagate_amount,
                                                 uint16_t *lowres_costs, int bipred_weight, int mb_y,
                                                 int width, int height, int stride, int list_mask );

static void x264_mbtree_propagate_list_avx512( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],
                                               int16_t *propagate_amount, uint16_t *lowres_costs,
                                               int bipred_weight, int mb_y, int len, int list )
{
    x264_mbtree_propagate_list_internal_avx512( len, ref_costs, mvs, propagate_amount, lowres_costs, bipred_weight << 9,
                                                mb_y << 16, h->mb.i_mb_width, h->mb.i_mb_height, h->mb.i_mb_stride,
                                                (1 << LOWRES_COST_SHIFT) << list );
}
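/* The wrapper pre-shifts bipred_weight and mb_y into the bit positions the asm consumes and
 * turns the list index into its LOWRES_COST mask bit, keeping that setup out of the kernel. */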
#endif

void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
{
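    /* Each !(cpu&FLAG) check below returns early, so later (faster) implementations simply
     * overwrite the pointers installed by earlier ones. */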
    if( !(cpu&X264_CPU_MMX) )
        return;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_8x8]   = x264_mc_copy_w8_mmx;
    pf->copy[PIXEL_4x4]   = x264_mc_copy_w4_mmx;
    pf->integral_init4v = x264_integral_init4v_mmx;
    pf->integral_init8v = x264_integral_init8v_mmx;

    if( !(cpu&X264_CPU_MMX2) )
        return;

    pf->prefetch_fenc_420 = x264_prefetch_fenc_420_mmx2;
    pf->prefetch_fenc_422 = x264_prefetch_fenc_422_mmx2;
    pf->prefetch_ref  = x264_prefetch_ref_mmx2;

    pf->plane_copy_interleave = x264_plane_copy_interleave_mmx2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_mmx2;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_mmx2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_mmx2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_mmx2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_mmx2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_mmx2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_mmx2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_mmx2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_mmx2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_mmx2;

    pf->mc_luma = mc_luma_mmx2;
    pf->get_ref = get_ref_mmx2;
    pf->mc_chroma = x264_mc_chroma_mmx2;
    pf->hpel_filter = x264_hpel_filter_mmx2;
    pf->weight = x264_mc_weight_wtab_mmx2;
    pf->weight_cache = x264_weight_cache_mmx2;
    pf->offsetadd = x264_mc_offsetadd_wtab_mmx2;
    pf->offsetsub = x264_mc_offsetsub_wtab_mmx2;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_mmx2;

    if( cpu&X264_CPU_SSE )
    {
        pf->memcpy_aligned  = x264_memcpy_aligned_sse;
        pf->memzero_aligned = x264_memzero_aligned_sse;
        pf->plane_copy = x264_plane_copy_sse;
    }

#if HIGH_BIT_DEPTH
#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&(X264_CPU_CACHELINE_32|X264_CPU_CACHELINE_64) )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;

    pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
    pf->plane_copy_deinterleave_yuyv = x264_plane_copy_deinterleave_yuyv_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->get_ref = get_ref_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
    }

    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_sse2;
    pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
    pf->offsetsub = x264_mc_offsetsub_wtab_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_sse2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_sse2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_sse2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_sse2;

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
    pf->weight = x264_mc_weight_wtab_sse2;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    pf->plane_copy_swap = x264_plane_copy_swap_ssse3;
    pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_ssse3;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;
    pf->mbtree_fix8_pack      = x264_mbtree_fix8_pack_ssse3;
    pf->mbtree_fix8_unpack    = x264_mbtree_fix8_unpack_ssse3;

    if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_avx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_avx;
    pf->plane_copy_interleave        = x264_plane_copy_interleave_avx;
    pf->plane_copy_deinterleave      = x264_plane_copy_deinterleave_avx;
    pf->plane_copy_deinterleave_yuyv = x264_plane_copy_deinterleave_yuyv_avx;
    pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx;
    pf->store_interleave_chroma      = x264_store_interleave_chroma_avx;
    pf->copy[PIXEL_16x16]            = x264_mc_copy_w16_aligned_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;

    if( cpu&X264_CPU_AVX2 )
    {
        pf->mc_luma = mc_luma_avx2;
        pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_avx2;
        pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx2;
    }

    if( cpu&X264_CPU_AVX512 )
    {
        pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx512;
    }
#else // !HIGH_BIT_DEPTH

#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&X264_CPU_CACHELINE_32 )
    {
        pf->mc_luma = mc_luma_cache32_mmx2;
        pf->get_ref = get_ref_cache32_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
    else if( cpu&X264_CPU_CACHELINE_64 )
    {
        pf->mc_luma = mc_luma_cache64_mmx2;
        pf->get_ref = get_ref_cache64_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2_amd;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
    pf->plane_copy_deinterleave_yuyv = x264_plane_copy_deinterleave_yuyv_sse2;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_sse2;

    if( !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        pf->weight = x264_mc_weight_wtab_sse2;
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
            pf->offsetsub = x264_mc_offsetsub_wtab_sse2;
        }

        pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
        pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
        pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
        pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_sse2;

        if( cpu&X264_CPU_SSE2_IS_FAST )
        {
            pf->store_interleave_chroma = x264_store_interleave_chroma_sse2; // FIXME sse2fast? sse2medium?
            pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
            pf->mc_luma = mc_luma_sse2;
            pf->get_ref = get_ref_sse2;
            if( cpu&X264_CPU_CACHELINE_64 )
            {
                pf->mc_luma = mc_luma_cache64_sse2;
                pf->get_ref = get_ref_cache64_sse2;
            }
        }
    }

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_ssse3;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_ssse3;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_ssse3;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_ssse3;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_ssse3;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_ssse3;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_ssse3;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_ssse3;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_ssse3;
    pf->plane_copy_swap = x264_plane_copy_swap_ssse3;
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_ssse3;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;
    pf->mbtree_fix8_pack      = x264_mbtree_fix8_pack_ssse3;
    pf->mbtree_fix8_unpack    = x264_mbtree_fix8_unpack_ssse3;

    if( !(cpu&X264_CPU_SLOW_PSHUFB) )
    {
        pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_ssse3;
        pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_ssse3;
        pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_ssse3;
        pf->plane_copy_deinterleave_yuyv = x264_plane_copy_deinterleave_yuyv_ssse3;
    }

    if( !(cpu&X264_CPU_SLOW_PALIGNR) )
    {
#if ARCH_X86_64
        if( !(cpu&X264_CPU_SLOW_ATOM) ) /* The 64-bit version is slower, but the 32-bit version is faster? */
#endif
            pf->hpel_filter = x264_hpel_filter_ssse3;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    }
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_ssse3;

    if( cpu&X264_CPU_CACHELINE_64 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_cache64_ssse3;
        pf->mc_luma = mc_luma_cache64_ssse3;
        pf->get_ref = get_ref_cache64_ssse3;
        if( cpu&X264_CPU_SLOW_ATOM )
        {
            pf->mc_luma = mc_luma_cache64_ssse3_atom;
            pf->get_ref = get_ref_cache64_ssse3_atom;
        }
    }

    pf->weight_cache = x264_weight_cache_ssse3;
    pf->weight = x264_mc_weight_wtab_ssse3;

    if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_SSE4) )
        return;

    pf->integral_init4h = x264_integral_init4h_sse4;
    pf->integral_init8h = x264_integral_init8h_sse4;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->integral_init8h = x264_integral_init8h_avx;
    pf->hpel_filter = x264_hpel_filter_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;

    if( cpu&X264_CPU_AVX2 )
    {
        pf->hpel_filter = x264_hpel_filter_avx2;
        pf->mc_chroma = x264_mc_chroma_avx2;
        pf->weight = x264_mc_weight_wtab_avx2;
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_avx2;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_avx2;
        pf->integral_init8v = x264_integral_init8v_avx2;
        pf->integral_init4v = x264_integral_init4v_avx2;
        pf->integral_init8h = x264_integral_init8h_avx2;
        pf->integral_init4h = x264_integral_init4h_avx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx2;
        pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_avx2;
    }

    if( cpu&X264_CPU_AVX512 )
    {
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_avx512;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_avx512;
    }
#endif // HIGH_BIT_DEPTH

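    /* Everything below applies to both bit depths. */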
    if( !(cpu&X264_CPU_AVX) )
        return;
    pf->memcpy_aligned  = x264_memcpy_aligned_avx;
    pf->memzero_aligned = x264_memzero_aligned_avx;
    pf->plane_copy = x264_plane_copy_avx;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_avx;

    if( cpu&X264_CPU_FMA4 )
        pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_fma4;

    if( !(cpu&X264_CPU_AVX2) )
        return;
    pf->plane_copy_swap = x264_plane_copy_swap_avx2;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_avx2;
    pf->plane_copy_deinterleave_yuyv = x264_plane_copy_deinterleave_yuyv_avx2;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_avx2;
    pf->get_ref = get_ref_avx2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx2;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_avx2;
    pf->mbtree_fix8_pack      = x264_mbtree_fix8_pack_avx2;
    pf->mbtree_fix8_unpack    = x264_mbtree_fix8_unpack_avx2;

    if( !(cpu&X264_CPU_AVX512) )
        return;
    pf->memcpy_aligned = x264_memcpy_aligned_avx512;
    pf->memzero_aligned = x264_memzero_aligned_avx512;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx512;
#if ARCH_X86_64
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_avx512;
#endif
}