Add partially accelerated mc_luma
[x262.git] common/x86/mc-c.c
1 /*****************************************************************************
2  * mc-c.c: x86 motion compensation
3  *****************************************************************************
4  * Copyright (C) 2003-2014 x264 project
5  *
6  * Authors: Laurent Aimar <fenrir@via.ecp.fr>
7  *          Loren Merritt <lorenm@u.washington.edu>
8  *          Jason Garrett-Glaser <darkshikari@gmail.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
23  *
24  * This program is also available under a commercial proprietary license.
25  * For more information, contact us at licensing@x264.com.
26  *****************************************************************************/
27
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <string.h>
31
32 #include "common/common.h"
33 #include "mc.h"
34
35 #define DECL_SUF( func, args )\
36     void func##_mmx2 args;\
37     void func##_sse2 args;\
38     void func##_ssse3 args;\
39     void func##_avx2 args;
40
41 DECL_SUF( x264_pixel_avg_16x16, ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
42 DECL_SUF( x264_pixel_avg_16x8,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
43 DECL_SUF( x264_pixel_avg_8x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
44 DECL_SUF( x264_pixel_avg_8x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
45 DECL_SUF( x264_pixel_avg_8x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
46 DECL_SUF( x264_pixel_avg_4x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
47 DECL_SUF( x264_pixel_avg_4x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
48 DECL_SUF( x264_pixel_avg_4x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
49 DECL_SUF( x264_pixel_avg_4x2,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
50
51 #define MC_WEIGHT(w,type) \
52     void x264_mc_weight_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int );
53
54 #define MC_WEIGHT_OFFSET(w,type) \
55     void x264_mc_offsetadd_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
56     void x264_mc_offsetsub_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
57     MC_WEIGHT(w,type)
58
59 MC_WEIGHT_OFFSET( 4, mmx2 )
60 MC_WEIGHT_OFFSET( 8, mmx2 )
61 MC_WEIGHT_OFFSET( 12, mmx2 )
62 MC_WEIGHT_OFFSET( 16, mmx2 )
63 MC_WEIGHT_OFFSET( 20, mmx2 )
64 MC_WEIGHT_OFFSET( 12, sse2 )
65 MC_WEIGHT_OFFSET( 16, sse2 )
66 MC_WEIGHT_OFFSET( 20, sse2 )
67 #if HIGH_BIT_DEPTH
68 MC_WEIGHT_OFFSET( 8, sse2 )
69 #endif
70 MC_WEIGHT( 8, sse2  )
71 MC_WEIGHT( 4, ssse3 )
72 MC_WEIGHT( 8, ssse3 )
73 MC_WEIGHT( 12, ssse3 )
74 MC_WEIGHT( 16, ssse3 )
75 MC_WEIGHT( 20, ssse3 )
76 MC_WEIGHT( 8, avx2 )
77 MC_WEIGHT( 16, avx2 )
78 MC_WEIGHT( 20, avx2 )
79 #undef MC_WEIGHT_OFFSET
80 #undef MC_WEIGHT
81
82 void x264_mc_copy_w4_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
83 void x264_mc_copy_w8_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
84 void x264_mc_copy_w8_sse ( pixel *, intptr_t, pixel *, intptr_t, int );
85 void x264_mc_copy_w16_mmx( pixel *, intptr_t, pixel *, intptr_t, int );
86 void x264_mc_copy_w16_sse( pixel *, intptr_t, pixel *, intptr_t, int );
87 void x264_mc_copy_w16_aligned_sse( pixel *, intptr_t, pixel *, intptr_t, int );
88 void x264_mc_copy_w16_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
89 void x264_mc_copy_w16_aligned_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
90 void x264_prefetch_fenc_420_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
91 void x264_prefetch_fenc_422_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
92 void x264_prefetch_ref_mmx2( pixel *, intptr_t, int );
93 void x264_plane_copy_core_mmx2( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
94 void x264_plane_copy_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
95 void x264_plane_copy_interleave_core_mmx2( pixel *dst,  intptr_t i_dst,
96                                            pixel *srcu, intptr_t i_srcu,
97                                            pixel *srcv, intptr_t i_srcv, int w, int h );
98 void x264_plane_copy_interleave_core_sse2( pixel *dst,  intptr_t i_dst,
99                                            pixel *srcu, intptr_t i_srcu,
100                                            pixel *srcv, intptr_t i_srcv, int w, int h );
101 void x264_plane_copy_interleave_core_avx( pixel *dst,  intptr_t i_dst,
102                                           pixel *srcu, intptr_t i_srcu,
103                                           pixel *srcv, intptr_t i_srcv, int w, int h );
104 void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
105                                    pixel *srcu, intptr_t i_srcu,
106                                    pixel *srcv, intptr_t i_srcv, int w, int h );
107 void x264_plane_copy_deinterleave_mmx( pixel *dstu, intptr_t i_dstu,
108                                        pixel *dstv, intptr_t i_dstv,
109                                        pixel *src,  intptr_t i_src, int w, int h );
110 void x264_plane_copy_deinterleave_sse2( pixel *dstu, intptr_t i_dstu,
111                                         pixel *dstv, intptr_t i_dstv,
112                                         pixel *src,  intptr_t i_src, int w, int h );
113 void x264_plane_copy_deinterleave_ssse3( uint8_t *dstu, intptr_t i_dstu,
114                                          uint8_t *dstv, intptr_t i_dstv,
115                                          uint8_t *src,  intptr_t i_src, int w, int h );
116 void x264_plane_copy_deinterleave_avx( uint16_t *dstu, intptr_t i_dstu,
117                                        uint16_t *dstv, intptr_t i_dstv,
118                                        uint16_t *src,  intptr_t i_src, int w, int h );
119 void x264_plane_copy_deinterleave_rgb_sse2 ( pixel *dsta, intptr_t i_dsta,
120                                              pixel *dstb, intptr_t i_dstb,
121                                              pixel *dstc, intptr_t i_dstc,
122                                              pixel *src,  intptr_t i_src, int pw, int w, int h );
123 void x264_plane_copy_deinterleave_rgb_ssse3( pixel *dsta, intptr_t i_dsta,
124                                              pixel *dstb, intptr_t i_dstb,
125                                              pixel *dstc, intptr_t i_dstc,
126                                              pixel *src,  intptr_t i_src, int pw, int w, int h );
127 void x264_plane_copy_deinterleave_v210_ssse3( uint16_t *dstu, intptr_t i_dstu,
128                                               uint16_t *dstv, intptr_t i_dstv,
129                                               uint32_t *src,  intptr_t i_src, int w, int h );
130 void x264_plane_copy_deinterleave_v210_avx  ( uint16_t *dstu, intptr_t i_dstu,
131                                               uint16_t *dstv, intptr_t i_dstv,
132                                               uint32_t *src,  intptr_t i_src, int w, int h );
133 void x264_plane_copy_deinterleave_v210_avx2 ( uint16_t *dstu, intptr_t i_dstu,
134                                               uint16_t *dstv, intptr_t i_dstv,
135                                               uint32_t *src,  intptr_t i_src, int w, int h );
136 void x264_store_interleave_chroma_mmx2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
137 void x264_store_interleave_chroma_sse2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
138 void x264_store_interleave_chroma_avx ( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
139 void x264_load_deinterleave_chroma_fenc_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
140 void x264_load_deinterleave_chroma_fenc_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
141 void x264_load_deinterleave_chroma_fenc_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
142 void x264_load_deinterleave_chroma_fenc_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
143 void x264_load_deinterleave_chroma_fdec_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
144 void x264_load_deinterleave_chroma_fdec_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
145 void x264_load_deinterleave_chroma_fdec_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
146 void x264_load_deinterleave_chroma_fdec_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
147 void *x264_memcpy_aligned_mmx( void *dst, const void *src, size_t n );
148 void *x264_memcpy_aligned_sse( void *dst, const void *src, size_t n );
149 void x264_memzero_aligned_mmx( void *dst, size_t n );
150 void x264_memzero_aligned_sse( void *dst, size_t n );
151 void x264_memzero_aligned_avx( void *dst, size_t n );
152 void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
153 void x264_integral_init4h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
154 void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
155 void x264_integral_init8h_avx ( uint16_t *sum, uint8_t *pix, intptr_t stride );
156 void x264_integral_init8h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
157 void x264_integral_init4v_mmx  ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
158 void x264_integral_init4v_sse2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
159 void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
160 void x264_integral_init4v_avx2( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
161 void x264_integral_init8v_mmx ( uint16_t *sum8, intptr_t stride );
162 void x264_integral_init8v_sse2( uint16_t *sum8, intptr_t stride );
163 void x264_integral_init8v_avx2( uint16_t *sum8, intptr_t stride );
164 void x264_mbtree_propagate_cost_sse2( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
165                                       uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
166 void x264_mbtree_propagate_cost_avx ( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
167                                       uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
168 void x264_mbtree_propagate_cost_fma4( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
169                                       uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
170 void x264_mbtree_propagate_cost_avx2_fma3( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
171                                            uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
172
173 #define MC_CHROMA(cpu)\
174 void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,\
175                            int dx, int dy, int i_width, int i_height );
176 MC_CHROMA(mmx2)
177 MC_CHROMA(sse2)
178 MC_CHROMA(ssse3)
179 MC_CHROMA(ssse3_cache64)
180 MC_CHROMA(avx)
181 MC_CHROMA(avx2)
182
183 #define LOWRES(cpu)\
184 void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
185                                         intptr_t src_stride, intptr_t dst_stride, int width, int height );
186 LOWRES(mmx2)
187 LOWRES(cache32_mmx2)
188 LOWRES(sse2)
189 LOWRES(ssse3)
190 LOWRES(avx)
191 LOWRES(xop)
192 LOWRES(avx2)
193
194 #define PIXEL_AVG_W(width,cpu)\
195 void x264_pixel_avg2_w##width##_##cpu( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t );
196 /* This declares some functions that don't exist, but that isn't a problem. */
197 #define PIXEL_AVG_WALL(cpu)\
198 PIXEL_AVG_W(4,cpu); PIXEL_AVG_W(8,cpu); PIXEL_AVG_W(10,cpu); PIXEL_AVG_W(12,cpu); PIXEL_AVG_W(16,cpu); PIXEL_AVG_W(18,cpu); PIXEL_AVG_W(20,cpu);
199
200 PIXEL_AVG_WALL(mmx2)
201 PIXEL_AVG_WALL(cache32_mmx2)
202 PIXEL_AVG_WALL(cache64_mmx2)
203 PIXEL_AVG_WALL(cache64_sse2)
204 PIXEL_AVG_WALL(sse2)
205 PIXEL_AVG_WALL(cache64_ssse3)
206 PIXEL_AVG_WALL(avx2)
207
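/* The wtab dispatch tables below are indexed by block width in units of 4
 * pixels: mc_luma/get_ref pick an entry with i_width>>2, so entry i serves a
 * width of 4*i. */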
208 #define PIXEL_AVG_WTAB(instr, name1, name2, name3, name4, name5)\
209 static void (* const x264_pixel_avg_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t ) =\
210 {\
211     NULL,\
212     x264_pixel_avg2_w4_##name1,\
213     x264_pixel_avg2_w8_##name2,\
214     x264_pixel_avg2_w12_##name3,\
215     x264_pixel_avg2_w16_##name4,\
216     x264_pixel_avg2_w20_##name5,\
217 };
218
219 #if HIGH_BIT_DEPTH
220 /* w12/w20 can be replaced with w10/w18, since only 9/17 pixels actually matter */
221 #define x264_pixel_avg2_w12_mmx2       x264_pixel_avg2_w10_mmx2
222 #define x264_pixel_avg2_w20_mmx2       x264_pixel_avg2_w18_mmx2
223 #define x264_pixel_avg2_w12_sse2         x264_pixel_avg2_w10_sse2
224 #define x264_pixel_avg2_w20_sse2         x264_pixel_avg2_w18_sse2
225 #define x264_pixel_avg2_w12_avx2         x264_pixel_avg2_w16_avx2
226 #define x264_pixel_avg2_w20_avx2         x264_pixel_avg2_w18_avx2
227 #else
228 /* w16 sse2 is faster than w12 mmx as long as the cacheline issue is resolved */
229 #define x264_pixel_avg2_w12_cache64_ssse3 x264_pixel_avg2_w16_cache64_ssse3
230 #define x264_pixel_avg2_w12_cache64_sse2 x264_pixel_avg2_w16_cache64_sse2
231 #define x264_pixel_avg2_w12_sse3         x264_pixel_avg2_w16_sse3
232 #define x264_pixel_avg2_w12_sse2         x264_pixel_avg2_w16_sse2
233 #endif // HIGH_BIT_DEPTH
234
235 PIXEL_AVG_WTAB(mmx2, mmx2, mmx2, mmx2, mmx2, mmx2)
236 #if HIGH_BIT_DEPTH
237 PIXEL_AVG_WTAB(sse2, mmx2, sse2, sse2, sse2, sse2)
238 PIXEL_AVG_WTAB(avx2, mmx2, sse2, avx2, avx2, avx2)
239 #else // !HIGH_BIT_DEPTH
240 #if ARCH_X86
241 PIXEL_AVG_WTAB(cache32_mmx2, mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2)
242 PIXEL_AVG_WTAB(cache64_mmx2, mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2)
243 #endif
244 PIXEL_AVG_WTAB(sse2, mmx2, mmx2, sse2, sse2, sse2)
245 PIXEL_AVG_WTAB(cache64_sse2, mmx2, cache64_mmx2, cache64_sse2, cache64_sse2, cache64_sse2)
246 PIXEL_AVG_WTAB(cache64_ssse3, mmx2, cache64_mmx2, cache64_ssse3, cache64_ssse3, cache64_sse2)
247 PIXEL_AVG_WTAB(cache64_ssse3_atom, mmx2, mmx2, cache64_ssse3, cache64_ssse3, sse2)
248 PIXEL_AVG_WTAB(avx2, mmx2, mmx2, sse2, sse2, avx2)
249 #endif // HIGH_BIT_DEPTH
250
251 #define MC_COPY_WTAB(instr, name1, name2, name3)\
252 static void (* const x264_mc_copy_wtab_##instr[5])( pixel *, intptr_t, pixel *, intptr_t, int ) =\
253 {\
254     NULL,\
255     x264_mc_copy_w4_##name1,\
256     x264_mc_copy_w8_##name2,\
257     NULL,\
258     x264_mc_copy_w16_##name3,\
259 };
260
261 MC_COPY_WTAB(mmx,mmx,mmx,mmx)
262 #if HIGH_BIT_DEPTH
263 MC_COPY_WTAB(sse,mmx,sse,sse)
264 MC_COPY_WTAB(avx,mmx,sse,avx)
265 #else
266 MC_COPY_WTAB(sse,mmx,mmx,sse)
267 #endif
268
269 #define MC_WEIGHT_WTAB(function, instr, name1, name2, w12version)\
270     static void (* x264_mc_##function##_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ) =\
271 {\
272     x264_mc_##function##_w4_##name1,\
273     x264_mc_##function##_w4_##name1,\
274     x264_mc_##function##_w8_##name2,\
275     x264_mc_##function##_w##w12version##_##instr,\
276     x264_mc_##function##_w16_##instr,\
277     x264_mc_##function##_w20_##instr,\
278 };
279
280 #if HIGH_BIT_DEPTH
281 MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
282 MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
283 MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
284 MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,12)
285 MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,sse2,16)
286 MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,sse2,16)
287
288 static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
289 {
290     if( w->i_scale == 1<<w->i_denom )
291     {
292         if( w->i_offset < 0 )
293             w->weightfn = h->mc.offsetsub;
294         else
295             w->weightfn = h->mc.offsetadd;
296         for( int i = 0; i < 8; i++ )
297             w->cachea[i] = abs(w->i_offset<<(BIT_DEPTH-8));
298         return;
299     }
300     w->weightfn = h->mc.weight;
301     int den1 = 1<<w->i_denom;
302     int den2 = w->i_scale<<1;
303     int den3 = 1+(w->i_offset<<(BIT_DEPTH-8+1));
304     for( int i = 0; i < 8; i++ )
305     {
306         w->cachea[i] = den1;
307         w->cacheb[i] = i&1 ? den3 : den2;
308     }
309 }
310 #else
311 MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
312 MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
313 MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
314 MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,16)
315 MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,mmx2,16)
316 MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,mmx2,16)
317 MC_WEIGHT_WTAB(weight,ssse3,ssse3,ssse3,16)
318 MC_WEIGHT_WTAB(weight,avx2,ssse3,avx2,16)
319
320 static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
321 {
322     int i;
323     int16_t den1;
324
325     if( w->i_scale == 1<<w->i_denom )
326     {
327         if( w->i_offset < 0 )
328             w->weightfn = h->mc.offsetsub;
329         else
330             w->weightfn = h->mc.offsetadd;
331         memset( w->cachea, abs(w->i_offset), sizeof(w->cachea) );
332         return;
333     }
334     w->weightfn = h->mc.weight;
335     den1 = 1 << (w->i_denom - 1) | w->i_offset << w->i_denom;
336     for( i = 0; i < 8; i++ )
337     {
338         w->cachea[i] = w->i_scale;
339         w->cacheb[i] = den1;
340     }
341 }
342
343 static void x264_weight_cache_ssse3( x264_t *h, x264_weight_t *w )
344 {
345     int i, den1;
346     if( w->i_scale == 1<<w->i_denom )
347     {
348         if( w->i_offset < 0 )
349             w->weightfn = h->mc.offsetsub;
350         else
351             w->weightfn = h->mc.offsetadd;
352
353         memset( w->cachea, abs( w->i_offset ), sizeof(w->cachea) );
354         return;
355     }
356     w->weightfn = h->mc.weight;
357     den1 = w->i_scale << (8 - w->i_denom);
358     for( i = 0; i < 8; i++ )
359     {
360         w->cachea[i] = den1;
361         w->cacheb[i] = w->i_offset;
362     }
363 }
364 #endif // !HIGH_BIT_DEPTH
365
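/* For a quarter-pel position qpel_idx = (mvy&3)*4 + (mvx&3), hpel_ref0 selects
 * which of the four planes in src[] (0 = fullpel, 1 = horizontal, 2 = vertical,
 * 3 = centre half-pel) supplies the primary sample, and hpel_ref1 the plane
 * that is averaged with it when interpolation between planes is required. */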
366 static const uint8_t hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
367 static const uint8_t hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};
368
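/* mc_luma: a nonzero qpel_idx & 5 means the x and/or y fractional part is odd,
 * so the result is the average of two source planes (src1/src2), optionally
 * weighted afterwards; otherwise a single plane is weighted or plainly copied.
 * All kernels are selected by block width via the wtabs above. */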
369 #define MC_LUMA(name,instr1,instr2)\
370 static void mc_luma_##name( pixel *dst,    intptr_t i_dst_stride,\
371                             pixel *src[4], intptr_t i_src_stride,\
372                             int mvx, int mvy,\
373                             int i_width, int i_height, const x264_weight_t *weight )\
374 {\
375     int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
376     int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
377     pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
378     if( qpel_idx & 5 ) /* qpel interpolation needed */\
379     {\
380         pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
381         x264_pixel_avg_wtab_##instr1[i_width>>2](\
382                 dst, i_dst_stride, src1, i_src_stride,\
383                 src2, i_height );\
384         if( weight->weightfn )\
385             weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );\
386     }\
387     else if( weight->weightfn )\
388         weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );\
389     else\
390         x264_mc_copy_wtab_##instr2[i_width>>2](dst, i_dst_stride, src1, i_src_stride, i_height );\
391 }
392
393 MC_LUMA(mmx2,mmx2,mmx)
394 MC_LUMA(sse2,sse2,sse)
395 #if HIGH_BIT_DEPTH
396 MC_LUMA(avx2,avx2,avx)
397 #else
398 #if ARCH_X86
399 MC_LUMA(cache32_mmx2,cache32_mmx2,mmx)
400 MC_LUMA(cache64_mmx2,cache64_mmx2,mmx)
401 #endif
402 MC_LUMA(cache64_sse2,cache64_sse2,sse)
403 MC_LUMA(cache64_ssse3,cache64_ssse3,sse)
404 MC_LUMA(cache64_ssse3_atom,cache64_ssse3_atom,sse)
405 #endif // !HIGH_BIT_DEPTH
406
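/* MPEG-2 luma MC ("partially accelerated"): MPEG-2 motion vectors are half-pel,
 * so the quarter-pel mvx/mvy are halved first.  Full-pel positions go through
 * the SIMD copy tables, purely horizontal or vertical half-pel positions through
 * the SIMD avg tables, and only the centre (diagonal) half-pel case falls back
 * to the scalar bilinear loop. */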
407 #define MC_LUMA_MPEG2(name,instr1,instr2)\
408 static void mc_luma_mpeg2_##name( pixel *dst,    intptr_t i_dst_stride,\
409                                   pixel *src[4], intptr_t i_src_stride,\
410                                   int mvx, int mvy,\
411                                   int i_width, int i_height, const x264_weight_t *weight )\
412 {\
413     mvx >>= 1;\
414     mvy >>= 1;\
415     int offset = (mvy>>1)*i_src_stride + (mvx>>1);\
416     pixel *src1 = src[0] + offset;\
417     pixel *srcp = src1 + i_src_stride;\
418     if( !((mvx|mvy)&1) ) /* fullpel */ \
419         x264_mc_copy_wtab_##instr2[i_width>>2](dst, i_dst_stride, src1, i_src_stride, i_height );\
420     else if( (mvx&mvy)&1 ) /* centre hpel */\
421     {\
422         for( int y = 0; y < i_height; y++ )\
423         {\
424             for( int x = 0; x < i_width; x++ )\
425                 dst[x] = ( src1[x] + src1[x+1] + srcp[x] + srcp[x+1] + 2 ) >> 2;\
426             dst  += i_dst_stride;\
427             src1  = srcp;\
428             srcp += i_src_stride;\
429         }\
430     }\
431     else /* horizontal/vertical hpel positions */\
432     {\
433         pixel *src2 = src1 + (mvy&1)*i_src_stride + (mvx&1);\
434         x264_pixel_avg_wtab_##instr1[i_width>>2](\
435                 dst,  i_dst_stride, src1, i_src_stride,\
436                 src2, i_height );\
437     }\
438 }
439
440 MC_LUMA_MPEG2(mmx2,mmx2,mmx)
441 MC_LUMA_MPEG2(sse2,sse2,sse)
442
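/* get_ref is mc_luma without the mandatory copy: when neither interpolation nor
 * weighting is needed it returns a pointer straight into the reference plane and
 * passes the reference stride back through *i_dst_stride. */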
443 #define GET_REF(name)\
444 static pixel *get_ref_##name( pixel *dst,   intptr_t *i_dst_stride,\
445                               pixel *src[4], intptr_t i_src_stride,\
446                               int mvx, int mvy,\
447                               int i_width, int i_height, const x264_weight_t *weight )\
448 {\
449     int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
450     int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
451     pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
452     if( qpel_idx & 5 ) /* qpel interpolation needed */\
453     {\
454         pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
455         x264_pixel_avg_wtab_##name[i_width>>2](\
456                 dst, *i_dst_stride, src1, i_src_stride,\
457                 src2, i_height );\
458         if( weight->weightfn )\
459             weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );\
460         return dst;\
461     }\
462     else if( weight->weightfn )\
463     {\
464         weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );\
465         return dst;\
466     }\
467     else\
468     {\
469         *i_dst_stride = i_src_stride;\
470         return src1;\
471     }\
472 }
473
474 GET_REF(mmx2)
475 GET_REF(sse2)
476 GET_REF(avx2)
477 #if !HIGH_BIT_DEPTH
478 #if ARCH_X86
479 GET_REF(cache32_mmx2)
480 GET_REF(cache64_mmx2)
481 #endif
482 GET_REF(cache64_sse2)
483 GET_REF(cache64_ssse3)
484 GET_REF(cache64_ssse3_atom)
485 #endif // !HIGH_BIT_DEPTH
486
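/* MPEG-2 counterpart of get_ref: full-pel positions return the reference plane
 * directly, half-pel positions are produced as in mc_luma_mpeg2 above. */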
487 #define GET_REF_MPEG2(name)\
488 static pixel *get_ref_mpeg2_##name( pixel *dst,   intptr_t *i_dst_stride,\
489                                     pixel *src[4], intptr_t i_src_stride,\
490                                     int mvx, int mvy,\
491                                     int i_width, int i_height, const x264_weight_t *weight )\
492 {\
493     mvx >>= 1;\
494     mvy >>= 1;\
495     pixel *src1 = src[0] + (mvy>>1)*i_src_stride + (mvx>>1);\
496     pixel *dst_bak = dst;\
497     if( !((mvx|mvy)&1) )\
498     {\
499         *i_dst_stride = i_src_stride;\
500         return src1;\
501     }\
502     else if( (mvx&mvy)&1 ) /* centre hpel positions */\
503     {\
504         pixel *srcp = src1 + i_src_stride;\
505         for( int y = 0; y < i_height; y++ )\
506         {\
507             for( int x = 0; x < i_width; x++ )\
508                 dst[x] = ( src1[x] + src1[x+1] + srcp[x] + srcp[x+1] + 2 ) >> 2;\
509             dst  += *i_dst_stride;\
510             src1  = srcp;\
511             srcp += i_src_stride;\
512         }\
513     }\
514     else /* horizontal/vertical hpel positions */\
515     {\
516         pixel *src2 = src1 + (mvy&1)*i_src_stride + (mvx&1);\
517         x264_pixel_avg_wtab_##name[i_width>>2](\
518                 dst, *i_dst_stride, src1, i_src_stride,\
519                 src2, i_height );\
520     }\
521     return dst_bak;\
522 }
523
524 GET_REF_MPEG2(mmx2)
525 GET_REF_MPEG2(sse2)
526 GET_REF_MPEG2(avx2)
527
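/* hpel_filter wrapper: the v/c/h kernels expect `align`-byte aligned sources, so
 * the pointers are rounded down and the width widened to compensate, then each
 * row is filtered in turn; the final x264_sfence() orders any non-temporal
 * stores the kernels may have issued. */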
528 #define HPEL(align, cpu, cpuv, cpuc, cpuh)\
529 void x264_hpel_filter_v_##cpuv( pixel *dst, pixel *src, int16_t *buf, intptr_t stride, intptr_t width);\
530 void x264_hpel_filter_c_##cpuc( pixel *dst, int16_t *buf, intptr_t width );\
531 void x264_hpel_filter_h_##cpuh( pixel *dst, pixel *src, intptr_t width );\
532 static void x264_hpel_filter_##cpu( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,\
533                                     intptr_t stride, int width, int height, int16_t *buf )\
534 {\
535     intptr_t realign = (intptr_t)src & (align-1);\
536     src -= realign;\
537     dstv -= realign;\
538     dstc -= realign;\
539     dsth -= realign;\
540     width += realign;\
541     while( height-- )\
542     {\
543         x264_hpel_filter_v_##cpuv( dstv, src, buf+16, stride, width );\
544         x264_hpel_filter_c_##cpuc( dstc, buf+16, width );\
545         x264_hpel_filter_h_##cpuh( dsth, src, width );\
546         dsth += stride;\
547         dstv += stride;\
548         dstc += stride;\
549         src  += stride;\
550     }\
551     x264_sfence();\
552 }
553
554 HPEL(8, mmx2, mmx2, mmx2, mmx2)
555 #if HIGH_BIT_DEPTH
556 HPEL(16, sse2, sse2, sse2, sse2)
557 #else // !HIGH_BIT_DEPTH
558 HPEL(16, sse2_amd, mmx2, mmx2, sse2)
559 #if ARCH_X86_64
560 void x264_hpel_filter_sse2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
561 void x264_hpel_filter_ssse3( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
562 void x264_hpel_filter_avx  ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
563 void x264_hpel_filter_avx2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
564 #else
565 HPEL(16, sse2, sse2, sse2, sse2)
566 HPEL(16, ssse3, ssse3, ssse3, ssse3)
567 HPEL(16, avx, avx, avx, avx)
568 HPEL(32, avx2, avx2, avx2, avx2)
569 #endif
570 #endif // HIGH_BIT_DEPTH
571
572 static void x264_plane_copy_mmx2( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h )
573 {
574     int c_w = 16/sizeof(pixel) - 1;
575     if( w < 256 ) { // tiny resolutions don't benefit from non-temporal hints; the exact threshold is unknown.
576         x264_plane_copy_c( dst, i_dst, src, i_src, w, h );
577     } else if( !(w&c_w) ) {
578         x264_plane_copy_core_mmx2( dst, i_dst, src, i_src, w, h );
579     } else if( i_src > 0 ) {
580         // have to use plain memcpy on the last line (in memory order) to avoid overreading src
581         x264_plane_copy_core_mmx2( dst, i_dst, src, i_src, (w+c_w)&~c_w, h-1 );
582         memcpy( dst+i_dst*(h-1), src+i_src*(h-1), w*sizeof(pixel) );
583     } else {
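        // i_src <= 0: the first row is now the last one in memory order, so it
        // gets the plain memcpy while the asm covers the remaining rows.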
584         memcpy( dst, src, w*sizeof(pixel) );
585         x264_plane_copy_core_mmx2( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h-1 );
586     }
587 }
588
589 #define PLANE_INTERLEAVE(cpu) \
590 static void x264_plane_copy_interleave_##cpu( pixel *dst,  intptr_t i_dst,\
591                                               pixel *srcu, intptr_t i_srcu,\
592                                               pixel *srcv, intptr_t i_srcv, int w, int h )\
593 {\
594     if( !(w&15) ) {\
595         x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
596     } else if( w < 16 || (i_srcu ^ i_srcv) ) {\
597         x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
598     } else if( i_srcu > 0 ) {\
599         x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+15)&~15, h-1 );\
600         x264_plane_copy_interleave_c( dst+i_dst*(h-1), 0, srcu+i_srcu*(h-1), 0, srcv+i_srcv*(h-1), 0, w, 1 );\
601     } else {\
602         x264_plane_copy_interleave_c( dst, 0, srcu, 0, srcv, 0, w, 1 );\
603         x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+15)&~15, h-1 );\
604     }\
605 }
606
607 PLANE_INTERLEAVE(mmx2)
608 PLANE_INTERLEAVE(sse2)
609 #if HIGH_BIT_DEPTH
610 PLANE_INTERLEAVE(avx)
611 #endif
612
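/* CLIP_ADD performs a 16-bit add with signed saturation: the inline-asm variant
 * relies on paddsw to clamp, while the C fallback below clamps explicitly
 * against INT16_MAX with X264_MIN. */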
613 #if HAVE_X86_INLINE_ASM
614 #define CLIP_ADD(s,x)\
615 do\
616 {\
617     int temp;\
618     asm("movd       %0, %%xmm0     \n"\
619         "movd       %2, %%xmm1     \n"\
620         "paddsw %%xmm1, %%xmm0     \n"\
621         "movd   %%xmm0, %1         \n"\
622         :"+m"(s), "=&r"(temp)\
623         :"m"(x)\
624     );\
625     s = temp;\
626 } while(0)
627
628 #define CLIP_ADD2(s,x)\
629 do\
630 {\
631     asm("movd       %0, %%xmm0     \n"\
632         "movd       %1, %%xmm1     \n"\
633         "paddsw %%xmm1, %%xmm0     \n"\
634         "movd   %%xmm0, %0         \n"\
635         :"+m"(M32(s))\
636         :"m"(M32(x))\
637     );\
638 } while(0)
639 #else
640 #define CLIP_ADD(s,x) (s) = X264_MIN((s)+(x),(1<<15)-1)
641 #define CLIP_ADD2(s,x)\
642 do\
643 {\
644     CLIP_ADD((s)[0], (x)[0]);\
645     CLIP_ADD((s)[1], (x)[1]);\
646 } while(0)
647 #endif
648
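/* mbtree_propagate_list: the asm helper emits, per macroblock, the MV-displaced
 * integer MB coordinates plus that block's propagate cost split over the (up to)
 * four reference macroblocks it overlaps; the C loop below saturating-adds those
 * shares into ref_costs and handles the frame-border cases. */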
649 #define PROPAGATE_LIST(cpu)\
650 void x264_mbtree_propagate_list_internal_##cpu( int16_t (*mvs)[2], int16_t *propagate_amount,\
651                                                 uint16_t *lowres_costs, int16_t *output,\
652                                                 int bipred_weight, int mb_y, int len );\
653 \
654 static void x264_mbtree_propagate_list_##cpu( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],\
655                                               int16_t *propagate_amount, uint16_t *lowres_costs,\
656                                               int bipred_weight, int mb_y, int len, int list )\
657 {\
658     int16_t *current = h->scratch_buffer2;\
659 \
660     x264_mbtree_propagate_list_internal_##cpu( mvs, propagate_amount, lowres_costs,\
661                                                current, bipred_weight, mb_y, len );\
662 \
663     unsigned stride = h->mb.i_mb_stride;\
664     unsigned width = h->mb.i_mb_width;\
665     unsigned height = h->mb.i_mb_height;\
666 \
667     for( unsigned i = 0; i < len; current += 32 )\
668     {\
669         int end = X264_MIN( i+8, len );\
670         for( ; i < end; i++, current += 2 )\
671         {\
672             if( !(lowres_costs[i] & (1 << (list+LOWRES_COST_SHIFT))) )\
673                 continue;\
674 \
675             unsigned mbx = current[0];\
676             unsigned mby = current[1];\
677             unsigned idx0 = mbx + mby * stride;\
678             unsigned idx2 = idx0 + stride;\
679 \
680             /* Shortcut for the simple/common case of zero MV */\
681             if( !M32( mvs[i] ) )\
682             {\
683                 CLIP_ADD( ref_costs[idx0], current[16] );\
684                 continue;\
685             }\
686 \
687             if( mbx < width-1 && mby < height-1 )\
688             {\
689                 CLIP_ADD2( ref_costs+idx0, current+16 );\
690                 CLIP_ADD2( ref_costs+idx2, current+32 );\
691             }\
692             else\
693             {\
694                 /* Note: this takes advantage of unsigned representation to\
695                  * catch negative mbx/mby. */\
696                 if( mby < height )\
697                 {\
698                     if( mbx < width )\
699                         CLIP_ADD( ref_costs[idx0+0], current[16] );\
700                     if( mbx+1 < width )\
701                         CLIP_ADD( ref_costs[idx0+1], current[17] );\
702                 }\
703                 if( mby+1 < height )\
704                 {\
705                     if( mbx < width )\
706                         CLIP_ADD( ref_costs[idx2+0], current[32] );\
707                     if( mbx+1 < width )\
708                         CLIP_ADD( ref_costs[idx2+1], current[33] );\
709                 }\
710             }\
711         }\
712     }\
713 }
714
715 PROPAGATE_LIST(ssse3)
716 PROPAGATE_LIST(avx)
717 #undef CLIP_ADD
718 #undef CLIP_ADD2
719
720 void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
721 {
722     if( !(cpu&X264_CPU_MMX) )
723         return;
724
725     pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_mmx;
726     pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_mmx;
727
728     pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_mmx;
729
730     pf->copy_16x16_unaligned = x264_mc_copy_w16_mmx;
731     pf->copy[PIXEL_16x16] = x264_mc_copy_w16_mmx;
732     pf->copy[PIXEL_8x8]   = x264_mc_copy_w8_mmx;
733     pf->copy[PIXEL_4x4]   = x264_mc_copy_w4_mmx;
734     pf->memcpy_aligned  = x264_memcpy_aligned_mmx;
735     pf->memzero_aligned = x264_memzero_aligned_mmx;
736     pf->integral_init4v = x264_integral_init4v_mmx;
737     pf->integral_init8v = x264_integral_init8v_mmx;
738
739     if( !(cpu&X264_CPU_MMX2) )
740         return;
741
742     pf->prefetch_fenc_420 = x264_prefetch_fenc_420_mmx2;
743     pf->prefetch_fenc_422 = x264_prefetch_fenc_422_mmx2;
744     pf->prefetch_ref  = x264_prefetch_ref_mmx2;
745
746     pf->plane_copy = x264_plane_copy_mmx2;
747     pf->plane_copy_interleave = x264_plane_copy_interleave_mmx2;
748     pf->store_interleave_chroma = x264_store_interleave_chroma_mmx2;
749
750     pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_mmx2;
751     pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_mmx2;
752     pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_mmx2;
753     pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_mmx2;
754     pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_mmx2;
755     pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_mmx2;
756     pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_mmx2;
757     pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_mmx2;
758     pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_mmx2;
759
760     pf->mc_luma = mc_luma_mmx2;
761     pf->get_ref = get_ref_mmx2;
762     pf->mc_chroma = x264_mc_chroma_mmx2;
763     pf->hpel_filter = x264_hpel_filter_mmx2;
764     pf->weight = x264_mc_weight_wtab_mmx2;
765     pf->weight_cache = x264_weight_cache_mmx2;
766     pf->offsetadd = x264_mc_offsetadd_wtab_mmx2;
767     pf->offsetsub = x264_mc_offsetsub_wtab_mmx2;
768
769     pf->frame_init_lowres_core = x264_frame_init_lowres_core_mmx2;
770
771     if( cpu&X264_CPU_SSE )
772     {
773         pf->memcpy_aligned  = x264_memcpy_aligned_sse;
774         pf->memzero_aligned = x264_memzero_aligned_sse;
775     }
776
777 #if HIGH_BIT_DEPTH
778 #if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
779     if( cpu&(X264_CPU_CACHELINE_32|X264_CPU_CACHELINE_64) )
780         pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
781 #endif
782
783     if( !(cpu&X264_CPU_SSE2) )
784         return;
785
786     pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;
787
788     pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
789     pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;
790
791     pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
792     pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
793
794     if( cpu&X264_CPU_SSE2_IS_FAST )
795     {
796         pf->get_ref = get_ref_sse2;
797         pf->mc_luma = mc_luma_sse2;
798         pf->hpel_filter = x264_hpel_filter_sse2;
799     }
800
801     pf->integral_init4v = x264_integral_init4v_sse2;
802     pf->integral_init8v = x264_integral_init8v_sse2;
803     pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
804     pf->store_interleave_chroma = x264_store_interleave_chroma_sse2;
805     pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
806     pf->offsetsub = x264_mc_offsetsub_wtab_sse2;
807
808     if( cpu&X264_CPU_SSE2_IS_SLOW )
809         return;
810
811     pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
812     pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
813     pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
814     pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
815     pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
816     pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_sse2;
817     pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_sse2;
818     pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_sse2;
819     pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_sse2;
820
821     pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
822     pf->weight = x264_mc_weight_wtab_sse2;
823
824     if( !(cpu&X264_CPU_STACK_MOD4) )
825         pf->mc_chroma = x264_mc_chroma_sse2;
826
827     if( !(cpu&X264_CPU_SSSE3) )
828         return;
829
830     pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
831     pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_ssse3;
832     pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;
833
834     if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
835         pf->integral_init4v = x264_integral_init4v_ssse3;
836
837     if( !(cpu&X264_CPU_AVX) )
838         return;
839
840     pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
841     pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_avx;
842     pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_avx;
843     pf->plane_copy_interleave        = x264_plane_copy_interleave_avx;
844     pf->plane_copy_deinterleave      = x264_plane_copy_deinterleave_avx;
845     pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx;
846     pf->store_interleave_chroma      = x264_store_interleave_chroma_avx;
847     pf->copy[PIXEL_16x16]            = x264_mc_copy_w16_aligned_avx;
848
849     if( !(cpu&X264_CPU_STACK_MOD4) )
850         pf->mc_chroma = x264_mc_chroma_avx;
851
852     if( cpu&X264_CPU_XOP )
853         pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;
854
855     if( cpu&X264_CPU_AVX2 )
856     {
857         pf->mc_luma = mc_luma_avx2;
858         pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx2;
859     }
860 #else // !HIGH_BIT_DEPTH
861
862 #if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
863     if( cpu&X264_CPU_CACHELINE_32 )
864     {
865         pf->mc_luma = mc_luma_cache32_mmx2;
866         pf->get_ref = get_ref_cache32_mmx2;
867         pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
868     }
869     else if( cpu&X264_CPU_CACHELINE_64 )
870     {
871         pf->mc_luma = mc_luma_cache64_mmx2;
872         pf->get_ref = get_ref_cache64_mmx2;
873         pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
874     }
875 #endif
876
877     if( !(cpu&X264_CPU_SSE2) )
878         return;
879
880     pf->integral_init4v = x264_integral_init4v_sse2;
881     pf->integral_init8v = x264_integral_init8v_sse2;
882     pf->hpel_filter = x264_hpel_filter_sse2_amd;
883     pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
884     pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_sse2;
885
886     if( !(cpu&X264_CPU_SSE2_IS_SLOW) )
887     {
888         pf->weight = x264_mc_weight_wtab_sse2;
889         if( !(cpu&X264_CPU_SLOW_ATOM) )
890         {
891             pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
892             pf->offsetsub = x264_mc_offsetsub_wtab_sse2;
893         }
894
895         pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
896         pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
897         pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
898         pf->avg[PIXEL_8x16] = x264_pixel_avg_8x16_sse2;
899         pf->avg[PIXEL_8x8]  = x264_pixel_avg_8x8_sse2;
900         pf->avg[PIXEL_8x4]  = x264_pixel_avg_8x4_sse2;
901         pf->hpel_filter = x264_hpel_filter_sse2;
902         pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;
903         if( !(cpu&X264_CPU_STACK_MOD4) )
904             pf->mc_chroma = x264_mc_chroma_sse2;
905
906         if( cpu&X264_CPU_SSE2_IS_FAST )
907         {
908             pf->store_interleave_chroma = x264_store_interleave_chroma_sse2; // FIXME sse2fast? sse2medium?
909             pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
910             pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;
911             pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
912             pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
913             pf->mc_luma = mc_luma_sse2;
914             pf->get_ref = get_ref_sse2;
915             if( cpu&X264_CPU_CACHELINE_64 )
916             {
917                 pf->mc_luma = mc_luma_cache64_sse2;
918                 pf->get_ref = get_ref_cache64_sse2;
919             }
920         }
921     }
922
923     if( !(cpu&X264_CPU_SSSE3) )
924         return;
925
926     pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_ssse3;
927     pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_ssse3;
928     pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_ssse3;
929     pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_ssse3;
930     pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_ssse3;
931     pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_ssse3;
932     pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_ssse3;
933     pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_ssse3;
934     pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_ssse3;
935     pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_ssse3;
936     pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;
937
938     if( !(cpu&X264_CPU_SLOW_PSHUFB) )
939     {
940         pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_ssse3;
941         pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_ssse3;
942         pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_ssse3;
943     }
944
945     if( !(cpu&X264_CPU_SLOW_PALIGNR) )
946     {
947 #if ARCH_X86_64
948         if( !(cpu&X264_CPU_SLOW_ATOM) ) /* The 64-bit version is slower, but the 32-bit version is faster? */
949 #endif
950             pf->hpel_filter = x264_hpel_filter_ssse3;
951         pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
952     }
953     if( !(cpu&X264_CPU_STACK_MOD4) )
954         pf->mc_chroma = x264_mc_chroma_ssse3;
955
956     if( cpu&X264_CPU_CACHELINE_64 )
957     {
958         if( !(cpu&X264_CPU_STACK_MOD4) )
959             pf->mc_chroma = x264_mc_chroma_ssse3_cache64;
960         pf->mc_luma = mc_luma_cache64_ssse3;
961         pf->get_ref = get_ref_cache64_ssse3;
962         if( cpu&X264_CPU_SLOW_ATOM )
963         {
964             pf->mc_luma = mc_luma_cache64_ssse3_atom;
965             pf->get_ref = get_ref_cache64_ssse3_atom;
966         }
967     }
968
969     pf->weight_cache = x264_weight_cache_ssse3;
970     pf->weight = x264_mc_weight_wtab_ssse3;
971
972     if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
973         pf->integral_init4v = x264_integral_init4v_ssse3;
974
975     if( !(cpu&X264_CPU_SSE4) )
976         return;
977
978     pf->integral_init4h = x264_integral_init4h_sse4;
979     pf->integral_init8h = x264_integral_init8h_sse4;
980
981     if( !(cpu&X264_CPU_AVX) )
982         return;
983
984     pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
985     pf->integral_init8h = x264_integral_init8h_avx;
986     pf->hpel_filter = x264_hpel_filter_avx;
987
988     if( !(cpu&X264_CPU_STACK_MOD4) )
989         pf->mc_chroma = x264_mc_chroma_avx;
990
991     if( cpu&X264_CPU_XOP )
992         pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;
993
994     if( cpu&X264_CPU_AVX2 )
995     {
996         pf->hpel_filter = x264_hpel_filter_avx2;
997         pf->mc_chroma = x264_mc_chroma_avx2;
998         pf->weight = x264_mc_weight_wtab_avx2;
999         pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_avx2;
1000         pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_avx2;
1001         pf->integral_init8v = x264_integral_init8v_avx2;
1002         pf->integral_init4v = x264_integral_init4v_avx2;
1003         pf->integral_init8h = x264_integral_init8h_avx2;
1004         pf->integral_init4h = x264_integral_init4h_avx2;
1005         pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx2;
1006     }
1007 #endif // HIGH_BIT_DEPTH
1008
1009     if( !(cpu&X264_CPU_AVX) )
1010         return;
1011     pf->memzero_aligned = x264_memzero_aligned_avx;
1012     pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx;
1013     pf->mbtree_propagate_list = x264_mbtree_propagate_list_avx;
1014
1015     if( cpu&X264_CPU_FMA4 )
1016         pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_fma4;
1017
1018     if( !(cpu&X264_CPU_AVX2) )
1019         return;
1020     pf->get_ref = get_ref_avx2;
1021
1022     if( cpu&X264_CPU_FMA3 )
1023         pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx2_fma3;
1024 }
1025
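/* MPEG-2 init: overrides only mc_luma/get_ref with the half-pel variants defined
 * above (presumably applied on top of the regular x264_mc_init_mmx setup). */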
1026 void x264_mc_init_mmx_mpeg2( int cpu, x264_mc_functions_t *pf )
1027 {
1028     if( !(cpu&X264_CPU_MMX2) )
1029         return;
1030
1031     pf->get_ref = get_ref_mpeg2_mmx2;
1032     pf->mc_luma = mc_luma_mpeg2_mmx2;
1033
1034     if( !(cpu&X264_CPU_SSE2) )
1035         return;
1036
1037     pf->get_ref = get_ref_mpeg2_sse2;
1038     pf->mc_luma = mc_luma_mpeg2_sse2;
1039
1040     if( !(cpu&X264_CPU_AVX2) )
1041         return;
1042
1043     pf->get_ref = get_ref_mpeg2_avx2;
1044 }