/*****************************************************************************
 * mc-c.c: x86 motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2013 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Jason Garrett-Glaser <darkshikari@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "common/common.h"
#include "mc.h"

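/* The prototypes below are entry points implemented in assembly (mc-a.asm,
 * mc-a2.asm and friends); this file only declares them and wires the fastest
 * available version into x264_mc_functions_t in x264_mc_init_mmx() below. */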
#define DECL_SUF( func, args )\
    void func##_mmx2 args;\
    void func##_sse2 args;\
    void func##_ssse3 args;

DECL_SUF( x264_pixel_avg_16x16, ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_16x8,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x2,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))

#define MC_WEIGHT(w,type) \
    void x264_mc_weight_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int );

#define MC_WEIGHT_OFFSET(w,type) \
    void x264_mc_offsetadd_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    void x264_mc_offsetsub_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    MC_WEIGHT(w,type)

MC_WEIGHT_OFFSET( 4, mmx2 )
MC_WEIGHT_OFFSET( 8, mmx2 )
MC_WEIGHT_OFFSET( 12, mmx2 )
MC_WEIGHT_OFFSET( 16, mmx2 )
MC_WEIGHT_OFFSET( 20, mmx2 )
MC_WEIGHT_OFFSET( 12, sse2 )
MC_WEIGHT_OFFSET( 16, sse2 )
MC_WEIGHT_OFFSET( 20, sse2 )
#if HIGH_BIT_DEPTH
MC_WEIGHT_OFFSET( 8, sse2 )
#endif
MC_WEIGHT( 8, sse2 )
MC_WEIGHT( 4, ssse3 )
MC_WEIGHT( 8, ssse3 )
MC_WEIGHT( 12, ssse3 )
MC_WEIGHT( 16, ssse3 )
MC_WEIGHT( 20, ssse3 )
#undef MC_WEIGHT_OFFSET
#undef MC_WEIGHT

void x264_mc_copy_w4_mmx  ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_mmx  ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_sse2 ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_sse2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_aligned_sse2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_fenc_420_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_fenc_422_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_ref_mmx2( pixel *, intptr_t, int );
void x264_plane_copy_core_mmx2( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_interleave_core_mmx2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_sse2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_avx( pixel *dst,  intptr_t i_dst,
                                          pixel *srcu, intptr_t i_srcu,
                                          pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
                                   pixel *srcu, intptr_t i_srcu,
                                   pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_deinterleave_mmx( pixel *dstu, intptr_t i_dstu,
                                       pixel *dstv, intptr_t i_dstv,
                                       pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_sse2( pixel *dstu, intptr_t i_dstu,
                                        pixel *dstv, intptr_t i_dstv,
                                        pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_ssse3( uint8_t *dstu, intptr_t i_dstu,
                                         uint8_t *dstv, intptr_t i_dstv,
                                         uint8_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_avx( uint16_t *dstu, intptr_t i_dstu,
                                       uint16_t *dstv, intptr_t i_dstv,
                                       uint16_t *src,  intptr_t i_src, int w, int h );
void x264_store_interleave_chroma_mmx2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_sse2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_avx ( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_load_deinterleave_chroma_fenc_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void *x264_memcpy_aligned_mmx ( void *dst, const void *src, size_t n );
void *x264_memcpy_aligned_sse2( void *dst, const void *src, size_t n );
void x264_memzero_aligned_mmx ( void *dst, size_t n );
void x264_memzero_aligned_sse2( void *dst, size_t n );
void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx ( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4v_mmx  ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_sse2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init8v_mmx ( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_sse2( uint16_t *sum8, intptr_t stride );
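/* mb-tree propagation. Roughly, with intra = intra_costs[i]*inv_qscales[i]:
 * dst[i] = (propagate_in[i] + intra*fps_factor) * (intra - min(inter, intra)) / intra.
 * (A rough guide only; the exact fixed-point details live in the C reference
 * in common/mc.c.) */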
void x264_mbtree_propagate_cost_sse2( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx ( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_fma4( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx2_fma3( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                           uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );

#define MC_CHROMA(cpu)\
void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,\
                           int dx, int dy, int i_width, int i_height );
MC_CHROMA(mmx2)
MC_CHROMA(sse2)
MC_CHROMA(sse2_misalign)
MC_CHROMA(ssse3)
MC_CHROMA(ssse3_cache64)
MC_CHROMA(avx)
MC_CHROMA(avx_cache64)

#define LOWRES(cpu)\
void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
                                        intptr_t src_stride, intptr_t dst_stride, int width, int height );
LOWRES(mmx2)
LOWRES(cache32_mmx2)
LOWRES(sse2)
LOWRES(ssse3)
LOWRES(avx)
LOWRES(xop)

#define PIXEL_AVG_W(width,cpu)\
void x264_pixel_avg2_w##width##_##cpu( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t );
/* This declares some functions that don't exist, but that isn't a problem: the
 * missing widths are never referenced, or are #defined to existing ones below. */
#define PIXEL_AVG_WALL(cpu)\
PIXEL_AVG_W(4,cpu); PIXEL_AVG_W(8,cpu); PIXEL_AVG_W(10,cpu); PIXEL_AVG_W(12,cpu); PIXEL_AVG_W(16,cpu); PIXEL_AVG_W(18,cpu); PIXEL_AVG_W(20,cpu);

PIXEL_AVG_WALL(mmx2)
PIXEL_AVG_WALL(cache32_mmx2)
PIXEL_AVG_WALL(cache64_mmx2)
PIXEL_AVG_WALL(cache64_sse2)
PIXEL_AVG_WALL(sse2)
PIXEL_AVG_WALL(sse2_misalign)
PIXEL_AVG_WALL(cache64_ssse3)

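/* Width tables, indexed by block width in units of 4 (w4..w20 -> indices
 * 1..5); index 0 is unused. */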
#define PIXEL_AVG_WTAB(instr, name1, name2, name3, name4, name5)\
static void (* const x264_pixel_avg_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t ) =\
{\
    NULL,\
    x264_pixel_avg2_w4_##name1,\
    x264_pixel_avg2_w8_##name2,\
    x264_pixel_avg2_w12_##name3,\
    x264_pixel_avg2_w16_##name4,\
    x264_pixel_avg2_w20_##name5,\
};

#if HIGH_BIT_DEPTH
/* w12/w20 can be replaced with w10/w18, as only 9/17 pixels are actually needed */
#define x264_pixel_avg2_w12_mmx2          x264_pixel_avg2_w10_mmx2
#define x264_pixel_avg2_w20_mmx2          x264_pixel_avg2_w18_mmx2
#define x264_pixel_avg2_w12_sse2          x264_pixel_avg2_w10_sse2
#define x264_pixel_avg2_w20_sse2          x264_pixel_avg2_w18_sse2
#else
/* w16 sse2 is faster than w12 mmx as long as the cacheline issue is resolved */
#define x264_pixel_avg2_w12_cache64_ssse3 x264_pixel_avg2_w16_cache64_ssse3
#define x264_pixel_avg2_w12_cache64_sse2  x264_pixel_avg2_w16_cache64_sse2
#define x264_pixel_avg2_w12_sse3          x264_pixel_avg2_w16_sse3
#define x264_pixel_avg2_w12_sse2          x264_pixel_avg2_w16_sse2
#endif // HIGH_BIT_DEPTH

PIXEL_AVG_WTAB(mmx2, mmx2, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
PIXEL_AVG_WTAB(sse2, mmx2, sse2, sse2, sse2, sse2)
#else // !HIGH_BIT_DEPTH
#if ARCH_X86
PIXEL_AVG_WTAB(cache32_mmx2, mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2)
PIXEL_AVG_WTAB(cache64_mmx2, mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2)
#endif
PIXEL_AVG_WTAB(sse2, mmx2, mmx2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(sse2_misalign, mmx2, mmx2, sse2, sse2, sse2_misalign)
PIXEL_AVG_WTAB(cache64_sse2, mmx2, cache64_mmx2, cache64_sse2, cache64_sse2, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3, mmx2, cache64_mmx2, cache64_ssse3, cache64_ssse3, cache64_sse2)
#endif // HIGH_BIT_DEPTH

#define MC_COPY_WTAB(instr, name1, name2, name3)\
static void (* const x264_mc_copy_wtab_##instr[5])( pixel *, intptr_t, pixel *, intptr_t, int ) =\
{\
    NULL,\
    x264_mc_copy_w4_##name1,\
    x264_mc_copy_w8_##name2,\
    NULL,\
    x264_mc_copy_w16_##name3,\
};

MC_COPY_WTAB(mmx,mmx,mmx,mmx)
#if HIGH_BIT_DEPTH
MC_COPY_WTAB(sse2,mmx,sse2,sse2)
#else
MC_COPY_WTAB(sse2,mmx,mmx,sse2)
#endif

#define MC_WEIGHT_WTAB(function, instr, name1, name2, w12version)\
    static void (* x264_mc_##function##_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ) =\
{\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w8_##name2,\
    x264_mc_##function##_w##w12version##_##instr,\
    x264_mc_##function##_w16_##instr,\
    x264_mc_##function##_w20_##instr,\
};

#if HIGH_BIT_DEPTH
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,12)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,sse2,16)

static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        for( int i = 0; i < 8; i++ )
            w->cachea[i] = abs(w->i_offset<<(BIT_DEPTH-8));
        return;
    }
    w->weightfn = h->mc.weight;
    int den1 = 1<<w->i_denom;
    int den2 = w->i_scale<<1;
    int den3 = 1+(w->i_offset<<(BIT_DEPTH-8+1));
    for( int i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = i&1 ? den3 : den2;
    }
}
#else
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(weight,ssse3,ssse3,ssse3,16)

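/* For the weighted case below, den1 folds the rounding term and the offset
 * into one constant: (s*scale + den1) >> denom ==
 * ((s*scale + (1<<(denom-1))) >> denom) + offset, since offset<<denom
 * contributes no low bits. cachea/cacheb presumably feed a multiply-add in
 * the asm. */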
static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    int i;
    int16_t den1;

    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        memset( w->cachea, abs(w->i_offset), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = 1 << (w->i_denom - 1) | w->i_offset << w->i_denom;
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = w->i_scale;
        w->cacheb[i] = den1;
    }
}

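/* Here the scale is pre-shifted by (8 - denom) so the asm can apply the scale
 * and the denom shift with a single fixed-point multiply, adding the offset
 * separately from cacheb. (A description of the intent; the exact scheme
 * lives in the ssse3 weight kernels in the asm.) */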
static void x264_weight_cache_ssse3( x264_t *h, x264_weight_t *w )
{
    int i, den1;
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;

        memset( w->cachea, abs( w->i_offset ), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = w->i_scale << (8 - w->i_denom);
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = w->i_offset;
    }
}
#endif // !HIGH_BIT_DEPTH

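/* src[] holds the four subpel planes: 0 = fullpel, 1 = horizontal halfpel,
 * 2 = vertical halfpel, 3 = center (hv) halfpel. For a quarterpel position
 * qpel_idx = (mvy&3)*4 + (mvx&3), hpel_ref0/hpel_ref1 give the two planes
 * whose average yields that position; when no averaging is needed
 * (!(qpel_idx&5)), only hpel_ref0 is used. */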
static const uint8_t hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const uint8_t hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};

#define MC_LUMA(name,instr1,instr2)\
static void mc_luma_##name( pixel *dst,    intptr_t i_dst_stride,\
                            pixel *src[4], intptr_t i_src_stride,\
                            int mvx, int mvy,\
                            int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##instr1[i_width>>2](\
                dst, i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );\
    }\
    else if( weight->weightfn )\
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );\
    else\
        x264_mc_copy_wtab_##instr2[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, i_height );\
}

MC_LUMA(mmx2,mmx2,mmx)
MC_LUMA(sse2,sse2,sse2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
MC_LUMA(cache32_mmx2,cache32_mmx2,mmx)
MC_LUMA(cache64_mmx2,cache64_mmx2,mmx)
#endif
MC_LUMA(cache64_sse2,cache64_sse2,sse2)
MC_LUMA(cache64_ssse3,cache64_ssse3,sse2)
#endif // !HIGH_BIT_DEPTH

#define GET_REF(name)\
static pixel *get_ref_##name( pixel *dst,   intptr_t *i_dst_stride,\
                              pixel *src[4], intptr_t i_src_stride,\
                              int mvx, int mvy,\
                              int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##name[i_width>>2](\
                dst, *i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );\
        return dst;\
    }\
    else if( weight->weightfn )\
    {\
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );\
        return dst;\
    }\
    else\
    {\
        *i_dst_stride = i_src_stride;\
        return src1;\
    }\
}

GET_REF(mmx2)
GET_REF(sse2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
GET_REF(cache32_mmx2)
GET_REF(cache64_mmx2)
#endif
GET_REF(sse2_misalign)
GET_REF(cache64_sse2)
GET_REF(cache64_ssse3)
#endif // !HIGH_BIT_DEPTH

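/* The hpel asm assumes 'align'-byte alignment: src is rounded down to an
 * aligned address and width padded to match, with every dst pointer shifted
 * by the same amount so the output lands in the right place (the few extra
 * pixels computed before the nominal start fall into the frame padding).
 * The final x264_sfence() presumably fences the non-temporal stores issued
 * by the row filters. */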
#define HPEL(align, cpu, cpuv, cpuc, cpuh)\
void x264_hpel_filter_v_##cpuv( pixel *dst, pixel *src, int16_t *buf, intptr_t stride, intptr_t width );\
void x264_hpel_filter_c_##cpuc( pixel *dst, int16_t *buf, intptr_t width );\
void x264_hpel_filter_h_##cpuh( pixel *dst, pixel *src, intptr_t width );\
static void x264_hpel_filter_##cpu( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,\
                                    intptr_t stride, int width, int height, int16_t *buf )\
{\
    intptr_t realign = (intptr_t)src & (align-1);\
    src -= realign;\
    dstv -= realign;\
    dstc -= realign;\
    dsth -= realign;\
    width += realign;\
    while( height-- )\
    {\
        x264_hpel_filter_v_##cpuv( dstv, src, buf+8, stride, width );\
        x264_hpel_filter_c_##cpuc( dstc, buf+8, width );\
        x264_hpel_filter_h_##cpuh( dsth, src, width );\
        dsth += stride;\
        dstv += stride;\
        dstc += stride;\
        src  += stride;\
    }\
    x264_sfence();\
}

HPEL(8, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
HPEL(16, sse2, sse2, sse2, sse2)
#else // !HIGH_BIT_DEPTH
HPEL(16, sse2_amd, mmx2, mmx2, sse2)
#if ARCH_X86_64
void x264_hpel_filter_sse2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_ssse3( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx  ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
#else
HPEL(16, sse2, sse2, sse2, sse2)
HPEL(16, ssse3, ssse3, ssse3, ssse3)
HPEL(16, avx, avx, avx, avx)
#endif
HPEL(16, sse2_misalign, sse2, sse2_misalign, sse2)
#endif // HIGH_BIT_DEPTH

static void x264_plane_copy_mmx2( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h )
{
    int c_w = 16/sizeof(pixel) - 1;
    if( w < 256 ) { // tiny resolutions don't benefit from non-temporal hints; the exact threshold is unknown
        x264_plane_copy_c( dst, i_dst, src, i_src, w, h );
    } else if( !(w&c_w) ) {
        x264_plane_copy_core_mmx2( dst, i_dst, src, i_src, w, h );
    } else if( i_src > 0 ) {
        // have to use plain memcpy on the last line (in memory order) to avoid overreading src
        x264_plane_copy_core_mmx2( dst, i_dst, src, i_src, (w+c_w)&~c_w, h-1 );
        memcpy( dst+i_dst*(h-1), src+i_src*(h-1), w*sizeof(pixel) );
    } else {
        memcpy( dst, src, w*sizeof(pixel) );
        x264_plane_copy_core_mmx2( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h-1 );
    }
}
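/* Worked example of the rounding above, for 8-bit pixels (c_w == 15): w == 100
 * becomes (100+15)&~15 == 112, so the asm may read up to 12 pixels past the
 * end of each row. Inside the frame that lands in the next row or its padding,
 * but past the final row (in memory order) it would overread the buffer,
 * hence the plain memcpy for that row. */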

#define PLANE_INTERLEAVE(cpu) \
static void x264_plane_copy_interleave_##cpu( pixel *dst,  intptr_t i_dst,\
                                              pixel *srcu, intptr_t i_srcu,\
                                              pixel *srcv, intptr_t i_srcv, int w, int h )\
{\
    if( !(w&15) ) {\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    } else if( w < 16 || (i_srcu ^ i_srcv) ) {\
        x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    } else if( i_srcu > 0 ) {\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+15)&~15, h-1 );\
        x264_plane_copy_interleave_c( dst+i_dst*(h-1), 0, srcu+i_srcu*(h-1), 0, srcv+i_srcv*(h-1), 0, w, 1 );\
    } else {\
        x264_plane_copy_interleave_c( dst, 0, srcu, 0, srcv, 0, w, 1 );\
        x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+15)&~15, h-1 );\
    }\
}

PLANE_INTERLEAVE(mmx2)
PLANE_INTERLEAVE(sse2)
#if HIGH_BIT_DEPTH
PLANE_INTERLEAVE(avx)
#endif

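/* Runtime dispatch: each block below installs the entry points for one ISA
 * level and returns early if the next level's CPU flag is absent, so later
 * (faster) implementations simply overwrite the earlier pointers. */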
void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
{
    if( !(cpu&X264_CPU_MMX) )
        return;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_mmx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_mmx;

    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_mmx;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_8x8]   = x264_mc_copy_w8_mmx;
    pf->copy[PIXEL_4x4]   = x264_mc_copy_w4_mmx;
    pf->memcpy_aligned  = x264_memcpy_aligned_mmx;
    pf->memzero_aligned = x264_memzero_aligned_mmx;
    pf->integral_init4v = x264_integral_init4v_mmx;
    pf->integral_init8v = x264_integral_init8v_mmx;

    if( !(cpu&X264_CPU_MMX2) )
        return;

    pf->prefetch_fenc_420 = x264_prefetch_fenc_420_mmx2;
    pf->prefetch_fenc_422 = x264_prefetch_fenc_422_mmx2;
    pf->prefetch_ref  = x264_prefetch_ref_mmx2;

    pf->plane_copy = x264_plane_copy_mmx2;
    pf->plane_copy_interleave = x264_plane_copy_interleave_mmx2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_mmx2;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_mmx2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_mmx2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_mmx2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_mmx2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_mmx2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_mmx2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_mmx2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_mmx2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_mmx2;

    pf->mc_luma = mc_luma_mmx2;
    pf->get_ref = get_ref_mmx2;
    pf->mc_chroma = x264_mc_chroma_mmx2;
    pf->hpel_filter = x264_hpel_filter_mmx2;
    pf->weight = x264_mc_weight_wtab_mmx2;
    pf->weight_cache = x264_weight_cache_mmx2;
    pf->offsetadd = x264_mc_offsetadd_wtab_mmx2;
    pf->offsetsub = x264_mc_offsetsub_wtab_mmx2;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_mmx2;

#if HIGH_BIT_DEPTH
#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&(X264_CPU_CACHELINE_32|X264_CPU_CACHELINE_64) )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;

    pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->get_ref = get_ref_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
    }

    pf->memcpy_aligned  = x264_memcpy_aligned_sse2;
    pf->memzero_aligned = x264_memzero_aligned_sse2;
    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_sse2;
    pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
    pf->offsetsub = x264_mc_offsetsub_wtab_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_sse2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_sse2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_sse2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_sse2;

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse2;
    pf->weight = x264_mc_weight_wtab_sse2;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;

    if( (cpu&X264_CPU_SHUFFLE_IS_FAST) && !(cpu&X264_CPU_SLOW_ATOM) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_avx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_avx;
    pf->plane_copy_interleave        = x264_plane_copy_interleave_avx;
    pf->plane_copy_deinterleave      = x264_plane_copy_deinterleave_avx;
    pf->store_interleave_chroma      = x264_store_interleave_chroma_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;
#else // !HIGH_BIT_DEPTH

#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&X264_CPU_CACHELINE_32 )
    {
        pf->mc_luma = mc_luma_cache32_mmx2;
        pf->get_ref = get_ref_cache32_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
    else if( cpu&X264_CPU_CACHELINE_64 )
    {
        pf->mc_luma = mc_luma_cache64_mmx2;
        pf->get_ref = get_ref_cache64_mmx2;
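        /* no cache64 variant of lowres exists; loads that never cross a 32-byte
         * boundary can't cross a 64-byte one either, so cache32 works here */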
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->memcpy_aligned = x264_memcpy_aligned_sse2;
    pf->memzero_aligned = x264_memzero_aligned_sse2;
    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2_amd;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->weight = x264_mc_weight_wtab_sse2;
    if( !(cpu&X264_CPU_SLOW_ATOM) )
    {
        pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
        pf->offsetsub = x264_mc_offsetsub_wtab_sse2;
    }

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse2;
    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2;
    if( cpu&X264_CPU_SSE_MISALIGN )
        pf->hpel_filter = x264_hpel_filter_sse2_misalign;
    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->store_interleave_chroma = x264_store_interleave_chroma_sse2; // FIXME sse2fast? sse2medium?
        pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
        pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;
        pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
        pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->get_ref = get_ref_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pf->mc_luma = mc_luma_cache64_sse2;
            pf->get_ref = get_ref_cache64_sse2;
        }
        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            pf->get_ref = get_ref_sse2_misalign;
            if( !(cpu&X264_CPU_STACK_MOD4) )
                pf->mc_chroma = x264_mc_chroma_sse2_misalign;
        }
    }

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_ssse3;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_ssse3;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_ssse3;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_ssse3;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_ssse3;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_ssse3;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_ssse3;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_ssse3;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_ssse3;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_ssse3;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_ssse3;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_ssse3;

    pf->hpel_filter = x264_hpel_filter_ssse3;
    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_ssse3;

    if( cpu&X264_CPU_CACHELINE_64 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_ssse3_cache64;
        pf->mc_luma = mc_luma_cache64_ssse3;
        pf->get_ref = get_ref_cache64_ssse3;

        /* ssse3 weight is slower on Nehalem, so only assign here. */
        pf->weight_cache = x264_weight_cache_ssse3;
        pf->weight = x264_mc_weight_wtab_ssse3;
    }

    if( (cpu&X264_CPU_SHUFFLE_IS_FAST) && !(cpu&X264_CPU_SLOW_ATOM) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_SSE4) )
        return;

    pf->integral_init4h = x264_integral_init4h_sse4;
    pf->integral_init8h = x264_integral_init8h_sse4;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->integral_init8h = x264_integral_init8h_avx;
    pf->hpel_filter = x264_hpel_filter_avx;

    /* ssse3 weight seems to be faster again on Sandy Bridge and Bulldozer. */
    pf->weight_cache = x264_weight_cache_ssse3;
    pf->weight = x264_mc_weight_wtab_ssse3;
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;
#endif // HIGH_BIT_DEPTH

    if( !(cpu&X264_CPU_AVX) )
        return;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx;

    if( cpu&X264_CPU_FMA4 )
        pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_fma4;

    if( !(cpu&X264_CPU_AVX2) )
        return;

    if( cpu&X264_CPU_FMA3 )
        pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx2_fma3;
}
767 }