/*****************************************************************************
 * mc-c.c: x86 motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2011 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Jason Garrett-Glaser <darkshikari@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "common/common.h"
#include "mc.h"

#define DECL_SUF( func, args )\
    void func##_mmx2 args;\
    void func##_sse2 args;\
    void func##_ssse3 args;

DECL_SUF( x264_pixel_avg_16x16, ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_16x8,  ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_8x16,  ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_8x8,   ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_8x4,   ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_4x8,   ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_4x4,   ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_4x2,   ( pixel *, int, pixel *, int, pixel *, int, int ))

#define MC_WEIGHT(w,type) \
    void x264_mc_weight_w##w##_##type( pixel *,int, pixel *,int, const x264_weight_t *,int );

#define MC_WEIGHT_OFFSET(w,type) \
    void x264_mc_offsetadd_w##w##_##type( pixel *,int, pixel *,int, const x264_weight_t *,int ); \
    void x264_mc_offsetsub_w##w##_##type( pixel *,int, pixel *,int, const x264_weight_t *,int ); \
    MC_WEIGHT(w,type)

MC_WEIGHT_OFFSET( 4, mmx2 )
MC_WEIGHT_OFFSET( 8, mmx2 )
MC_WEIGHT_OFFSET( 12, mmx2 )
MC_WEIGHT_OFFSET( 16, mmx2 )
MC_WEIGHT_OFFSET( 20, mmx2 )
MC_WEIGHT_OFFSET( 12, sse2 )
MC_WEIGHT_OFFSET( 16, sse2 )
MC_WEIGHT_OFFSET( 20, sse2 )
#if HIGH_BIT_DEPTH
MC_WEIGHT_OFFSET( 8, sse2 )
#endif
MC_WEIGHT( 8, sse2  )
MC_WEIGHT( 4, ssse3 )
MC_WEIGHT( 8, ssse3 )
MC_WEIGHT( 12, ssse3 )
MC_WEIGHT( 16, ssse3 )
MC_WEIGHT( 20, ssse3 )
MC_WEIGHT( 4, avx )
MC_WEIGHT( 8, avx )
MC_WEIGHT( 12, avx )
MC_WEIGHT( 16, avx )
MC_WEIGHT( 20, avx )
#undef MC_WEIGHT_OFFSET
#undef MC_WEIGHT

void x264_mc_copy_w4_mmx( pixel *, int, pixel *, int, int );
void x264_mc_copy_w8_mmx( pixel *, int, pixel *, int, int );
void x264_mc_copy_w8_sse2( pixel *, int, pixel *, int, int );
void x264_mc_copy_w16_mmx( pixel *, int, pixel *, int, int );
void x264_mc_copy_w16_sse2( pixel *, int, pixel *, int, int );
void x264_mc_copy_w16_aligned_sse2( pixel *, int, pixel *, int, int );
void x264_prefetch_fenc_mmx2( uint8_t *, int, uint8_t *, int, int );
void x264_prefetch_ref_mmx2( uint8_t *, int, int );
void x264_plane_copy_core_mmx2( pixel *, int, pixel *, int, int w, int h );
void x264_plane_copy_c( pixel *, int, pixel *, int, int w, int h );
void x264_plane_copy_interleave_core_mmx2( pixel *dst, int i_dst,
                                           pixel *srcu, int i_srcu,
                                           pixel *srcv, int i_srcv, int w, int h );
void x264_plane_copy_interleave_core_sse2( pixel *dst, int i_dst,
                                           pixel *srcu, int i_srcu,
                                           pixel *srcv, int i_srcv, int w, int h );
void x264_plane_copy_interleave_core_avx( pixel *dst, int i_dst,
                                          pixel *srcu, int i_srcu,
                                          pixel *srcv, int i_srcv, int w, int h );
void x264_plane_copy_interleave_c( pixel *dst, int i_dst,
                                   pixel *srcu, int i_srcu,
                                   pixel *srcv, int i_srcv, int w, int h );
void x264_plane_copy_deinterleave_mmx( pixel *dstu, int i_dstu,
                                       pixel *dstv, int i_dstv,
                                       pixel *src, int i_src, int w, int h );
void x264_plane_copy_deinterleave_sse2( pixel *dstu, int i_dstu,
                                        pixel *dstv, int i_dstv,
                                        pixel *src, int i_src, int w, int h );
void x264_plane_copy_deinterleave_ssse3( uint8_t *dstu, int i_dstu,
                                         uint8_t *dstv, int i_dstv,
                                         uint8_t *src, int i_src, int w, int h );
void x264_plane_copy_deinterleave_avx( uint16_t *dstu, int i_dstu,
                                       uint16_t *dstv, int i_dstv,
                                       uint16_t *src, int i_src, int w, int h );
void x264_store_interleave_8x8x2_mmx2( pixel *dst, int i_dst, pixel *srcu, pixel *srcv );
void x264_store_interleave_8x8x2_sse2( pixel *dst, int i_dst, pixel *srcu, pixel *srcv );
void x264_store_interleave_8x8x2_avx( pixel *dst, int i_dst, pixel *srcu, pixel *srcv );
void x264_load_deinterleave_8x8x2_fenc_mmx( pixel *dst, pixel *src, int i_src );
void x264_load_deinterleave_8x8x2_fenc_sse2( pixel *dst, pixel *src, int i_src );
void x264_load_deinterleave_8x8x2_fenc_ssse3( uint8_t *dst, uint8_t *src, int i_src );
void x264_load_deinterleave_8x8x2_fenc_avx( uint16_t *dst, uint16_t *src, int i_src );
void x264_load_deinterleave_8x8x2_fdec_mmx( pixel *dst, pixel *src, int i_src );
void x264_load_deinterleave_8x8x2_fdec_sse2( pixel *dst, pixel *src, int i_src );
void x264_load_deinterleave_8x8x2_fdec_ssse3( uint8_t *dst, uint8_t *src, int i_src );
void x264_load_deinterleave_8x8x2_fdec_avx( uint16_t *dst, uint16_t *src, int i_src );
void *x264_memcpy_aligned_mmx( void * dst, const void * src, size_t n );
void *x264_memcpy_aligned_sse2( void * dst, const void * src, size_t n );
void x264_memzero_aligned_mmx( void * dst, int n );
void x264_memzero_aligned_sse2( void * dst, int n );
void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, int stride );
void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, int stride );
void x264_integral_init8h_avx ( uint16_t *sum, uint8_t *pix, int stride );
void x264_integral_init4v_mmx( uint16_t *sum8, uint16_t *sum4, int stride );
void x264_integral_init4v_sse2( uint16_t *sum8, uint16_t *sum4, int stride );
void x264_integral_init8v_mmx( uint16_t *sum8, int stride );
void x264_integral_init8v_sse2( uint16_t *sum8, int stride );
void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, int stride );
void x264_mbtree_propagate_cost_sse2( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                     uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );

#define MC_CHROMA(cpu)\
void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, int i_dst,\
                           pixel *src, int i_src,\
                           int dx, int dy, int i_width, int i_height );
MC_CHROMA(mmx2)
MC_CHROMA(sse2)
MC_CHROMA(sse2_misalign)
MC_CHROMA(ssse3)
MC_CHROMA(ssse3_cache64)
MC_CHROMA(avx)
MC_CHROMA(avx_cache64)

#define LOWRES(cpu)\
void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
                                        int src_stride, int dst_stride, int width, int height );
LOWRES(mmx2)
LOWRES(cache32_mmx2)
LOWRES(sse2)
LOWRES(ssse3)

#define PIXEL_AVG_W(width,cpu)\
void x264_pixel_avg2_w##width##_##cpu( pixel *, int, pixel *, int, pixel *, int );
/* This declares some functions that don't exist, but that isn't a problem:
 * an unused prototype generates no reference for the linker to resolve. */
#define PIXEL_AVG_WALL(cpu)\
PIXEL_AVG_W(4,cpu); PIXEL_AVG_W(8,cpu); PIXEL_AVG_W(10,cpu); PIXEL_AVG_W(12,cpu); PIXEL_AVG_W(16,cpu); PIXEL_AVG_W(18,cpu); PIXEL_AVG_W(20,cpu);

PIXEL_AVG_WALL(mmx2)
PIXEL_AVG_WALL(cache32_mmx2)
PIXEL_AVG_WALL(cache64_mmx2)
PIXEL_AVG_WALL(cache64_sse2)
PIXEL_AVG_WALL(sse2)
PIXEL_AVG_WALL(sse2_misalign)
PIXEL_AVG_WALL(cache64_ssse3)

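/* The function tables below are indexed by block width>>2 (4->1, 8->2, 12->3,
 * 16->4, 20->5), matching the i_width>>2 lookups in mc_luma()/get_ref() below. */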
#define PIXEL_AVG_WTAB(instr, name1, name2, name3, name4, name5)\
static void (* const x264_pixel_avg_wtab_##instr[6])( pixel *, int, pixel *, int, pixel *, int ) =\
{\
    NULL,\
    x264_pixel_avg2_w4_##name1,\
    x264_pixel_avg2_w8_##name2,\
    x264_pixel_avg2_w12_##name3,\
    x264_pixel_avg2_w16_##name4,\
    x264_pixel_avg2_w20_##name5,\
};

#if HIGH_BIT_DEPTH
/* we can replace w12/w20 with w10/w18 as only 9/17 pixels in fact are important */
#define x264_pixel_avg2_w12_mmx2       x264_pixel_avg2_w10_mmx2
#define x264_pixel_avg2_w20_mmx2       x264_pixel_avg2_w18_mmx2
#define x264_pixel_avg2_w12_sse2       x264_pixel_avg2_w10_sse2
#define x264_pixel_avg2_w20_sse2       x264_pixel_avg2_w18_sse2
#else
/* w16 sse2 is faster than w12 mmx as long as the cacheline issue is resolved */
#define x264_pixel_avg2_w12_cache64_ssse3 x264_pixel_avg2_w16_cache64_ssse3
#define x264_pixel_avg2_w12_cache64_sse2  x264_pixel_avg2_w16_cache64_sse2
#define x264_pixel_avg2_w12_sse3          x264_pixel_avg2_w16_sse3
#define x264_pixel_avg2_w12_sse2          x264_pixel_avg2_w16_sse2
#endif // HIGH_BIT_DEPTH

PIXEL_AVG_WTAB(mmx2, mmx2, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
PIXEL_AVG_WTAB(sse2, mmx2, sse2, sse2, sse2, sse2)
#else // !HIGH_BIT_DEPTH
#if ARCH_X86
PIXEL_AVG_WTAB(cache32_mmx2, mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2)
PIXEL_AVG_WTAB(cache64_mmx2, mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2)
#endif
PIXEL_AVG_WTAB(sse2, mmx2, mmx2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(sse2_misalign, mmx2, mmx2, sse2, sse2, sse2_misalign)
PIXEL_AVG_WTAB(cache64_sse2, mmx2, cache64_mmx2, cache64_sse2, cache64_sse2, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3, mmx2, cache64_mmx2, cache64_ssse3, cache64_ssse3, cache64_sse2)
#endif // HIGH_BIT_DEPTH

#define MC_COPY_WTAB(instr, name1, name2, name3)\
static void (* const x264_mc_copy_wtab_##instr[5])( pixel *, int, pixel *, int, int ) =\
{\
    NULL,\
    x264_mc_copy_w4_##name1,\
    x264_mc_copy_w8_##name2,\
    NULL,\
    x264_mc_copy_w16_##name3,\
};

MC_COPY_WTAB(mmx,mmx,mmx,mmx)
MC_COPY_WTAB(sse2,mmx,mmx,sse2)

#define MC_WEIGHT_WTAB(function, instr, name1, name2, w12version)\
    static void (* x264_mc_##function##_wtab_##instr[6])( pixel *, int, pixel *, int, const x264_weight_t *, int ) =\
{\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w8_##name2,\
    x264_mc_##function##_w##w12version##_##instr,\
    x264_mc_##function##_w16_##instr,\
    x264_mc_##function##_w20_##instr,\
};

#if HIGH_BIT_DEPTH
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,12)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,sse2,16)

static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        for( int i = 0; i < 8; i++ )
            w->cachea[i] = abs(w->i_offset<<(BIT_DEPTH-8));
        return;
    }
    w->weightfn = h->mc.weight;
    int den1 = 1<<w->i_denom;
    int den2 = w->i_scale<<1;
    int den3 = 1+(w->i_offset<<(BIT_DEPTH-8+1));
    for( int i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = i&1 ? den3 : den2;
    }
}
#else
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(weight,ssse3,ssse3,ssse3,16)

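/* Pack the explicit weighted-prediction constants for the 8-bit kernels: cachea
 * holds i_scale and cacheb folds the rounding term (1<<(i_denom-1)) together with
 * the offset pre-shifted by i_denom, so a multiply, add and arithmetic shift by
 * i_denom is expected to yield ((pix*i_scale + round) >> i_denom) + i_offset. */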
static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    int i;
    int16_t den1;

    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        memset( w->cachea, abs(w->i_offset), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = 1 << (w->i_denom - 1) | w->i_offset << w->i_denom;
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = w->i_scale;
        w->cacheb[i] = den1;
    }
}

static void x264_weight_cache_ssse3( x264_t *h, x264_weight_t *w )
{
    int i, den1;
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;

        memset( w->cachea, abs( w->i_offset ), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = w->i_scale << (8 - w->i_denom);
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = w->i_offset;
    }
}
#endif // !HIGH_BIT_DEPTH

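/* src[] holds the four filtered planes (0 = full-pel, 1 = horizontal half-pel,
 * 2 = vertical half-pel, 3 = centre); hpel_ref0/hpel_ref1 give, for each
 * quarter-pel position qpel_idx = (mvy&3)*4 + (mvx&3), the pair of planes to
 * average. When neither component has a quarter-pel offset (qpel_idx & 5 == 0),
 * src1 alone suffices. */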
static const uint8_t hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const uint8_t hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};

#define MC_LUMA(name,instr1,instr2)\
static void mc_luma_##name( pixel *dst,    int i_dst_stride,\
                            pixel *src[4], int i_src_stride,\
                            int mvx, int mvy,\
                            int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##instr1[i_width>>2](\
                dst, i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );\
    }\
    else if( weight->weightfn )\
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );\
    else\
        x264_mc_copy_wtab_##instr2[i_width>>2](dst, i_dst_stride, src1, i_src_stride, i_height );\
}

MC_LUMA(mmx2,mmx2,mmx)
MC_LUMA(sse2,sse2,sse2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
MC_LUMA(cache32_mmx2,cache32_mmx2,mmx)
MC_LUMA(cache64_mmx2,cache64_mmx2,mmx)
#endif
MC_LUMA(cache64_sse2,cache64_sse2,sse2)
MC_LUMA(cache64_ssse3,cache64_ssse3,sse2)
#endif // !HIGH_BIT_DEPTH

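/* get_ref is mc_luma without the final copy: when no quarter-pel averaging and no
 * weighting is needed, it returns a pointer directly into the reference plane and
 * reports the reference stride instead of writing into dst. */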
#define GET_REF(name)\
static pixel *get_ref_##name( pixel *dst,   int *i_dst_stride,\
                              pixel *src[4], int i_src_stride,\
                              int mvx, int mvy,\
                              int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##name[i_width>>2](\
                dst, *i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );\
        return dst;\
    }\
    else if( weight->weightfn )\
    {\
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );\
        return dst;\
    }\
    else\
    {\
        *i_dst_stride = i_src_stride;\
        return src1;\
    }\
}

GET_REF(mmx2)
GET_REF(sse2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
GET_REF(cache32_mmx2)
GET_REF(cache64_mmx2)
#endif
GET_REF(sse2_misalign)
GET_REF(cache64_sse2)
GET_REF(cache64_ssse3)
#endif // !HIGH_BIT_DEPTH

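/* The hpel wrapper below rounds src down to the nearest `align`-byte boundary and
 * widens the row by the same amount so the per-row kernels always start from an
 * aligned address; the dst pointers are shifted back identically, so a few extra
 * columns left of the requested area get filtered too (the caller is assumed to
 * provide writable padding there). */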
#define HPEL(align, cpu, cpuv, cpuc, cpuh)\
void x264_hpel_filter_v_##cpuv( pixel *dst, pixel *src, int16_t *buf, int stride, int width);\
void x264_hpel_filter_c_##cpuc( pixel *dst, int16_t *buf, int width );\
void x264_hpel_filter_h_##cpuh( pixel *dst, pixel *src, int width );\
static void x264_hpel_filter_##cpu( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,\
                                    int stride, int width, int height, int16_t *buf )\
{\
    int realign = (intptr_t)src & (align-1);\
    src -= realign;\
    dstv -= realign;\
    dstc -= realign;\
    dsth -= realign;\
    width += realign;\
    while( height-- )\
    {\
        x264_hpel_filter_v_##cpuv( dstv, src, buf+8, stride, width );\
        x264_hpel_filter_c_##cpuc( dstc, buf+8, width );\
        x264_hpel_filter_h_##cpuh( dsth, src, width );\
        dsth += stride;\
        dstv += stride;\
        dstc += stride;\
        src  += stride;\
    }\
    x264_sfence();\
}

HPEL(8, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
HPEL(16, sse2, sse2, sse2, sse2)
#else // !HIGH_BIT_DEPTH
HPEL(16, sse2_amd, mmx2, mmx2, sse2)
#if ARCH_X86_64
void x264_hpel_filter_sse2( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, int stride, int width, int height, int16_t *buf );
void x264_hpel_filter_ssse3( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, int stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, int stride, int width, int height, int16_t *buf );
#else
HPEL(16, sse2, sse2, sse2, sse2)
HPEL(16, ssse3, ssse3, ssse3, ssse3)
HPEL(16, avx, avx, avx, ssse3)
#endif
HPEL(16, sse2_misalign, sse2, sse2_misalign, sse2)
#endif // HIGH_BIT_DEPTH

static void x264_plane_copy_mmx2( pixel *dst, int i_dst, pixel *src, int i_src, int w, int h )
{
    int c_w = 16/sizeof(pixel) - 1;
    if( w < 256 ) { // tiny resolutions don't want non-temporal hints. dunno the exact threshold.
        x264_plane_copy_c( dst, i_dst, src, i_src, w, h );
    } else if( !(w&c_w) ) {
        x264_plane_copy_core_mmx2( dst, i_dst, src, i_src, w, h );
    } else if( i_src > 0 ) {
        // have to use plain memcpy on the last line (in memory order) to avoid overreading src
        x264_plane_copy_core_mmx2( dst, i_dst, src, i_src, (w+c_w)&~c_w, h-1 );
        memcpy( dst+i_dst*(h-1), src+i_src*(h-1), w*sizeof(pixel) );
    } else {
        memcpy( dst, src, w*sizeof(pixel) );
        x264_plane_copy_core_mmx2( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h-1 );
    }
}

#define PLANE_INTERLEAVE(cpu) \
static void x264_plane_copy_interleave_##cpu( pixel *dst, int i_dst,\
                                              pixel *srcu, int i_srcu,\
                                              pixel *srcv, int i_srcv, int w, int h )\
{\
    if( !(w&15) ) {\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    } else if( w < 16 || (i_srcu ^ i_srcv) ) {\
        x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    } else if( i_srcu > 0 ) {\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+15)&~15, h-1 );\
        x264_plane_copy_interleave_c( dst+i_dst*(h-1), 0, srcu+i_srcu*(h-1), 0, srcv+i_srcv*(h-1), 0, w, 1 );\
    } else {\
        x264_plane_copy_interleave_c( dst, 0, srcu, 0, srcv, 0, w, 1 );\
        x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+15)&~15, h-1 );\
    }\
}

PLANE_INTERLEAVE(mmx2)
PLANE_INTERLEAVE(sse2)
#if HIGH_BIT_DEPTH
PLANE_INTERLEAVE(avx)
#endif

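/* Runtime dispatch: x264_mc_init_mmx() progressively overrides the function
 * pointers for each CPU feature level and returns as soon as a required
 * capability flag is missing, so faster variants are only installed on CPUs
 * that support them. */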
void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
{
    if( !(cpu&X264_CPU_MMX) )
        return;

    pf->load_deinterleave_8x8x2_fenc = x264_load_deinterleave_8x8x2_fenc_mmx;
    pf->load_deinterleave_8x8x2_fdec = x264_load_deinterleave_8x8x2_fdec_mmx;

    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_mmx;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_8x8]   = x264_mc_copy_w8_mmx;
    pf->copy[PIXEL_4x4]   = x264_mc_copy_w4_mmx;
    pf->memcpy_aligned  = x264_memcpy_aligned_mmx;
    pf->memzero_aligned = x264_memzero_aligned_mmx;
    pf->integral_init4v = x264_integral_init4v_mmx;
    pf->integral_init8v = x264_integral_init8v_mmx;

    if( !(cpu&X264_CPU_MMX2) )
        return;

    pf->plane_copy = x264_plane_copy_mmx2;
    pf->plane_copy_interleave = x264_plane_copy_interleave_mmx2;
    pf->store_interleave_8x8x2 = x264_store_interleave_8x8x2_mmx2;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_mmx2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_mmx2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_mmx2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_mmx2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_mmx2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_mmx2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_mmx2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_mmx2;

    pf->mc_luma = mc_luma_mmx2;
    pf->get_ref = get_ref_mmx2;
    pf->mc_chroma = x264_mc_chroma_mmx2;
    pf->hpel_filter = x264_hpel_filter_mmx2;
    pf->weight = x264_mc_weight_wtab_mmx2;
    pf->weight_cache = x264_weight_cache_mmx2;
    pf->offsetadd = x264_mc_offsetadd_wtab_mmx2;
    pf->offsetsub = x264_mc_offsetsub_wtab_mmx2;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_mmx2;

#if HIGH_BIT_DEPTH
#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&(X264_CPU_CACHELINE_32|X264_CPU_CACHELINE_64) )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;

    pf->load_deinterleave_8x8x2_fenc = x264_load_deinterleave_8x8x2_fenc_sse2;
    pf->load_deinterleave_8x8x2_fdec = x264_load_deinterleave_8x8x2_fdec_sse2;

    pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->get_ref = get_ref_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
    }

    pf->memcpy_aligned  = x264_memcpy_aligned_sse2;
    pf->memzero_aligned = x264_memzero_aligned_sse2;
    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->store_interleave_8x8x2 = x264_store_interleave_8x8x2_sse2;
    pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
    pf->offsetsub = x264_mc_offsetsub_wtab_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_sse2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_sse2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_sse2;

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse2;
    pf->weight = x264_mc_weight_wtab_sse2;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;

    if( (cpu&X264_CPU_SHUFFLE_IS_FAST) && !(cpu&X264_CPU_SLOW_ATOM) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->load_deinterleave_8x8x2_fenc = x264_load_deinterleave_8x8x2_fenc_avx;
    pf->load_deinterleave_8x8x2_fdec = x264_load_deinterleave_8x8x2_fdec_avx;
    pf->plane_copy_interleave        = x264_plane_copy_interleave_avx;
    pf->plane_copy_deinterleave      = x264_plane_copy_deinterleave_avx;
    pf->store_interleave_8x8x2       = x264_store_interleave_8x8x2_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;
#else // !HIGH_BIT_DEPTH
    pf->prefetch_fenc = x264_prefetch_fenc_mmx2;
    pf->prefetch_ref  = x264_prefetch_ref_mmx2;

#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&X264_CPU_CACHELINE_32 )
    {
        pf->mc_luma = mc_luma_cache32_mmx2;
        pf->get_ref = get_ref_cache32_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
    else if( cpu&X264_CPU_CACHELINE_64 )
    {
        pf->mc_luma = mc_luma_cache64_mmx2;
        pf->get_ref = get_ref_cache64_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->memcpy_aligned = x264_memcpy_aligned_sse2;
    pf->memzero_aligned = x264_memzero_aligned_sse2;
    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2_amd;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->weight = x264_mc_weight_wtab_sse2;
    if( !(cpu&X264_CPU_SLOW_ATOM) )
    {
        pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
        pf->offsetsub = x264_mc_offsetsub_wtab_sse2;
    }

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse2;
    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2;
    if( cpu&X264_CPU_SSE_MISALIGN )
        pf->hpel_filter = x264_hpel_filter_sse2_misalign;
    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->store_interleave_8x8x2  = x264_store_interleave_8x8x2_sse2; // FIXME sse2fast? sse2medium?
        pf->load_deinterleave_8x8x2_fenc = x264_load_deinterleave_8x8x2_fenc_sse2;
        pf->load_deinterleave_8x8x2_fdec = x264_load_deinterleave_8x8x2_fdec_sse2;
        pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
        pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->get_ref = get_ref_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pf->mc_luma = mc_luma_cache64_sse2;
            pf->get_ref = get_ref_cache64_sse2;
        }
        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            pf->get_ref = get_ref_sse2_misalign;
            if( !(cpu&X264_CPU_STACK_MOD4) )
                pf->mc_chroma = x264_mc_chroma_sse2_misalign;
        }
    }

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_ssse3;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_ssse3;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_ssse3;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_ssse3;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_ssse3;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_ssse3;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_ssse3;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_ssse3;

    pf->load_deinterleave_8x8x2_fenc = x264_load_deinterleave_8x8x2_fenc_ssse3;
    pf->load_deinterleave_8x8x2_fdec = x264_load_deinterleave_8x8x2_fdec_ssse3;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_ssse3;

    pf->hpel_filter = x264_hpel_filter_ssse3;
    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_ssse3;

    if( cpu&X264_CPU_CACHELINE_64 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_ssse3_cache64;
        pf->mc_luma = mc_luma_cache64_ssse3;
        pf->get_ref = get_ref_cache64_ssse3;

        /* ssse3 weight is slower on Nehalem, so only assign here. */
        pf->weight_cache = x264_weight_cache_ssse3;
        pf->weight = x264_mc_weight_wtab_ssse3;
    }

    if( (cpu&X264_CPU_SHUFFLE_IS_FAST) && !(cpu&X264_CPU_SLOW_ATOM) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_SSE4) )
        return;

    pf->integral_init4h = x264_integral_init4h_sse4;
    pf->integral_init8h = x264_integral_init8h_sse4;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->integral_init8h = x264_integral_init8h_avx;
    pf->hpel_filter = x264_hpel_filter_avx;
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;
#endif // HIGH_BIT_DEPTH

    if( !(cpu&X264_CPU_AVX) )
        return;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx;
}