MBAFF: Enable adaptive MBAFF with VSAD decision
[x262.git] common/pixel.c
1 /*****************************************************************************
2  * pixel.c: pixel metrics
3  *****************************************************************************
4  * Copyright (C) 2003-2011 x264 project
5  *
6  * Authors: Loren Merritt <lorenm@u.washington.edu>
7  *          Laurent Aimar <fenrir@via.ecp.fr>
8  *          Jason Garrett-Glaser <darkshikari@gmail.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
23  *
24  * This program is also available under a commercial proprietary license.
25  * For more information, contact us at licensing@x264.com.
26  *****************************************************************************/
27
28 #include "common.h"
29
30 #if HAVE_MMX
31 #   include "x86/pixel.h"
32 #endif
33 #if ARCH_PPC
34 #   include "ppc/pixel.h"
35 #endif
36 #if ARCH_ARM
37 #   include "arm/pixel.h"
38 #endif
39 #if ARCH_UltraSPARC
40 #   include "sparc/pixel.h"
41 #endif
42
43
44 /****************************************************************************
45  * pixel_sad_WxH
46  ****************************************************************************/
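/* SAD (sum of absolute differences) is the cheapest block distortion metric and
 * the default comparison function for motion estimation.  The macro below emits
 * one plain-C implementation per partition size; x264_pixel_init() later
 * overwrites these entries with SIMD versions where available. */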
47 #define PIXEL_SAD_C( name, lx, ly ) \
48 static int name( pixel *pix1, int i_stride_pix1,  \
49                  pixel *pix2, int i_stride_pix2 ) \
50 {                                                   \
51     int i_sum = 0;                                  \
52     for( int y = 0; y < ly; y++ )                   \
53     {                                               \
54         for( int x = 0; x < lx; x++ )               \
55         {                                           \
56             i_sum += abs( pix1[x] - pix2[x] );      \
57         }                                           \
58         pix1 += i_stride_pix1;                      \
59         pix2 += i_stride_pix2;                      \
60     }                                               \
61     return i_sum;                                   \
62 }
63
64
65 PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
66 PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
67 PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
68 PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
69 PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
70 PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
71 PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )
72
73
74 /****************************************************************************
75  * pixel_ssd_WxH
76  ****************************************************************************/
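/* SSD (sum of squared differences) penalizes large errors more strongly than
 * SAD; it is the distortion term behind PSNR and rate-distortion cost. */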
77 #define PIXEL_SSD_C( name, lx, ly ) \
78 static int name( pixel *pix1, int i_stride_pix1,  \
79                  pixel *pix2, int i_stride_pix2 ) \
80 {                                                   \
81     int i_sum = 0;                                  \
82     for( int y = 0; y < ly; y++ )                   \
83     {                                               \
84         for( int x = 0; x < lx; x++ )               \
85         {                                           \
86             int d = pix1[x] - pix2[x];              \
87             i_sum += d*d;                           \
88         }                                           \
89         pix1 += i_stride_pix1;                      \
90         pix2 += i_stride_pix2;                      \
91     }                                               \
92     return i_sum;                                   \
93 }
94
95 PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
96 PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
97 PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
98 PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
99 PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
100 PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
101 PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )
102
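/* Full-plane SSD: tile the plane in 16-row bands using 16x16 blocks (only when
 * both pointers and both strides are 16-byte aligned) and 8x16 blocks, use 8x8
 * blocks for a remaining band of 8 or more rows, then finish the ragged right
 * and bottom edges with the scalar per-pixel loop (SSD1). */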
103 uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
104 {
105     uint64_t i_ssd = 0;
106     int y;
107     int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);
108
109 #define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
110                                           pix2 + y*i_pix2 + x, i_pix2 );
111     for( y = 0; y < i_height-15; y += 16 )
112     {
113         int x = 0;
114         if( align )
115             for( ; x < i_width-15; x += 16 )
116                 SSD(PIXEL_16x16);
117         for( ; x < i_width-7; x += 8 )
118             SSD(PIXEL_8x16);
119     }
120     if( y < i_height-7 )
121         for( int x = 0; x < i_width-7; x += 8 )
122             SSD(PIXEL_8x8);
123 #undef SSD
124
125 #define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
126     if( i_width & 7 )
127     {
128         for( y = 0; y < (i_height & ~7); y++ )
129             for( int x = i_width & ~7; x < i_width; x++ )
130                 SSD1;
131     }
132     if( i_height & 7 )
133     {
134         for( y = i_height & ~7; y < i_height; y++ )
135             for( int x = 0; x < i_width; x++ )
136                 SSD1;
137     }
138 #undef SSD1
139
140     return i_ssd;
141 }
142
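/* NV12 chroma is stored as interleaved U/V sample pairs, so a single pass over
 * the plane accumulates separate SSDs for U (even offsets) and V (odd offsets).
 * x264_pixel_ssd_nv12() below runs the (possibly SIMD) core on the multiple-of-8
 * part of the width and falls back to this C core for the remaining columns. */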
143 static void pixel_ssd_nv12_core( pixel *pixuv1, int stride1, pixel *pixuv2, int stride2, int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
144 {
145     *ssd_u = 0, *ssd_v = 0;
146     for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
147         for( int x = 0; x < width; x++ )
148         {
149             int du = pixuv1[2*x]   - pixuv2[2*x];
150             int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
151             *ssd_u += du*du;
152             *ssd_v += dv*dv;
153         }
154 }
155
156 void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
157 {
158     pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
159     if( i_width&7 )
160     {
161         uint64_t tmp[2];
162         pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
163         *ssd_u += tmp[0];
164         *ssd_v += tmp[1];
165     }
166 }
167
168 /****************************************************************************
169  * pixel_var_wxh
170  ****************************************************************************/
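/* Returns the block's pixel sum in the low 32 bits and the sum of squares in
 * the high 32 bits; the caller derives the variance itself, e.g. (hypothetical
 * caller sketch for an 8x8 block):
 *     uint64_t r   = pixf->var[PIXEL_8x8]( pix, stride );
 *     uint32_t sum = (uint32_t)r, sqr = r >> 32;
 *     uint32_t var = sqr - ((uint64_t)sum * sum >> 6);   // >>6 == /64 pixels
 * Typically used for adaptive quantization. */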
171 #define PIXEL_VAR_C( name, w ) \
172 static uint64_t name( pixel *pix, int i_stride ) \
173 {                                             \
174     uint32_t sum = 0, sqr = 0;                \
175     for( int y = 0; y < w; y++ )              \
176     {                                         \
177         for( int x = 0; x < w; x++ )          \
178         {                                     \
179             sum += pix[x];                    \
180             sqr += pix[x] * pix[x];           \
181         }                                     \
182         pix += i_stride;                      \
183     }                                         \
184     return sum + ((uint64_t)sqr << 32);       \
185 }
186
187 PIXEL_VAR_C( x264_pixel_var_16x16, 16 )
188 PIXEL_VAR_C( x264_pixel_var_8x8,    8 )
189
190 /****************************************************************************
191  * pixel_var2_wxh
192  ****************************************************************************/
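/* Variance of the difference between two 8x8 blocks (typically fenc vs. fdec):
 * accumulates the signed sum and the sum of squares of the per-pixel
 * differences, returns sqr - sum^2/64 and stores the raw SSD through *ssd. */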
193 static int pixel_var2_8x8( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd )
194 {
195     uint32_t var = 0, sum = 0, sqr = 0;
196     for( int y = 0; y < 8; y++ )
197     {
198         for( int x = 0; x < 8; x++ )
199         {
200             int diff = pix1[x] - pix2[x];
201             sum += diff;
202             sqr += diff * diff;
203         }
204         pix1 += i_stride1;
205         pix2 += i_stride2;
206     }
207     sum = abs(sum);
208     var = sqr - ((uint64_t)sum * sum >> 6);
209     *ssd = sqr;
210     return var;
211 }
212
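/* The SATD/SA8D code below uses a "pseudo-SIMD" trick: two partial sums are
 * packed into one sum2_t, one per half (each half is BITS_PER_SUM bits wide),
 * so every arithmetic operation and every HADAMARD4 butterfly processes two
 * lanes at once.  HADAMARD4 itself is an unnormalized 4-point Hadamard
 * butterfly (sums and differences of its four inputs).  sum_t/sum2_t are
 * widened for high bit depth so the lanes have enough headroom. */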
213 #if BIT_DEPTH > 8
214     typedef uint32_t sum_t;
215     typedef uint64_t sum2_t;
216 #else
217     typedef uint16_t sum_t;
218     typedef uint32_t sum2_t;
219 #endif
220 #define BITS_PER_SUM (8 * sizeof(sum_t))
221
222 #define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
223     sum2_t t0 = s0 + s1;\
224     sum2_t t1 = s0 - s1;\
225     sum2_t t2 = s2 + s3;\
226     sum2_t t3 = s2 - s3;\
227     d0 = t0 + t2;\
228     d2 = t0 - t2;\
229     d1 = t1 + t3;\
230     d3 = t1 - t3;\
231 }
232
233 // in: a pseudo-simd number of the form x+(y<<16)
234 // return: abs(x)+(abs(y)<<16)
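// how: s has all bits set in each half whose sign bit is set, so (a+s)^s is the
// usual negate-if-negative applied to both halves at once; the carry into the
// upper half cancels the borrow introduced when a negative lower half was packed.
// e.g. with BITS_PER_SUM==16: a = (5<<16) + (-3) = 0x4FFFD -> s = 0x0000FFFF,
// (a+s)^s = 0x50003 = 3 + (5<<16).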
235 static ALWAYS_INLINE sum2_t abs2( sum2_t a )
236 {
237     sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
238     return (a+s)^s;
239 }
240
241 /****************************************************************************
242  * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
243  ****************************************************************************/
244
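/* 4x4 SATD: the first loop applies the horizontal Hadamard pass to each row of
 * differences, packing two output lanes per sum2_t; the second loop applies the
 * vertical pass and sums the absolute coefficients from both lanes.  The final
 * >>1 is the conventional normalization that keeps SATD scores on a scale
 * comparable to SAD. */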
245 static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
246 {
247     sum2_t tmp[4][2];
248     sum2_t a0, a1, a2, a3, b0, b1;
249     sum2_t sum = 0;
250     for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
251     {
252         a0 = pix1[0] - pix2[0];
253         a1 = pix1[1] - pix2[1];
254         b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
255         a2 = pix1[2] - pix2[2];
256         a3 = pix1[3] - pix2[3];
257         b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
258         tmp[i][0] = b0 + b1;
259         tmp[i][1] = b0 - b1;
260     }
261     for( int i = 0; i < 2; i++ )
262     {
263         HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
264         a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
265         sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
266     }
267     return sum >> 1;
268 }
269
270 static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
271 {
272     sum2_t tmp[4][4];
273     sum2_t a0, a1, a2, a3;
274     sum2_t sum = 0;
275     for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
276     {
277         a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
278         a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
279         a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
280         a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
281         HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
282     }
283     for( int i = 0; i < 4; i++ )
284     {
285         HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
286         sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
287     }
288     return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
289 }
290
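/* Larger SATD sizes are built by summing 8x4 (or 4x4) sub-blocks at the
 * appropriate pixel and row offsets. */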
291 #define PIXEL_SATD_C( w, h, sub )\
292 static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
293 {\
294     int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
295             + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
296     if( w==16 )\
297         sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
298             + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
299     if( h==16 )\
300         sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
301             + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
302     if( w==16 && h==16 )\
303         sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
304             + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
305     return sum;\
306 }
307 PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
308 PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
309 PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
310 PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
311 PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )
312
313
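/* SA8D: like SATD but with a full 8x8 Hadamard transform, which models the
 * 8x8 DCT better for large partitions.  The wrappers below divide the raw sum
 * by 4 (with rounding) so scores stay roughly comparable to SATD. */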
314 static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
315 {
316     sum2_t tmp[8][4];
317     sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
318     sum2_t sum = 0;
319     for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
320     {
321         a0 = pix1[0] - pix2[0];
322         a1 = pix1[1] - pix2[1];
323         b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
324         a2 = pix1[2] - pix2[2];
325         a3 = pix1[3] - pix2[3];
326         b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
327         a4 = pix1[4] - pix2[4];
328         a5 = pix1[5] - pix2[5];
329         b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
330         a6 = pix1[6] - pix2[6];
331         a7 = pix1[7] - pix2[7];
332         b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
333         HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
334     }
335     for( int i = 0; i < 4; i++ )
336     {
337         HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
338         HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
339         b0  = abs2(a0+a4) + abs2(a0-a4);
340         b0 += abs2(a1+a5) + abs2(a1-a5);
341         b0 += abs2(a2+a6) + abs2(a2-a6);
342         b0 += abs2(a3+a7) + abs2(a3-a7);
343         sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
344     }
345     return sum;
346 }
347
348 static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
349 {
350     int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
351     return (sum+2)>>2;
352 }
353
354 static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
355 {
356     int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
357             + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
358             + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
359             + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
360     return (sum+2)>>2;
361 }
362
363
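/* Hadamard AC energy of an 8x8 block of source pixels (not a difference):
 * returns the sum of absolute 8x8 Hadamard coefficients in the high 32 bits and
 * the sum over the four 4x4 sub-transforms in the low 32 bits, with the DC term
 * removed from both.  The HADAMARD_AC wrappers below sum up to four such blocks
 * and renormalize (the 8x8 sum is divided by 4, the 4x4 sum by 2); the result
 * feeds the psychovisual rate-distortion code, which compares the AC energy of
 * the source against that of the reconstruction. */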
364 static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
365 {
366     sum2_t tmp[32];
367     sum2_t a0, a1, a2, a3, dc;
368     sum2_t sum4 = 0, sum8 = 0;
369     for( int i = 0; i < 8; i++, pix+=stride )
370     {
371         sum2_t *t = tmp + (i&3) + (i&4)*4;
372         a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
373         a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
374         t[0] = a0 + a1;
375         t[4] = a0 - a1;
376         a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
377         a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
378         t[8] = a2 + a3;
379         t[12] = a2 - a3;
380     }
381     for( int i = 0; i < 8; i++ )
382     {
383         HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
384         tmp[i*4+0] = a0;
385         tmp[i*4+1] = a1;
386         tmp[i*4+2] = a2;
387         tmp[i*4+3] = a3;
388         sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
389     }
390     for( int i = 0; i < 8; i++ )
391     {
392         HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
393         sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
394     }
395     dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
396     sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
397     sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
398     return ((uint64_t)sum8<<32) + sum4;
399 }
400
401 #define HADAMARD_AC(w,h) \
402 static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
403 {\
404     uint64_t sum = pixel_hadamard_ac( pix, stride );\
405     if( w==16 )\
406         sum += pixel_hadamard_ac( pix+8, stride );\
407     if( h==16 )\
408         sum += pixel_hadamard_ac( pix+8*stride, stride );\
409     if( w==16 && h==16 )\
410         sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
411     return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
412 }
413 HADAMARD_AC( 16, 16 )
414 HADAMARD_AC( 16, 8 )
415 HADAMARD_AC( 8, 16 )
416 HADAMARD_AC( 8, 8 )
417
418
419 /****************************************************************************
420  * pixel_sad_x4
421  ****************************************************************************/
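/* sad_x3/sad_x4 score three or four candidate reference positions against the
 * same source block in one call.  The C versions simply call the single-block
 * SAD repeatedly; the point of the interface is that SIMD implementations can
 * reuse the loaded source rows across candidates, which is why the motion
 * search prefers it. */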
422 #define SAD_X( size ) \
423 static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
424 {\
425     scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
426     scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
427     scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
428 }\
429 static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
430 {\
431     scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
432     scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
433     scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
434     scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
435 }
436
437 SAD_X( 16x16 )
438 SAD_X( 16x8 )
439 SAD_X( 8x16 )
440 SAD_X( 8x8 )
441 SAD_X( 8x4 )
442 SAD_X( 4x8 )
443 SAD_X( 4x4 )
444
445 #if !HIGH_BIT_DEPTH
446 #if ARCH_UltraSPARC
447 SAD_X( 16x16_vis )
448 SAD_X( 16x8_vis )
449 SAD_X( 8x16_vis )
450 SAD_X( 8x8_vis )
451 #endif
452 #endif // !HIGH_BIT_DEPTH
453
454 /****************************************************************************
455  * pixel_satd_x4
456  * no faster than single satd, but needed for satd to be a drop-in replacement for sad
457  ****************************************************************************/
458
459 #define SATD_X( size, cpu ) \
460 static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
461 {\
462     scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
463     scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
464     scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
465 }\
466 static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
467 {\
468     scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
469     scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
470     scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
471     scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
472 }
473 #define SATD_X_DECL6( cpu )\
474 SATD_X( 16x16, cpu )\
475 SATD_X( 16x8, cpu )\
476 SATD_X( 8x16, cpu )\
477 SATD_X( 8x8, cpu )\
478 SATD_X( 8x4, cpu )\
479 SATD_X( 4x8, cpu )
480 #define SATD_X_DECL7( cpu )\
481 SATD_X_DECL6( cpu )\
482 SATD_X( 4x4, cpu )
483
484 SATD_X_DECL7()
485 #if HAVE_MMX
486 SATD_X_DECL7( _mmxext )
487 #if !HIGH_BIT_DEPTH
488 SATD_X_DECL6( _sse2 )
489 SATD_X_DECL7( _ssse3 )
490 SATD_X_DECL7( _sse4 )
491 SATD_X_DECL7( _avx )
492 #endif // !HIGH_BIT_DEPTH
493 #endif
494
495 #if !HIGH_BIT_DEPTH
496 #if HAVE_ARMV6
497 SATD_X_DECL7( _neon )
498 #endif
499 #endif // !HIGH_BIT_DEPTH
500
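/* intra_<mbcmp>_x3_*: cost the three basic intra prediction modes (V/H/DC for
 * luma, DC/H/V for 8x8 chroma) by running the C predictors into a local buffer
 * or the decode buffer (fdec) and scoring each prediction against the source
 * block with the chosen metric. */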
501 #define INTRA_MBCMP_8x8( mbcmp, cpu )\
502 void x264_intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[33], int res[3] )\
503 {\
504     ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
505     x264_predict_8x8_v_c( pix, edge );\
506     res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
507     x264_predict_8x8_h_c( pix, edge );\
508     res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
509     x264_predict_8x8_dc_c( pix, edge );\
510     res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
511 }
512
513 INTRA_MBCMP_8x8( sad, )
514 INTRA_MBCMP_8x8(sa8d, )
515 #if HIGH_BIT_DEPTH && HAVE_MMX
516 INTRA_MBCMP_8x8( sad, _mmxext)
517 INTRA_MBCMP_8x8( sad, _sse2  )
518 INTRA_MBCMP_8x8( sad, _ssse3 )
519 INTRA_MBCMP_8x8(sa8d, _sse2  )
520 #endif
521
522 #define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu )\
523 void x264_intra_##mbcmp##_x3_##size##x##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
524 {\
525     x264_predict_##size##x##size##chroma##_##pred1##_c( fdec );\
526     res[0] = x264_pixel_##mbcmp##_##size##x##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
527     x264_predict_##size##x##size##chroma##_##pred2##_c( fdec );\
528     res[1] = x264_pixel_##mbcmp##_##size##x##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
529     x264_predict_##size##x##size##chroma##_##pred3##_c( fdec );\
530     res[2] = x264_pixel_##mbcmp##_##size##x##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
531 }
532
533 INTRA_MBCMP( sad,  4,  v, h, dc,  , )
534 INTRA_MBCMP(satd,  4,  v, h, dc,  , )
535 INTRA_MBCMP( sad,  8, dc, h,  v, c, )
536 INTRA_MBCMP(satd,  8, dc, h,  v, c, )
537 INTRA_MBCMP( sad, 16,  v, h, dc,  , )
538 INTRA_MBCMP(satd, 16,  v, h, dc,  , )
539
540 #if HIGH_BIT_DEPTH && HAVE_MMX
541 INTRA_MBCMP( sad,  4,  v, h, dc,  , _mmxext)
542 INTRA_MBCMP(satd,  4,  v, h, dc,  , _mmxext)
543 INTRA_MBCMP( sad,  8, dc, h,  v, c, _mmxext)
544 INTRA_MBCMP(satd,  8, dc, h,  v, c, _mmxext)
545 INTRA_MBCMP( sad, 16,  v, h, dc,  , _mmxext)
546 INTRA_MBCMP(satd, 16,  v, h, dc,  , _mmxext)
547 INTRA_MBCMP( sad,  8, dc, h,  v, c, _sse2  )
548 INTRA_MBCMP( sad, 16,  v, h, dc,  , _sse2  )
549 INTRA_MBCMP( sad,  8, dc, h,  v, c, _ssse3 )
550 INTRA_MBCMP( sad, 16,  v, h, dc,  , _ssse3 )
551 #endif
552
553 /****************************************************************************
554  * structural similarity metric
555  ****************************************************************************/
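/* SSIM is evaluated on overlapping 8x8 windows built from 4x4 sums:
 * ssim_4x4x2_core accumulates, for two horizontally adjacent 4x4 blocks, the
 * pixel sums, the combined sum of squares and the cross term; ssim_end1 then
 * applies the standard SSIM formula
 *     (2*mu1*mu2 + C1)*(2*cov + C2) / ((mu1^2 + mu2^2 + C1)*(var1 + var2 + C2))
 * with C1 = (0.01*PIXEL_MAX)^2 and C2 = (0.03*PIXEL_MAX)^2, expressed on the
 * pre-scaled integer sums it receives. */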
556 static void ssim_4x4x2_core( const pixel *pix1, int stride1,
557                              const pixel *pix2, int stride2,
558                              int sums[2][4])
559 {
560     for( int z = 0; z < 2; z++ )
561     {
562         uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
563         for( int y = 0; y < 4; y++ )
564             for( int x = 0; x < 4; x++ )
565             {
566                 int a = pix1[x+y*stride1];
567                 int b = pix2[x+y*stride2];
568                 s1  += a;
569                 s2  += b;
570                 ss  += a*a;
571                 ss  += b*b;
572                 s12 += a*b;
573             }
574         sums[z][0] = s1;
575         sums[z][1] = s2;
576         sums[z][2] = ss;
577         sums[z][3] = s12;
578         pix1 += 4;
579         pix2 += 4;
580     }
581 }
582
583 static float ssim_end1( int s1, int s2, int ss, int s12 )
584 {
585 /* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
586  * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
587  * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
588 #if BIT_DEPTH > 9
589 #define type float
590     static const float ssim_c1 = .01*.01*PIXEL_MAX*PIXEL_MAX*64;
591     static const float ssim_c2 = .03*.03*PIXEL_MAX*PIXEL_MAX*64*63;
592 #else
593 #define type int
594     static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
595     static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
596 #endif
597     type fs1 = s1;
598     type fs2 = s2;
599     type fss = ss;
600     type fs12 = s12;
601     type vars = fss*64 - fs1*fs1 - fs2*fs2;
602     type covar = fs12*64 - fs1*fs2;
603     return (float)(2*fs1*fs2 + ssim_c1) * (float)(2*covar + ssim_c2)
604          / ((float)(fs1*fs1 + fs2*fs2 + ssim_c1) * (float)(vars + ssim_c2));
605 #undef type
606 }
607
608 static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
609 {
610     float ssim = 0.0;
611     for( int i = 0; i < width; i++ )
612         ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
613                            sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
614                            sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
615                            sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
616     return ssim;
617 }
618
619 float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
620                            pixel *pix1, int stride1,
621                            pixel *pix2, int stride2,
622                            int width, int height, void *buf )
623 {
624     int z = 0;
625     float ssim = 0.0;
626     int (*sum0)[4] = buf;
627     int (*sum1)[4] = sum0 + (width >> 2) + 3;
628     width >>= 2;
629     height >>= 2;
630     for( int y = 1; y < height; y++ )
631     {
632         for( ; z <= y; z++ )
633         {
634             XCHG( void*, sum0, sum1 );
635             for( int x = 0; x < width; x+=2 )
636                 pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
637         }
638         for( int x = 0; x < width-1; x += 4 )
639             ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
640     }
641     return ssim;
642 }
643
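/* Vertical SAD: total absolute difference between each row of a 16-pixel-wide
 * block and the row directly below it, over 15 row pairs.  On interlaced
 * content this is large at the frame stride (adjacent rows belong to different
 * fields) but small at the field stride (2*stride), which is what the
 * frame/field decision below exploits. */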
644 static int pixel_vsad( pixel *src, int stride )
645 {
646     int score = 0;
647     for( int i = 1; i < 16; i++, src += stride )
648         for( int j = 0; j < 16; j++ )
649             score += abs(src[j] - src[j+stride]);
650     return score;
651 }
652
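/* Score a 16x32 macroblock pair both ways: the frame score sums the vertical
 * activity of the two 16-row halves at the frame stride, the field score sums
 * it over the top and bottom fields at double stride.  Returns nonzero when
 * field coding looks cheaper; per the commit subject, this VSAD comparison
 * drives the adaptive MBAFF frame/field decision. */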
653 int x264_field_vsad( x264_t *h, pixel *fenc, int stride )
654 {
655     int score_field, score_frame;
656     score_frame  = h->pixf.vsad( fenc,           stride );
657     score_frame += h->pixf.vsad( fenc+16*stride, stride );
658     score_field  = h->pixf.vsad( fenc,           stride*2 );
659     score_field += h->pixf.vsad( fenc+stride,    stride*2 );
660     return (score_field < score_frame);
661 }
662
663 /****************************************************************************
664  * successive elimination
665  ****************************************************************************/
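/* ADS ("absolute differences of sums") prunes motion search candidates:
 * 'sums' holds precomputed sub-block pixel sums of the reference plane and
 * enc_dc the matching sums of the source block.  Since |sum(a) - sum(b)| is a
 * lower bound on SAD(a,b), any position whose bound plus its MV cost already
 * reaches 'thresh' can be skipped; only the surviving x offsets are written to
 * 'mvs' for full evaluation.  ads4/ads2/ads1 use 4, 2 or 1 sub-block sums
 * depending on the partition size. */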
666 static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
667                             uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
668 {
669     int nmv = 0;
670     for( int i = 0; i < width; i++, sums++ )
671     {
672         int ads = abs( enc_dc[0] - sums[0] )
673                 + abs( enc_dc[1] - sums[8] )
674                 + abs( enc_dc[2] - sums[delta] )
675                 + abs( enc_dc[3] - sums[delta+8] )
676                 + cost_mvx[i];
677         if( ads < thresh )
678             mvs[nmv++] = i;
679     }
680     return nmv;
681 }
682
683 static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
684                             uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
685 {
686     int nmv = 0;
687     for( int i = 0; i < width; i++, sums++ )
688     {
689         int ads = abs( enc_dc[0] - sums[0] )
690                 + abs( enc_dc[1] - sums[delta] )
691                 + cost_mvx[i];
692         if( ads < thresh )
693             mvs[nmv++] = i;
694     }
695     return nmv;
696 }
697
698 static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
699                             uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
700 {
701     int nmv = 0;
702     for( int i = 0; i<width; i++, sums++ )
703     {
704         int ads = abs( enc_dc[0] - sums[0] )
705                 + cost_mvx[i];
706         if( ads < thresh )
707             mvs[nmv++] = i;
708     }
709     return nmv;
710 }
711
712
713 /****************************************************************************
714  * x264_pixel_init:
715  ****************************************************************************/
716 void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
717 {
718     memset( pixf, 0, sizeof(*pixf) );
719
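    /* Fill the dispatch table with the C implementations above, then let each
     * successively newer CPU-feature block further down overwrite whatever it
     * can accelerate, so the best available version wins.  The INITn macros
     * just assign the first n partition sizes of one metric for a given cpu
     * suffix. */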
720 #define INIT2_NAME( name1, name2, cpu ) \
721     pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
722     pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
723 #define INIT4_NAME( name1, name2, cpu ) \
724     INIT2_NAME( name1, name2, cpu ) \
725     pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
726     pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
727 #define INIT5_NAME( name1, name2, cpu ) \
728     INIT4_NAME( name1, name2, cpu ) \
729     pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
730 #define INIT6_NAME( name1, name2, cpu ) \
731     INIT5_NAME( name1, name2, cpu ) \
732     pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
733 #define INIT7_NAME( name1, name2, cpu ) \
734     INIT6_NAME( name1, name2, cpu ) \
735     pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
736 #define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
737 #define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
738 #define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
739 #define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
740 #define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
741
742 #define INIT_ADS( cpu ) \
743     pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
744     pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
745     pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;
746
747     INIT7( sad, );
748     INIT7_NAME( sad_aligned, sad, );
749     INIT7( sad_x3, );
750     INIT7( sad_x4, );
751     INIT7( ssd, );
752     INIT7( satd, );
753     INIT7( satd_x3, );
754     INIT7( satd_x4, );
755     INIT4( hadamard_ac, );
756     INIT_ADS( );
757
758     pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
759     pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
760     pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
761     pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;
762
763     pixf->ssd_nv12_core = pixel_ssd_nv12_core;
764     pixf->ssim_4x4x2_core = ssim_4x4x2_core;
765     pixf->ssim_end4 = ssim_end4;
766     pixf->var2_8x8 = pixel_var2_8x8;
767     pixf->vsad = pixel_vsad;
768
769     pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
770     pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
771     pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
772     pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
773     pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
774     pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
775     pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
776     pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;
777
778 #if HIGH_BIT_DEPTH
779 #if HAVE_MMX
780     if( cpu&X264_CPU_MMXEXT )
781     {
782         INIT7( sad, _mmxext );
783         INIT7( sad_x3, _mmxext );
784         INIT7( sad_x4, _mmxext );
785         INIT7( satd, _mmxext );
786         INIT7( satd_x3, _mmxext );
787         INIT7( satd_x4, _mmxext );
788         INIT4( hadamard_ac, _mmxext );
789         INIT7( ssd, _mmxext );
790         INIT_ADS( _mmxext );
791
792         pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmxext;
793         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
794         pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmxext;
795         pixf->var2_8x8 = x264_pixel_var2_8x8_mmxext;
796
797         pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmxext;
798         pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmxext;
799         pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmxext;
800         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmxext;
801         pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmxext;
802         pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmxext;
803         pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
804     }
805     if( cpu&X264_CPU_SSE2 )
806     {
807         INIT4_NAME( sad_aligned, sad, _sse2_aligned );
808         INIT5( ssd, _sse2 );
809
810         pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
811         pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
812 #if ARCH_X86_64
813         pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
814 #endif
815         pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_sse2;
816         pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
817         pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
818         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
819         pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
820         pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
821     }
822     if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
823     {
824         INIT5( sad, _sse2 );
825         INIT2( sad_x3, _sse2 );
826         INIT2( sad_x4, _sse2 );
827         INIT_ADS( _sse2 );
828
829         if( !(cpu&X264_CPU_STACK_MOD4) )
830         {
831             INIT4( hadamard_ac, _sse2 );
832         }
833
834         pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_sse2;
835         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_sse2;
836         pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_sse2;
837     }
838     if( cpu&X264_CPU_SSE2_IS_FAST )
839     {
840         pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
841         pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
842         pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
843         pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
844         pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
845         pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
846         pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
847     }
848     if( cpu&X264_CPU_SSSE3 )
849     {
850         INIT4_NAME( sad_aligned, sad, _ssse3_aligned );
851         INIT7( sad, _ssse3 );
852         INIT7( sad_x3, _ssse3 );
853         INIT7( sad_x4, _ssse3 );
854         INIT_ADS( _ssse3 );
855
856         if( !(cpu&X264_CPU_STACK_MOD4) )
857         {
858             INIT4( hadamard_ac, _ssse3 );
859         }
860
861         pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
862         pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
863         pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_ssse3;
864         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
865         pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
866     }
867 #endif // HAVE_MMX
868 #else // !HIGH_BIT_DEPTH
869 #if HAVE_MMX
870     if( cpu&X264_CPU_MMX )
871     {
872         INIT7( ssd, _mmx );
873     }
874
875     if( cpu&X264_CPU_MMXEXT )
876     {
877         INIT7( sad, _mmxext );
878         INIT7_NAME( sad_aligned, sad, _mmxext );
879         INIT7( sad_x3, _mmxext );
880         INIT7( sad_x4, _mmxext );
881         INIT7( satd, _mmxext );
882         INIT7( satd_x3, _mmxext );
883         INIT7( satd_x4, _mmxext );
884         INIT4( hadamard_ac, _mmxext );
885         INIT_ADS( _mmxext );
886         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
887         pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmxext;
888         pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmxext;
889 #if ARCH_X86
890         pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
891         pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmxext;
892         pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
893         pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_mmxext;
894         pixf->var2_8x8 = x264_pixel_var2_8x8_mmxext;
895         pixf->vsad = x264_pixel_vsad_mmxext;
896
897         if( cpu&X264_CPU_CACHELINE_32 )
898         {
899             INIT5( sad, _cache32_mmxext );
900             INIT4( sad_x3, _cache32_mmxext );
901             INIT4( sad_x4, _cache32_mmxext );
902         }
903         else if( cpu&X264_CPU_CACHELINE_64 )
904         {
905             INIT5( sad, _cache64_mmxext );
906             INIT4( sad_x3, _cache64_mmxext );
907             INIT4( sad_x4, _cache64_mmxext );
908         }
909 #else
910         if( cpu&X264_CPU_CACHELINE_64 )
911         {
912             pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmxext;
913             pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmxext;
914             pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmxext;
915             pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmxext;
916             pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmxext;
917             pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmxext;
918             pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmxext;
919         }
920 #endif
921         pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
922         pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmxext;
923         pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmxext;
924         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmxext;
925         pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmxext;
926         pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmxext;
927         pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmxext;
928     }
929
930     if( cpu&X264_CPU_SSE2 )
931     {
932         INIT5( ssd, _sse2slow );
933         INIT2_NAME( sad_aligned, sad, _sse2_aligned );
934         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
935         pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
936         pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
937         pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
938         pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
939         pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
940 #if ARCH_X86_64
941         pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
942 #endif
943         pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
944         pixf->vsad = x264_pixel_vsad_sse2;
945     }
946
947     if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
948     {
949         INIT2( sad, _sse2 );
950         INIT2( sad_x3, _sse2 );
951         INIT2( sad_x4, _sse2 );
952         INIT6( satd, _sse2 );
953         INIT6( satd_x3, _sse2 );
954         INIT6( satd_x4, _sse2 );
955         if( !(cpu&X264_CPU_STACK_MOD4) )
956         {
957             INIT4( hadamard_ac, _sse2 );
958         }
959         INIT_ADS( _sse2 );
960         pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
961         pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
962         if( cpu&X264_CPU_CACHELINE_64 )
963         {
964             INIT2( ssd, _sse2); /* faster for width 16 on p4 */
965 #if ARCH_X86
966             INIT2( sad, _cache64_sse2 );
967             INIT2( sad_x3, _cache64_sse2 );
968             INIT2( sad_x4, _cache64_sse2 );
969 #endif
970             if( cpu&X264_CPU_SSE2_IS_FAST )
971             {
972                 pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
973                 pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
974             }
975         }
976
977         if( cpu&X264_CPU_SSE_MISALIGN )
978         {
979             INIT2( sad_x3, _sse2_misalign );
980             INIT2( sad_x4, _sse2_misalign );
981         }
982     }
983
984     if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
985     {
986         pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
987         pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
988         pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
989         pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
990         pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
991         pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
992         pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
993         pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
994     }
995
996     if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
997     {
998         INIT2( sad, _sse3 );
999         INIT2( sad_x3, _sse3 );
1000         INIT2( sad_x4, _sse3 );
1001     }
1002
1003     if( cpu&X264_CPU_SSSE3 )
1004     {
1005         if( !(cpu&X264_CPU_STACK_MOD4) )
1006         {
1007             INIT4( hadamard_ac, _ssse3 );
1008         }
1009         INIT_ADS( _ssse3 );
1010         if( !(cpu&X264_CPU_SLOW_ATOM) )
1011         {
1012             INIT7( ssd, _ssse3 );
1013             pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
1014             pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
1015             INIT7( satd, _ssse3 );
1016             INIT7( satd_x3, _ssse3 );
1017             INIT7( satd_x4, _ssse3 );
1018         }
1019         pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
1020         pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
1021         pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
1022         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
1023         pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_ssse3;
1024 #if ARCH_X86_64
1025         pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
1026 #endif
1027         pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
1028         if( cpu&X264_CPU_CACHELINE_64 )
1029         {
1030             INIT2( sad, _cache64_ssse3 );
1031             INIT2( sad_x3, _cache64_ssse3 );
1032             INIT2( sad_x4, _cache64_ssse3 );
1033         }
1034         if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
1035         {
1036             INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
1037         }
1038     }
1039
1040     if( cpu&X264_CPU_SSE4 )
1041     {
1042         INIT7( satd, _sse4 );
1043         INIT7( satd_x3, _sse4 );
1044         INIT7( satd_x4, _sse4 );
1045         if( !(cpu&X264_CPU_STACK_MOD4) )
1046         {
1047             INIT4( hadamard_ac, _sse4 );
1048         }
1049         pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
1050         pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
1051         pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_sse4;
1052         /* Slower on Conroe, so only enable under SSE4 */
1053         pixf->intra_sad_x3_8x8  = x264_intra_sad_x3_8x8_ssse3;
1054     }
1055
1056     if( cpu&X264_CPU_AVX )
1057     {
1058         INIT7( satd, _avx );
1059         INIT7( satd_x3, _avx );
1060         INIT7( satd_x4, _avx );
1061         pixf->ads[PIXEL_16x16] = x264_pixel_ads4_avx;
1062         pixf->ads[PIXEL_16x8]  = x264_pixel_ads2_avx;
1063         if( !(cpu&X264_CPU_STACK_MOD4) )
1064         {
1065             INIT4( hadamard_ac, _avx );
1066         }
1067         INIT5( ssd, _avx );
1068 #if ARCH_X86_64
1069         pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
1070         pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
1071         pixf->intra_sa8d_x3_8x8= x264_intra_sa8d_x3_8x8_avx;
1072 #endif
1073         pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
1074         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
1075         pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
1076         pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
1077         pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
1078         pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_avx;
1079         pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_avx;
1080     }
1081 #endif //HAVE_MMX
1082
1083 #if HAVE_ARMV6
1084     if( cpu&X264_CPU_ARMV6 )
1085     {
1086         pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
1087         pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
1088         pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
1089         pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
1090     }
1091     if( cpu&X264_CPU_NEON )
1092     {
1093         INIT5( sad, _neon );
1094         INIT5( sad_aligned, _neon );
1095         INIT7( sad_x3, _neon );
1096         INIT7( sad_x4, _neon );
1097         INIT7( ssd, _neon );
1098         INIT7( satd, _neon );
1099         INIT7( satd_x3, _neon );
1100         INIT7( satd_x4, _neon );
1101         INIT4( hadamard_ac, _neon );
1102         pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
1103         pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
1104         pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
1105         pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
1106         pixf->var2_8x8          = x264_pixel_var2_8x8_neon;
1107
1108         pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
1109         pixf->ssim_end4         = x264_pixel_ssim_end4_neon;
1110
1111         if( cpu&X264_CPU_FAST_NEON_MRC )
1112         {
1113             pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
1114             pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
1115             pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
1116             pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
1117         }
1118         else    // really just scheduled for dual issue / A8
1119         {
1120             INIT5( sad_aligned, _neon_dual );
1121         }
1122     }
1123 #endif
1124 #endif // HIGH_BIT_DEPTH
1125 #if HAVE_ALTIVEC
1126     if( cpu&X264_CPU_ALTIVEC )
1127     {
1128         x264_pixel_altivec_init( pixf );
1129     }
1130 #endif
1131 #if !HIGH_BIT_DEPTH
1132 #if ARCH_UltraSPARC
1133     INIT4( sad, _vis );
1134     INIT4( sad_x3, _vis );
1135     INIT4( sad_x4, _vis );
1136 #endif
1137 #endif // !HIGH_BIT_DEPTH
1138
1139     pixf->ads[PIXEL_8x16] =
1140     pixf->ads[PIXEL_8x4] =
1141     pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
1142     pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
1143 }
1144