/*****************************************************************************
 * pixel.c: h264 encoder
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#endif
#if ARCH_UltraSparc
#   include "sparc/pixel.h"
#endif


/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            i_sum += abs( pix1[x] - pix2[x] );      \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}


PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )


/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            int d = pix1[x] - pix2[x];              \
            i_sum += d*d;                           \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )

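/* SSD over an arbitrary width and height. Strategy, as implemented below: tile
 * with 16x16 calls while both pointers and both strides are 16-byte aligned,
 * otherwise with 8x16/8x8 calls, then finish any ragged right or bottom edge
 * (width or height not a multiple of 8) with a plain scalar loop. The 64-bit
 * accumulator keeps whole-plane SSD from overflowing. */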
int64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    int64_t i_ssd = 0;
    int y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}


/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
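/* Packs two partial results into the return value: the pixel sum in the low
 * 32 bits and the sum of squared pixels in the high 32 bits, so a single call
 * gives the caller what it needs to derive the block variance (e.g.
 * sqr - sum*sum/(w*w) after unpacking). */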
#define PIXEL_VAR_C( name, w ) \
static uint64_t name( pixel *pix, int i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < w; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}

PIXEL_VAR_C( x264_pixel_var_16x16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8 )

/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
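/* Works on the difference between two 8x8 blocks (e.g. source vs. prediction):
 * returns 64 times the variance of that difference, sqr - sum*sum/64, and
 * stores the plain SSD of the pair in *ssd. */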
static int pixel_var2_8x8( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd )
{
    uint32_t var = 0, sum = 0, sqr = 0;
    for( int y = 0; y < 8; y++ )
    {
        for( int x = 0; x < 8; x++ )
        {
            int diff = pix1[x] - pix2[x];
            sum += diff;
            sqr += diff * diff;
        }
        pix1 += i_stride1;
        pix2 += i_stride2;
    }
    sum = abs(sum);
    var = sqr - (sum * sum >> 6);
    *ssd = sqr;
    return var;
}


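/* One 4-point Hadamard butterfly: d0..d3 are the four +/- combinations of
 * s0..s3 (++++, +-+-, ++--, +--+). Applied first along rows and then along
 * columns of a 4x4 difference block it yields the 2-D Hadamard transform that
 * satd sums. */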
#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    int t0 = s0 + s1;\
    int t1 = s0 - s1;\
    int t2 = s2 + s3;\
    int t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}

// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
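// how: the multiply turns each half's sign bit into a 0xffff mask for that
// half; (a+s)^s is then the usual branchless two's-complement abs applied to
// both halves at once (for a negative 16-bit value v, (v+0xffff)^0xffff == -v;
// for a non-negative half, s is 0 and the value passes through unchanged).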
static ALWAYS_INLINE uint32_t abs2( uint32_t a )
{
    uint32_t s = ((a>>15)&0x10001)*0xffff;
    return (a+s)^s;
}

/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/

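/* The 4x4 satd keeps two transform coefficients packed per 32-bit word: b0/b1
 * hold the horizontal butterfly results with the "sum" term in the low half
 * and the "difference" term in the high half, so the vertical HADAMARD4 and
 * abs2 each process two coefficients at once. The final >>1 is the usual satd
 * convention of reporting half the sum of absolute Hadamard coefficients. */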
static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[4][2];
    uint32_t a0, a1, a2, a3, b0, b1;
    int sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<16);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<16);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((uint16_t)a0) + (a0>>16);
    }
    return sum >> 1;
}

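/* satd_8x4 evaluates two 4x4 transforms at once: the differences of columns
 * 0-3 sit in the low 16 bits of each word and those of columns 4-7 in the high
 * 16 bits, so a single pass of butterflies covers the whole 8-wide strip; the
 * return statement folds the two packed halves together before the same >>1
 * normalisation. */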
static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[4][4];
    uint32_t a0, a1, a2, a3;
    int sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((pix1[4] - pix2[4]) << 16);
        a1 = (pix1[1] - pix2[1]) + ((pix1[5] - pix2[5]) << 16);
        a2 = (pix1[2] - pix2[2]) + ((pix1[6] - pix2[6]) << 16);
        a3 = (pix1[3] - pix2[3]) + ((pix1[7] - pix2[7]) << 16);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((uint16_t)sum) + ((uint32_t)sum>>16)) >> 1;
}

#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )


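/* sa8d_8x8: sum of absolute values of the 8x8 Hadamard transform of the
 * difference block, using the same packed-halfword trick as satd. The raw sum
 * is returned here; the public wrappers below apply (sum+2)>>2, which keeps
 * sa8d on roughly the same scale as satd of the same block. */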
static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[8][4];
    uint32_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    int sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<16);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<16);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<16);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<16);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (uint16_t)b0 + (b0>>16);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}


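/* pixel_hadamard_ac transforms one 8x8 block of source pixels (not a
 * difference) and packs two sums into the return value: the low 32 bits hold
 * the sum of absolute coefficients of the four 4x4 Hadamard transforms, the
 * high 32 bits the sum after a further transform stage across the sub-blocks,
 * with the DC contribution subtracted from both (hence "ac"). The HADAMARD_AC
 * wrappers below accumulate the per-8x8 results and renormalise: the high part
 * is divided by 4 and the low part by 2, matching the sa8d and satd scales. */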
static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
{
    uint32_t tmp[32];
    uint32_t a0, a1, a2, a3, dc;
    int sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        uint32_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((pix[0]-pix[1])<<16);
        a1 = (pix[2]+pix[3]) + ((pix[2]-pix[3])<<16);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((pix[4]-pix[5])<<16);
        a3 = (pix[6]+pix[7]) + ((pix[6]-pix[7])<<16);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (uint16_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (uint16_t)sum4 + ((uint32_t)sum4>>16) - dc;
    sum8 = (uint16_t)sum8 + ((uint32_t)sum8>>16) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}

#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )


/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#if ARCH_UltraSparc
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif

/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if HAVE_MMX
SATD_X_DECL7( _mmxext )
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
#endif

#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif

#define INTRA_MBCMP_8x8( mbcmp )\
void x264_intra_##mbcmp##_x3_8x8( pixel *fenc, pixel edge[33], int res[3] )\
{\
    pixel pix[8*FDEC_STRIDE];\
    x264_predict_8x8_v_c( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h_c( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc_c( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8(sad)
INTRA_MBCMP_8x8(sa8d)

#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma )\
void x264_intra_##mbcmp##_x3_##size##x##size##chroma( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##x##size##chroma##_##pred1##_c( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred2##_c( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred3##_c( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP(sad, 4, v, h, dc, )
INTRA_MBCMP(satd, 4, v, h, dc, )
INTRA_MBCMP(sad, 8, dc, h, v, c )
INTRA_MBCMP(satd, 8, dc, h, v, c )
INTRA_MBCMP(sad, 16, v, h, dc, )
INTRA_MBCMP(satd, 16, v, h, dc, )

/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
static void ssim_4x4x2_core( const pixel *pix1, int stride1,
                             const pixel *pix2, int stride2,
                             int sums[2][4])
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}

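/* Per-window SSIM term. s1/s2 are the pixel sums, ss the sum of squares and
 * s12 the cross sum over one 8x8 window (assembled from four 4x4 sub-sums by
 * ssim_end4). vars and covar are the scaled (co)variance terms, and ssim_c1 /
 * ssim_c2 play the role of the standard SSIM stabilising constants
 * (K1 = 0.01, K2 = 0.03, L = 255), scaled for the integer sums used here, so
 * the expression below follows the usual
 * (2*mu_x*mu_y + C1)(2*cov + C2) / ((mu_x^2 + mu_y^2 + C1)(var_x + var_y + C2)). */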
static float ssim_end1( int s1, int s2, int ss, int s12 )
{
    static const int ssim_c1 = (int)(.01*.01*255*255*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*255*255*64*63 + .5);
    int vars = ss*64 - s1*s1 - s2*s2;
    int covar = s12*64 - s1*s2;
    return (float)(2*s1*s2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(s1*s1 + s2*s2 + ssim_c1) * (float)(vars + ssim_c2));
}

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}

float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, int stride1,
                           pixel *pix2, int stride2,
                           int width, int height, void *buf )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + width/4+3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    return ssim;
}


/****************************************************************************
 * successive elimination
 ****************************************************************************/
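/* "ads" = absolute differences of sums. enc_dc[] holds sums of sub-blocks of
 * the encoded block and sums[] the corresponding precomputed sums at each
 * candidate position along a row of the search area. Because the absolute
 * difference of two block sums is a lower bound on the SAD of those blocks,
 * any candidate whose ads value plus MV cost already reaches thresh cannot
 * beat the current best and is dropped; the indices of the survivors are
 * written to mvs[] and their count returned. ads4, ads2 and ads1 differ only
 * in how many sub-block sums they compare (see INIT_ADS below for the
 * partition mapping). */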
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}


/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
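/* Fills the function-pointer table: the plain C implementations are installed
 * first, then each cpu-flag block overwrites individual entries with the
 * matching assembly (MMX -> MMXEXT -> SSE2 -> SSSE3 -> SSE4, or ARMv6/NEON,
 * AltiVec, VIS), so when several flags are set the most specialised version
 * registered last wins. */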
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;

    INIT7( sad, );
    INIT7_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT7( ssd, );
    INIT7( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;

    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4 = ssim_end4;
    pixf->var2_8x8 = pixel_var2_8x8;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;

#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT7( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMXEXT )
    {
        INIT7( sad, _mmxext );
        INIT7_NAME( sad_aligned, sad, _mmxext );
        INIT7( sad_x3, _mmxext );
        INIT7( sad_x4, _mmxext );
        INIT7( satd, _mmxext );
        INIT7( satd_x3, _mmxext );
        INIT7( satd_x4, _mmxext );
        INIT4( hadamard_ac, _mmxext );
        INIT_ADS( _mmxext );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmxext;
#if ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmxext;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_mmxext;
        pixf->var2_8x8 = x264_pixel_var2_8x8_mmxext;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmxext );
            INIT4( sad_x3, _cache32_mmxext );
            INIT4( sad_x4, _cache32_mmxext );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmxext );
            INIT4( sad_x3, _cache64_mmxext );
            INIT4( sad_x4, _cache64_mmxext );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmxext;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmxext;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmxext;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmxext;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmxext;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmxext;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmxext;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        INIT_ADS( _sse2 );
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2 ); /* faster for width 16 on p4 */
#if ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }

        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }

    if( cpu&X264_CPU_SSSE3 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }
        INIT_ADS( _ssse3 );
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT7( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
            INIT7( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_ssse3;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }

    if( cpu&X264_CPU_SSE4 )
    {
        INIT7( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
    }
#endif //HAVE_MMX

#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2_8x8          = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#if ARCH_UltraSparc
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif

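    /* No dedicated ads kernels exist for the remaining partition sizes, so
     * alias them to the closest ones initialised above. */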
    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}