Fix potential problem with overflows in ssd_nv12
[x262.git] common/pixel.c
/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2010 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Jason Garrett-Glaser <darkshikari@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#endif
#if ARCH_UltraSparc
#   include "sparc/pixel.h"
#endif


/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            i_sum += abs( pix1[x] - pix2[x] );      \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}


PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )


/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            int d = pix1[x] - pix2[x];              \
            i_sum += d*d;                           \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )

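/* Sum SSD over an arbitrary width x height region. The partitioning below
 * dispatches to the largest SIMD-able block sizes first (16x16 when all
 * pointers and strides are 16-byte aligned, otherwise 8x16/8x8 strips) and
 * mops up the ragged right/bottom edges with a scalar loop. The accumulator
 * is 64-bit because a whole frame can overflow 32 bits: at 8-bit depth the
 * per-pixel SSD maxes out at 255^2 = 65025, so a 1920x1080 plane can reach
 * roughly 1.3e11, well past 2^32. */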
uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    uint64_t i_ssd = 0;
    int y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}

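/* SSD on an NV12 chroma plane, where U and V samples are interleaved in
 * each row (U at even offsets, V at odd), returning the two sums
 * separately. This C core accumulates straight into uint64_t, so it cannot
 * overflow no matter how large the plane is; accelerated cores that hold
 * intermediate sums in narrower registers are the ones with overflow
 * potential. The wrapper below hands the selected core only the
 * multiple-of-8 portion of the width and finishes the remaining columns
 * with this C version. */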
static void pixel_ssd_nv12_core( pixel *pixuv1, int stride1, pixel *pixuv2, int stride2, int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    *ssd_u = 0, *ssd_v = 0;
    for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
        for( int x = 0; x < width; x++ )
        {
            int du = pixuv1[2*x]   - pixuv2[2*x];
            int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
            *ssd_u += du*du;
            *ssd_v += dv*dv;
        }
}

void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
    if( i_width&7 )
    {
        uint64_t tmp[2];
        pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
        *ssd_u += tmp[0];
        *ssd_v += tmp[1];
    }
}

/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
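/* Returns the pixel sum in the low 32 bits and the sum of squares in the
 * high 32 bits of one uint64_t, so a caller can unpack the two halves and
 * compute variance as sqr - sum*sum/(w*w). */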
#define PIXEL_VAR_C( name, w ) \
static uint64_t name( pixel *pix, int i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < w; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}

PIXEL_VAR_C( x264_pixel_var_16x16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8 )

/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
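/* Variance of the difference between two 8x8 blocks: sqr accumulates the
 * sum of squared differences (also returned through *ssd), and the squared
 * block mean, (sum*sum)>>6 for 64 pixels, is subtracted out. */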
static int pixel_var2_8x8( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd )
{
    uint32_t var = 0, sum = 0, sqr = 0;
    for( int y = 0; y < 8; y++ )
    {
        for( int x = 0; x < 8; x++ )
        {
            int diff = pix1[x] - pix2[x];
            sum += diff;
            sqr += diff * diff;
        }
        pix1 += i_stride1;
        pix2 += i_stride2;
    }
    sum = abs(sum);
    var = sqr - ((uint64_t)sum * sum >> 6);
    *ssd = sqr;
    return var;
}


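/* One 4-point Hadamard butterfly stage, computed as two levels of
 * sums/differences. Written as a matrix:
 *
 *   [d0]   [ 1  1  1  1 ] [s0]
 *   [d1] = [ 1 -1  1 -1 ] [s1]
 *   [d2]   [ 1  1 -1 -1 ] [s2]
 *   [d3]   [ 1 -1 -1  1 ] [s3]
 */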
#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    int t0 = s0 + s1;\
    int t1 = s0 - s1;\
    int t2 = s2 + s3;\
    int t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}

// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
static ALWAYS_INLINE uint32_t abs2( uint32_t a )
{
    uint32_t s = ((a>>15)&0x10001)*0xffff;
    return (a+s)^s;
}
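
/* An illustrative scalar equivalent of abs2, not part of the upstream code
 * (the name abs2_ref is hypothetical; for reference only). The two lanes
 * are packed additively, a == x + (y<<16), so a negative x borrows from the
 * high half; decoding must undo that borrow. The branchless version above
 * handles it for free: the mask s is 0xffff per negative lane, and the
 * carry out of the low half of (a+s) restores the borrowed high lane
 * before the xor completes the two's-complement negation. */
#if 0
static uint32_t abs2_ref( uint32_t a )
{
    int x = (int16_t)a;                /* low lane, sign-extended */
    int y = ((int32_t)a - x) >> 16;    /* high lane, borrow undone */
    return (uint32_t)abs(x) + ((uint32_t)abs(y) << 16);
}
#endif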

/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/

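/* The pseudo-SIMD packing: b0/b1 carry (a0+a1) in the low halfword and
 * (a0-a1) in the high halfword, so each HADAMARD4 over uint32_t performs
 * the second transform stage on two coefficient columns at once. abs2 then
 * takes both halves' absolute values, and the halfword sums are merged
 * before the conventional >>1 SATD scaling. */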
static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[4][2];
    uint32_t a0, a1, a2, a3, b0, b1;
    int sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<16);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<16);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((uint16_t)a0) + (a0>>16);
    }
    return sum >> 1;
}

static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[4][4];
    uint32_t a0, a1, a2, a3;
    int sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((pix1[4] - pix2[4]) << 16);
        a1 = (pix1[1] - pix2[1]) + ((pix1[5] - pix2[5]) << 16);
        a2 = (pix1[2] - pix2[2]) + ((pix1[6] - pix2[6]) << 16);
        a3 = (pix1[3] - pix2[3]) + ((pix1[7] - pix2[7]) << 16);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((uint16_t)sum) + ((uint32_t)sum>>16)) >> 1;
}

#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )


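/* SA8D: sum of absolute coefficients of an 8x8 Hadamard transform of the
 * difference block, using the same halfword packing as satd above. The
 * 8-point transform is built from HADAMARD4 plus one extra stage of
 * sums/differences (the a+b / a-b pairs in the second loop). The public
 * wrappers round-scale by (sum+2)>>2 so scores stay on a scale comparable
 * to SATD. */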
static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[8][4];
    uint32_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    int sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<16);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<16);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<16);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<16);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (uint16_t)b0 + (b0>>16);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}


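/* Hadamard AC energy of an 8x8 block of source pixels (not differences):
 * sum4 totals the coefficients of the four 4x4 transforms and sum8 those of
 * the full 8x8 transform, each with the DC contribution removed, and the
 * two results come back packed as (sum8<<32)+sum4. The HADAMARD_AC
 * wrappers below add up the 8x8 tiles, then divide the 8x8 half by 4 and
 * the 4x4 half by 2 in a single shift-and-mask step. */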
static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
{
    uint32_t tmp[32];
    uint32_t a0, a1, a2, a3, dc;
    int sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        uint32_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((pix[0]-pix[1])<<16);
        a1 = (pix[2]+pix[3]) + ((pix[2]-pix[3])<<16);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((pix[4]-pix[5])<<16);
        a3 = (pix[6]+pix[7]) + ((pix[6]-pix[7])<<16);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (uint16_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (uint16_t)sum4 + ((uint32_t)sum4>>16) - dc;
    sum8 = (uint16_t)sum8 + ((uint32_t)sum8>>16) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}

#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )


/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#if !X264_HIGH_BIT_DEPTH
#if ARCH_UltraSparc
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
#endif // !X264_HIGH_BIT_DEPTH

/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if !X264_HIGH_BIT_DEPTH
#if HAVE_MMX
SATD_X_DECL7( _mmxext )
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
#endif

#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
#endif // !X264_HIGH_BIT_DEPTH

#define INTRA_MBCMP_8x8( mbcmp )\
void x264_intra_##mbcmp##_x3_8x8( pixel *fenc, pixel edge[33], int res[3] )\
{\
    pixel pix[8*FDEC_STRIDE];\
    x264_predict_8x8_v_c( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h_c( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc_c( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8(sad)
INTRA_MBCMP_8x8(sa8d)

#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma )\
void x264_intra_##mbcmp##_x3_##size##x##size##chroma( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##x##size##chroma##_##pred1##_c( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred2##_c( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred3##_c( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP(sad, 4, v, h, dc, )
INTRA_MBCMP(satd, 4, v, h, dc, )
INTRA_MBCMP(sad, 8, dc, h, v, c )
INTRA_MBCMP(satd, 8, dc, h, v, c )
INTRA_MBCMP(sad, 16, v, h, dc, )
INTRA_MBCMP(satd, 16, v, h, dc, )

/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
static void ssim_4x4x2_core( const pixel *pix1, int stride1,
                             const pixel *pix2, int stride2,
                             int sums[2][4])
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}

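/* Combine the sums of one 8x8 window (four 4x4 blocks, n = 64 pixels) into
 * an SSIM score. This is the usual
 *   ssim = (2*mu1*mu2 + C1)*(2*cov + C2) / ((mu1^2 + mu2^2 + C1)*(var1 + var2 + C2))
 * rewritten over raw integer sums, with the constants pre-scaled to match;
 * the 64*63 factor in ssim_c2 corresponds to the unbiased variance
 * estimate over 64 samples. */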
static float ssim_end1( int s1, int s2, int ss, int s12 )
{
    static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
    int vars = ss*64 - s1*s1 - s2*s2;
    int covar = s12*64 - s1*s2;
    return (float)(2*s1*s2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(s1*s1 + s2*s2 + ssim_c1) * (float)(vars + ssim_c2));
}

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}

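/* Whole-plane SSIM on a 4x4 grid: each ssim_end1 window covers a 2x2 group
 * of 4x4 sums (an 8x8 pixel window, overlapping its neighbors by 4 in each
 * direction). sum0 and sum1 are two row buffers of 4x4 sums that swap
 * roles as the loop walks down the image, so each row of sums is computed
 * exactly once. */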
float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, int stride1,
                           pixel *pix2, int stride2,
                           int width, int height, void *buf )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + (width >> 2) + 3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    return ssim;
}


/****************************************************************************
 * successive elimination
 ****************************************************************************/
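/* Successive elimination: a candidate position can be rejected without a
 * full SAD because |sum(enc) - sum(ref)| <= SAD(enc, ref) for any block.
 * enc_dc holds the pixel sums of the source block's 8x8 sub-blocks (four
 * for 16x16, two for 16x8, one for 8x8), sums[] holds precomputed sliding
 * sums along the reference row, and only positions whose DC distance plus
 * MV cost beat the threshold are kept in mvs[] for full evaluation. */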
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}


/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
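
/* For illustration, INIT2( sad, _mmxext ) expands to:
 *     pixf->sad[PIXEL_16x16] = x264_pixel_sad_16x16_mmxext;
 *     pixf->sad[PIXEL_16x8]  = x264_pixel_sad_16x8_mmxext;
 * and each larger INITn adds the next smaller partitions. */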

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;

    INIT7( sad, );
    INIT7_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT7( ssd, );
    INIT7( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;

    pixf->ssd_nv12_core = pixel_ssd_nv12_core;
    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4 = ssim_end4;
    pixf->var2_8x8 = pixel_var2_8x8;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;

#if !X264_HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT7( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMXEXT )
    {
        INIT7( sad, _mmxext );
        INIT7_NAME( sad_aligned, sad, _mmxext );
        INIT7( sad_x3, _mmxext );
        INIT7( sad_x4, _mmxext );
        INIT7( satd, _mmxext );
        INIT7( satd_x3, _mmxext );
        INIT7( satd_x4, _mmxext );
        INIT4( hadamard_ac, _mmxext );
        INIT_ADS( _mmxext );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmxext;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmxext;
#if ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmxext;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_mmxext;
        pixf->var2_8x8 = x264_pixel_var2_8x8_mmxext;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmxext );
            INIT4( sad_x3, _cache32_mmxext );
            INIT4( sad_x4, _cache32_mmxext );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmxext );
            INIT4( sad_x3, _cache64_mmxext );
            INIT4( sad_x4, _cache64_mmxext );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmxext;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmxext;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmxext;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmxext;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmxext;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmxext;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmxext;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        INIT_ADS( _sse2 );
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2 ); /* faster for width 16 on p4 */
#if ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }

        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }

    if( cpu&X264_CPU_SSSE3 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }
        INIT_ADS( _ssse3 );
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT7( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_ssse3;
            INIT7( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_ssse3;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }

    if( cpu&X264_CPU_SSE4 )
    {
        INIT7( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse4;
        pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_sse4;
        /* Slower on Conroe, so only enable under SSE4 */
        pixf->intra_sad_x3_8x8  = x264_intra_sad_x3_8x8_ssse3;
    }
#endif //HAVE_MMX

#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2_8x8          = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#endif // !X264_HIGH_BIT_DEPTH
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#if !X264_HIGH_BIT_DEPTH
#if ARCH_UltraSparc
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif
#endif // !X264_HIGH_BIT_DEPTH

    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}
962