/*****************************************************************************
 * dct.c: transform and zigzag
 *****************************************************************************
 * Copyright (C) 2003-2014 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Henrik Gramner <henrik@gramner.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"
#if HAVE_MMX
#   include "x86/dct.h"
#endif
#if ARCH_PPC
#   include "ppc/dct.h"
#endif
#if ARCH_ARM
#   include "arm/dct.h"
#endif

/* the inverse of the scaling factors introduced by 8x8 fdct */
/* uint32_t is used for the benefit of the asm implementation of trellis;
 * the actual values fit in uint16_t. */
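/* For reference: FIX8(f) is x264's 8-bit fixed-point helper, defined in the
 * common headers as roughly ((int)((f)*(1<<8)+.5)), so W(0) below is 256 and
 * W(2) is 410. */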
#define W(i) (i==0 ? FIX8(1.0000) :\
              i==1 ? FIX8(0.8859) :\
              i==2 ? FIX8(1.6000) :\
              i==3 ? FIX8(0.9415) :\
              i==4 ? FIX8(1.2651) :\
              i==5 ? FIX8(1.1910) :0)
const uint32_t x264_dct8_weight_tab[64] = {
    W(0), W(3), W(4), W(3),  W(0), W(3), W(4), W(3),
    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),
    W(4), W(5), W(2), W(5),  W(4), W(5), W(2), W(5),
    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),

    W(0), W(3), W(4), W(3),  W(0), W(3), W(4), W(3),
    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),
    W(4), W(5), W(2), W(5),  W(4), W(5), W(2), W(5),
    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1)
};
#undef W

#define W(i) (i==0 ? FIX8(1.76777) :\
              i==1 ? FIX8(1.11803) :\
              i==2 ? FIX8(0.70711) :0)
const uint32_t x264_dct4_weight_tab[16] = {
    W(0), W(1), W(0), W(1),
    W(1), W(2), W(1), W(2),
    W(0), W(1), W(0), W(1),
    W(1), W(2), W(1), W(2)
};
#undef W

/* squares of the dct4 weights above (W(1) and W(2) reconstructed by
 * squaring: 1.11803^2 = 1.25, 0.70711^2 = 0.5) */
#define W(i) (i==0 ? FIX8(3.125) :\
              i==1 ? FIX8(1.25) :\
              i==2 ? FIX8(0.5) :0)
const uint32_t x264_dct4_weight2_tab[16] = {
    W(0), W(1), W(0), W(1),
    W(1), W(2), W(1), W(2),
    W(0), W(1), W(0), W(1),
    W(1), W(2), W(1), W(2)
};
#undef W

#define W(i) (i==0 ? FIX8(1.00000) :\
              i==1 ? FIX8(0.78487) :\
              i==2 ? FIX8(2.56132) :\
              i==3 ? FIX8(0.88637) :\
              i==4 ? FIX8(1.60040) :\
              i==5 ? FIX8(1.41850) :0)
const uint32_t x264_dct8_weight2_tab[64] = {
    W(0), W(3), W(4), W(3),  W(0), W(3), W(4), W(3),
    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),
    W(4), W(5), W(2), W(5),  W(4), W(5), W(2), W(5),
    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),

    W(0), W(3), W(4), W(3),  W(0), W(3), W(4), W(3),
    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),
    W(4), W(5), W(2), W(5),  W(4), W(5), W(2), W(5),
    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1)
};
#undef W

#define W(i) (i==0 ? FIX8(0.25) :0)
const uint32_t x264_mpeg2_weight_tab[64] = {
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),

    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0)
};
#undef W

#define W(i) (i==0 ? FIX8(0.015625) :0)
const uint32_t x264_mpeg2_weight2_tab[64] = {
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),

    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0),
    W(0), W(0), W(0), W(0), W(0), W(0), W(0), W(0)
};
#undef W

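/* dct4x4dc: 4x4 Walsh-Hadamard transform of the 16 luma DC coefficients of
 * an intra-16x16 macroblock. The ( +1 ) >> 1 in the second pass halves the
 * result, matching the scaling the quantizer expects for the DC block. */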
static void dct4x4dc( dctcoef d[16] )
{
    dctcoef tmp[16];

    for( int i = 0; i < 4; i++ )
    {
        int s01 = d[i*4+0] + d[i*4+1];
        int d01 = d[i*4+0] - d[i*4+1];
        int s23 = d[i*4+2] + d[i*4+3];
        int d23 = d[i*4+2] - d[i*4+3];

        tmp[0*4+i] = s01 + s23;
        tmp[1*4+i] = s01 - s23;
        tmp[2*4+i] = d01 - d23;
        tmp[3*4+i] = d01 + d23;
    }

    for( int i = 0; i < 4; i++ )
    {
        int s01 = tmp[i*4+0] + tmp[i*4+1];
        int d01 = tmp[i*4+0] - tmp[i*4+1];
        int s23 = tmp[i*4+2] + tmp[i*4+3];
        int d23 = tmp[i*4+2] - tmp[i*4+3];

        d[i*4+0] = ( s01 + s23 + 1 ) >> 1;
        d[i*4+1] = ( s01 - s23 + 1 ) >> 1;
        d[i*4+2] = ( d01 - d23 + 1 ) >> 1;
        d[i*4+3] = ( d01 + d23 + 1 ) >> 1;
    }
}

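/* idct4x4dc: inverse of the above. No rounding shift is applied here; the
 * remaining normalization is folded into dequantization. */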
static void idct4x4dc( dctcoef d[16] )
{
    dctcoef tmp[16];

    for( int i = 0; i < 4; i++ )
    {
        int s01 = d[i*4+0] + d[i*4+1];
        int d01 = d[i*4+0] - d[i*4+1];
        int s23 = d[i*4+2] + d[i*4+3];
        int d23 = d[i*4+2] - d[i*4+3];

        tmp[0*4+i] = s01 + s23;
        tmp[1*4+i] = s01 - s23;
        tmp[2*4+i] = d01 - d23;
        tmp[3*4+i] = d01 + d23;
    }

    for( int i = 0; i < 4; i++ )
    {
        int s01 = tmp[i*4+0] + tmp[i*4+1];
        int d01 = tmp[i*4+0] - tmp[i*4+1];
        int s23 = tmp[i*4+2] + tmp[i*4+3];
        int d23 = tmp[i*4+2] - tmp[i*4+3];

        d[i*4+0] = s01 + s23;
        d[i*4+1] = s01 - s23;
        d[i*4+2] = d01 - d23;
        d[i*4+3] = d01 + d23;
    }
}

static void dct2x4dc( dctcoef dct[8], dctcoef dct4x4[8][16] )
{
    int a0 = dct4x4[0][0] + dct4x4[1][0];
    int a1 = dct4x4[2][0] + dct4x4[3][0];
    int a2 = dct4x4[4][0] + dct4x4[5][0];
    int a3 = dct4x4[6][0] + dct4x4[7][0];
    int a4 = dct4x4[0][0] - dct4x4[1][0];
    int a5 = dct4x4[2][0] - dct4x4[3][0];
    int a6 = dct4x4[4][0] - dct4x4[5][0];
    int a7 = dct4x4[6][0] - dct4x4[7][0];
    /* second butterfly stage of the 2x4 Hadamard */
    int b0 = a0 + a1;
    int b1 = a2 + a3;
    int b2 = a4 + a5;
    int b3 = a6 + a7;
    int b4 = a0 - a1;
    int b5 = a2 - a3;
    int b6 = a4 - a5;
    int b7 = a6 - a7;
    dct[0] = b0 + b1;
    dct[1] = b2 + b3;
    dct[2] = b0 - b1;
    dct[3] = b2 - b3;
    dct[4] = b4 - b5;
    dct[5] = b6 - b7;
    dct[6] = b4 + b5;
    dct[7] = b6 + b7;
    /* the extracted DCs are cleared from their 4x4 blocks */
    dct4x4[0][0] = 0;
    dct4x4[1][0] = 0;
    dct4x4[2][0] = 0;
    dct4x4[3][0] = 0;
    dct4x4[4][0] = 0;
    dct4x4[5][0] = 0;
    dct4x4[6][0] = 0;
    dct4x4[7][0] = 0;
}

static inline void pixel_sub_wxh( dctcoef *diff, int i_size,
                                  pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    for( int y = 0; y < i_size; y++ )
    {
        for( int x = 0; x < i_size; x++ )
            diff[x + y*i_size] = pix1[x] - pix2[x];
        pix1 += i_pix1;
        pix2 += i_pix2;
    }
}

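/* Note: FENC_STRIDE and FDEC_STRIDE are the fixed strides of x264's
 * per-macroblock encode and reconstruction caches (16 and 32 pixels
 * respectively), which is why most functions below take no stride
 * arguments. */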
static void sub4x4_dct( dctcoef dct[16], pixel *pix1, pixel *pix2 )
{
    dctcoef d[16];
    dctcoef tmp[16];

    pixel_sub_wxh( d, 4, pix1, FENC_STRIDE, pix2, FDEC_STRIDE );

    for( int i = 0; i < 4; i++ )
    {
        int s03 = d[i*4+0] + d[i*4+3];
        int s12 = d[i*4+1] + d[i*4+2];
        int d03 = d[i*4+0] - d[i*4+3];
        int d12 = d[i*4+1] - d[i*4+2];

        tmp[0*4+i] =   s03 +   s12;
        tmp[1*4+i] = 2*d03 +   d12;
        tmp[2*4+i] =   s03 -   s12;
        tmp[3*4+i] =   d03 - 2*d12;
    }

    for( int i = 0; i < 4; i++ )
    {
        int s03 = tmp[i*4+0] + tmp[i*4+3];
        int s12 = tmp[i*4+1] + tmp[i*4+2];
        int d03 = tmp[i*4+0] - tmp[i*4+3];
        int d12 = tmp[i*4+1] - tmp[i*4+2];

        dct[i*4+0] =   s03 +   s12;
        dct[i*4+1] = 2*d03 +   d12;
        dct[i*4+2] =   s03 -   s12;
        dct[i*4+3] =   d03 - 2*d12;
    }
}

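/* sub4x4_dct is the H.264 forward 4x4 integer transform (the
 * [1 1 1 1; 2 1 -1 -2; 1 -1 -1 1; 1 -2 2 -1] butterfly), applied to columns
 * and then rows. The basis vectors have unequal norms; no normalization is
 * done here because it is absorbed into the quantization tables. */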
static void sub8x8_dct( dctcoef dct[4][16], pixel *pix1, pixel *pix2 )
{
    sub4x4_dct( dct[0], &pix1[0],               &pix2[0] );
    sub4x4_dct( dct[1], &pix1[4],               &pix2[4] );
    sub4x4_dct( dct[2], &pix1[4*FENC_STRIDE+0], &pix2[4*FDEC_STRIDE+0] );
    sub4x4_dct( dct[3], &pix1[4*FENC_STRIDE+4], &pix2[4*FDEC_STRIDE+4] );
}

static void sub16x16_dct( dctcoef dct[16][16], pixel *pix1, pixel *pix2 )
{
    sub8x8_dct( &dct[ 0], &pix1[0],               &pix2[0] );
    sub8x8_dct( &dct[ 4], &pix1[8],               &pix2[8] );
    sub8x8_dct( &dct[ 8], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    sub8x8_dct( &dct[12], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}

static int sub4x4_dct_dc( pixel *pix1, pixel *pix2 )
{
    int sum = 0;
    for( int i=0; i<4; i++, pix1 += FENC_STRIDE, pix2 += FDEC_STRIDE )
        sum += pix1[0] + pix1[1] + pix1[2] + pix1[3]
             - pix2[0] - pix2[1] - pix2[2] - pix2[3];
    return sum;
}

static void sub8x8_dct_dc( dctcoef dct[4], pixel *pix1, pixel *pix2 )
{
    dct[0] = sub4x4_dct_dc( &pix1[0],               &pix2[0] );
    dct[1] = sub4x4_dct_dc( &pix1[4],               &pix2[4] );
    dct[2] = sub4x4_dct_dc( &pix1[4*FENC_STRIDE+0], &pix2[4*FDEC_STRIDE+0] );
    dct[3] = sub4x4_dct_dc( &pix1[4*FENC_STRIDE+4], &pix2[4*FDEC_STRIDE+4] );

    /* 2x2 DC transform */
    int d0 = dct[0] + dct[1];
    int d1 = dct[2] + dct[3];
    int d2 = dct[0] - dct[1];
    int d3 = dct[2] - dct[3];
    dct[0] = d0 + d1;
    dct[1] = d2 + d3;
    dct[2] = d0 - d1;
    dct[3] = d2 - d3;
}

static void sub8x16_dct_dc( dctcoef dct[8], pixel *pix1, pixel *pix2 )
{
    int a0 = sub4x4_dct_dc( &pix1[ 0*FENC_STRIDE+0], &pix2[ 0*FDEC_STRIDE+0] );
    int a1 = sub4x4_dct_dc( &pix1[ 0*FENC_STRIDE+4], &pix2[ 0*FDEC_STRIDE+4] );
    int a2 = sub4x4_dct_dc( &pix1[ 4*FENC_STRIDE+0], &pix2[ 4*FDEC_STRIDE+0] );
    int a3 = sub4x4_dct_dc( &pix1[ 4*FENC_STRIDE+4], &pix2[ 4*FDEC_STRIDE+4] );
    int a4 = sub4x4_dct_dc( &pix1[ 8*FENC_STRIDE+0], &pix2[ 8*FDEC_STRIDE+0] );
    int a5 = sub4x4_dct_dc( &pix1[ 8*FENC_STRIDE+4], &pix2[ 8*FDEC_STRIDE+4] );
    int a6 = sub4x4_dct_dc( &pix1[12*FENC_STRIDE+0], &pix2[12*FDEC_STRIDE+0] );
    int a7 = sub4x4_dct_dc( &pix1[12*FENC_STRIDE+4], &pix2[12*FDEC_STRIDE+4] );

    /* 2x4 DC transform: horizontal sum/difference per row, then the same
     * vertical butterfly as dct2x4dc above (reconstructed to mirror that
     * function; treat the exact output ordering as an assumption) */
    int s0 = a0 + a1, d0 = a0 - a1;
    int s1 = a2 + a3, d1 = a2 - a3;
    int s2 = a4 + a5, d2 = a4 - a5;
    int s3 = a6 + a7, d3 = a6 - a7;
    int b0 = s0 + s1;
    int b1 = s2 + s3;
    int b2 = d0 + d1;
    int b3 = d2 + d3;
    int b4 = s0 - s1;
    int b5 = s2 - s3;
    int b6 = d0 - d1;
    int b7 = d2 - d3;
    dct[0] = b0 + b1;
    dct[1] = b2 + b3;
    dct[2] = b0 - b1;
    dct[3] = b2 - b3;
    dct[4] = b4 - b5;
    dct[5] = b6 - b7;
    dct[6] = b4 + b5;
    dct[7] = b6 + b7;
}

static void add4x4_idct( pixel *p_dst, dctcoef dct[16] )
{
    dctcoef d[16];
    dctcoef tmp[16];

    for( int i = 0; i < 4; i++ )
    {
        int s02 =  dct[0*4+i]     +  dct[2*4+i];
        int d02 =  dct[0*4+i]     -  dct[2*4+i];
        int s13 =  dct[1*4+i]     + (dct[3*4+i]>>1);
        int d13 = (dct[1*4+i]>>1) -  dct[3*4+i];

        tmp[i*4+0] = s02 + s13;
        tmp[i*4+1] = d02 + d13;
        tmp[i*4+2] = d02 - d13;
        tmp[i*4+3] = s02 - s13;
    }

    for( int i = 0; i < 4; i++ )
    {
        int s02 =  tmp[0*4+i]     +  tmp[2*4+i];
        int d02 =  tmp[0*4+i]     -  tmp[2*4+i];
        int s13 =  tmp[1*4+i]     + (tmp[3*4+i]>>1);
        int d13 = (tmp[1*4+i]>>1) -  tmp[3*4+i];

        d[0*4+i] = ( s02 + s13 + 32 ) >> 6;
        d[1*4+i] = ( d02 + d13 + 32 ) >> 6;
        d[2*4+i] = ( d02 - d13 + 32 ) >> 6;
        d[3*4+i] = ( s02 - s13 + 32 ) >> 6;
    }

    for( int y = 0; y < 4; y++ )
    {
        for( int x = 0; x < 4; x++ )
            p_dst[x] = x264_clip_pixel( p_dst[x] + d[y*4+x] );
        p_dst += FDEC_STRIDE;
    }
}

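/* The inverse transform above leaves the residual scaled by a factor of 64,
 * hence the ( x + 32 ) >> 6 round-to-nearest before adding it to the
 * prediction and clipping to the pixel range. */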
static void add8x8_idct( pixel *p_dst, dctcoef dct[4][16] )
{
    add4x4_idct( &p_dst[0],               dct[0] );
    add4x4_idct( &p_dst[4],               dct[1] );
    add4x4_idct( &p_dst[4*FDEC_STRIDE+0], dct[2] );
    add4x4_idct( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}

static void add16x16_idct( pixel *p_dst, dctcoef dct[16][16] )
{
    add8x8_idct( &p_dst[0],               &dct[0] );
    add8x8_idct( &p_dst[8],               &dct[4] );
    add8x8_idct( &p_dst[8*FDEC_STRIDE+0], &dct[8] );
    add8x8_idct( &p_dst[8*FDEC_STRIDE+8], &dct[12] );
}

/****************************************************************************
 * 8x8 transform
 ****************************************************************************/

#define DCT8_1D {\
    int s07 = SRC(0) + SRC(7);\
    int s16 = SRC(1) + SRC(6);\
    int s25 = SRC(2) + SRC(5);\
    int s34 = SRC(3) + SRC(4);\
    int a0 = s07 + s34;\
    int a1 = s16 + s25;\
    int a2 = s07 - s34;\
    int a3 = s16 - s25;\
    int d07 = SRC(0) - SRC(7);\
    int d16 = SRC(1) - SRC(6);\
    int d25 = SRC(2) - SRC(5);\
    int d34 = SRC(3) - SRC(4);\
    int a4 = d16 + d25 + (d07 + (d07>>1));\
    int a5 = d07 - d34 - (d25 + (d25>>1));\
    int a6 = d07 + d34 - (d16 + (d16>>1));\
    int a7 = d16 - d25 + (d34 + (d34>>1));\
    DST(0) =  a0 + a1     ;\
    DST(1) =  a4 + (a7>>2);\
    DST(2) =  a2 + (a3>>1);\
    DST(3) =  a5 + (a6>>2);\
    DST(4) =  a0 - a1     ;\
    DST(5) =  a6 - (a5>>2);\
    DST(6) = (a2>>1) - a3 ;\
    DST(7) = (a4>>2) - a7 ;\
}

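/* DCT8_1D is instantiated twice below: once with SRC/DST mapped to columns
 * (vertical pass) and once with SRC reading rows and DST writing the output
 * block (horizontal pass), so one butterfly serves both 1D passes. */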
static void sub8x8_dct8( dctcoef dct[64], pixel *pix1, pixel *pix2 )
{
    dctcoef tmp[64];

    pixel_sub_wxh( tmp, 8, pix1, FENC_STRIDE, pix2, FDEC_STRIDE );

#define SRC(x) tmp[x*8+i]
#define DST(x) tmp[x*8+i]
    for( int i = 0; i < 8; i++ )
        DCT8_1D
#undef SRC
#undef DST

#define SRC(x) tmp[i*8+x]
#define DST(x) dct[x*8+i]
    for( int i = 0; i < 8; i++ )
        DCT8_1D
#undef SRC
#undef DST
}

static void sub16x16_dct8( dctcoef dct[4][64], pixel *pix1, pixel *pix2 )
{
    sub8x8_dct8( dct[0], &pix1[0],               &pix2[0] );
    sub8x8_dct8( dct[1], &pix1[8],               &pix2[8] );
    sub8x8_dct8( dct[2], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    sub8x8_dct8( dct[3], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}

#define IDCT8_1D {\
    int a0 =  SRC(0) + SRC(4);\
    int a2 =  SRC(0) - SRC(4);\
    int a4 = (SRC(2)>>1) - SRC(6);\
    int a6 = (SRC(6)>>1) + SRC(2);\
    int b0 = a0 + a6;\
    int b2 = a2 + a4;\
    int b4 = a2 - a4;\
    int b6 = a0 - a6;\
    int a1 = -SRC(3) + SRC(5) - SRC(7) - (SRC(7)>>1);\
    int a3 =  SRC(1) + SRC(7) - SRC(3) - (SRC(3)>>1);\
    int a5 = -SRC(1) + SRC(7) + SRC(5) + (SRC(5)>>1);\
    int a7 =  SRC(3) + SRC(5) + SRC(1) + (SRC(1)>>1);\
    int b1 = (a7>>2) + a1;\
    int b3 =  a3 + (a5>>2);\
    int b5 = (a3>>2) - a5;\
    int b7 =  a7 - (a1>>2);\
    DST(0, b0 + b7);\
    DST(1, b2 + b5);\
    DST(2, b4 + b3);\
    DST(3, b6 + b1);\
    DST(4, b6 - b1);\
    DST(5, b4 - b3);\
    DST(6, b2 - b5);\
    DST(7, b0 - b7);\
}

static void add8x8_idct8( pixel *dst, dctcoef dct[64] )
{
    dct[0] += 32; // rounding for the >>6 at the end

#define SRC(x)     dct[x*8+i]
#define DST(x,rhs) dct[x*8+i] = (rhs)
    for( int i = 0; i < 8; i++ )
        IDCT8_1D
#undef SRC
#undef DST

#define SRC(x)     dct[i*8+x]
#define DST(x,rhs) dst[i + x*FDEC_STRIDE] = x264_clip_pixel( dst[i + x*FDEC_STRIDE] + ((rhs) >> 6) );
    for( int i = 0; i < 8; i++ )
        IDCT8_1D
#undef SRC
#undef DST
}

static void add16x16_idct8( pixel *dst, dctcoef dct[4][64] )
{
    add8x8_idct8( &dst[0],               dct[0] );
    add8x8_idct8( &dst[8],               dct[1] );
    add8x8_idct8( &dst[8*FDEC_STRIDE+0], dct[2] );
    add8x8_idct8( &dst[8*FDEC_STRIDE+8], dct[3] );
}

static inline void add4x4_idct_dc( pixel *p_dst, dctcoef dc )
{
    dc = ( dc + 32 ) >> 6;
    for( int i = 0; i < 4; i++, p_dst += FDEC_STRIDE )
    {
        p_dst[0] = x264_clip_pixel( p_dst[0] + dc );
        p_dst[1] = x264_clip_pixel( p_dst[1] + dc );
        p_dst[2] = x264_clip_pixel( p_dst[2] + dc );
        p_dst[3] = x264_clip_pixel( p_dst[3] + dc );
    }
}

static void add8x8_idct_dc( pixel *p_dst, dctcoef dct[4] )
{
    add4x4_idct_dc( &p_dst[0],               dct[0] );
    add4x4_idct_dc( &p_dst[4],               dct[1] );
    add4x4_idct_dc( &p_dst[4*FDEC_STRIDE+0], dct[2] );
    add4x4_idct_dc( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}

static void add16x16_idct_dc( pixel *p_dst, dctcoef dct[16] )
{
    for( int i = 0; i < 4; i++, dct += 4, p_dst += 4*FDEC_STRIDE )
    {
        add4x4_idct_dc( &p_dst[ 0], dct[0] );
        add4x4_idct_dc( &p_dst[ 4], dct[1] );
        add4x4_idct_dc( &p_dst[ 8], dct[2] );
        add4x4_idct_dc( &p_dst[12], dct[3] );
    }
}

/****************************************************************************
 * 8x8 DCT transforms (for MPEG-2)
 * These algorithms are part of the Independent JPEG Group's software.
 * Copyright (C) 1991-1996, Thomas G. Lane.
 ****************************************************************************/

#define RIGHT_SHIFT(x, n) ((x) >> (n))
#define DESCALE(x, n) RIGHT_SHIFT((x) + (1 << ((n) - 1)), n)
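/* DESCALE(x,n) is a round-to-nearest right shift by n bits:
 * e.g. DESCALE(4433, 4) = (4433 + 8) >> 4 = 277. */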
#define CONST_BITS 13
#define PASS1_BITS 2  /* IJG value, assumed by the DESCALE shifts below */

#define FIX_0_298631336 ((int32_t) 2446)  /* FIX(0.298631336) */
#define FIX_0_390180644 ((int32_t) 3196)  /* FIX(0.390180644) */
#define FIX_0_541196100 ((int32_t) 4433)  /* FIX(0.541196100) */
#define FIX_0_765366865 ((int32_t) 6270)  /* FIX(0.765366865) */
#define FIX_0_899976223 ((int32_t) 7373)  /* FIX(0.899976223) */
#define FIX_1_175875602 ((int32_t) 9633)  /* FIX(1.175875602) */
#define FIX_1_501321110 ((int32_t) 12299) /* FIX(1.501321110) */
#define FIX_1_847759065 ((int32_t) 15137) /* FIX(1.847759065) */
#define FIX_1_961570560 ((int32_t) 16069) /* FIX(1.961570560) */
#define FIX_2_053119869 ((int32_t) 16819) /* FIX(2.053119869) */
#define FIX_2_562915447 ((int32_t) 20995) /* FIX(2.562915447) */
#define FIX_3_072711026 ((int32_t) 25172) /* FIX(3.072711026) */
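/* Each FIX_* constant is round(x * 2^CONST_BITS), i.e. round(x * 8192);
 * e.g. 0.541196100 * 8192 = 4433.5 -> 4433. */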
static inline void jpeg_fdct_islow( dctcoef *data )
{
    int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    int_fast32_t tmp10, tmp11, tmp12, tmp13;
    int_fast32_t z1, z2, z3, z4, z5;
    dctcoef *dataptr;

    /* Pass 1: process rows. */
    /* Note results are scaled up by sqrt(8) compared to a true DCT; */
    /* furthermore, we scale the results by 2**PASS1_BITS. */

    dataptr = data;
    for( int i = 0; i < 8; i++ )
    {
        tmp0 = dataptr[0] + dataptr[7];
        tmp7 = dataptr[0] - dataptr[7];
        tmp1 = dataptr[1] + dataptr[6];
        tmp6 = dataptr[1] - dataptr[6];
        tmp2 = dataptr[2] + dataptr[5];
        tmp5 = dataptr[2] - dataptr[5];
        tmp3 = dataptr[3] + dataptr[4];
        tmp4 = dataptr[3] - dataptr[4];

        /* Even part per LL&M figure 1 --- note that published figure is faulty;
         * rotator "sqrt(2)*c1" should be "sqrt(2)*c6".
         */
        tmp10 = tmp0 + tmp3;
        tmp13 = tmp0 - tmp3;
        tmp11 = tmp1 + tmp2;
        tmp12 = tmp1 - tmp2;

        dataptr[0] = (dctcoef) ((tmp10 + tmp11) << PASS1_BITS);
        dataptr[4] = (dctcoef) ((tmp10 - tmp11) << PASS1_BITS);

        z1 = (tmp12 + tmp13) * FIX_0_541196100;
        dataptr[2] = (dctcoef) DESCALE(z1 + tmp13 * FIX_0_765366865, CONST_BITS - PASS1_BITS);
        dataptr[6] = (dctcoef) DESCALE(z1 + tmp12 * (-FIX_1_847759065), CONST_BITS - PASS1_BITS);

        /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
         * cK represents cos(K*pi/16).
         * i0..i3 in the paper are tmp4..tmp7 here.
         */
        z1 = tmp4 + tmp7;
        z2 = tmp5 + tmp6;
        z3 = tmp4 + tmp6;
        z4 = tmp5 + tmp7;
        z5 = (z3 + z4) * FIX_1_175875602; /* sqrt(2) * c3 */

        tmp4 *= FIX_0_298631336; /* sqrt(2) * (-c1+c3+c5-c7 ) */
        tmp5 *= FIX_2_053119869; /* sqrt(2) * ( c1+c3-c5+c7 ) */
        tmp6 *= FIX_3_072711026; /* sqrt(2) * ( c1+c3+c5-c7 ) */
        tmp7 *= FIX_1_501321110; /* sqrt(2) * ( c1+c3-c5-c7 ) */
        z1 *= -FIX_0_899976223;  /* sqrt(2) * ( c7-c3 ) */
        z2 *= -FIX_2_562915447;  /* sqrt(2) * (-c1-c3 ) */
        z3 *= -FIX_1_961570560;  /* sqrt(2) * (-c3-c5 ) */
        z4 *= -FIX_0_390180644;  /* sqrt(2) * ( c5-c3 ) */

        z3 += z5;
        z4 += z5;

        dataptr[7] = (dctcoef) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS);
        dataptr[5] = (dctcoef) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS);
        dataptr[3] = (dctcoef) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS);
        dataptr[1] = (dctcoef) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS);

        dataptr += 8; /* advance pointer to next row */
    }

    /* Pass 2: process columns.
     * We remove the PASS1_BITS scaling, but leave the results scaled up
     * by an overall factor of 8.
     */
    dataptr = data;
    for( int i = 0; i < 8; i++ )
    {
        tmp0 = dataptr[0]  + dataptr[56];
        tmp7 = dataptr[0]  - dataptr[56];
        tmp1 = dataptr[8]  + dataptr[48];
        tmp6 = dataptr[8]  - dataptr[48];
        tmp2 = dataptr[16] + dataptr[40];
        tmp5 = dataptr[16] - dataptr[40];
        tmp3 = dataptr[24] + dataptr[32];
        tmp4 = dataptr[24] - dataptr[32];

        /* Even part per LL&M figure 1 --- note that published figure is faulty;
         * rotator "sqrt(2)*c1" should be "sqrt(2)*c6".
         */
        tmp10 = tmp0 + tmp3;
        tmp13 = tmp0 - tmp3;
        tmp11 = tmp1 + tmp2;
        tmp12 = tmp1 - tmp2;

        dataptr[0]  = DESCALE(tmp10 + tmp11, PASS1_BITS);
        dataptr[32] = DESCALE(tmp10 - tmp11, PASS1_BITS);

        z1 = (tmp12 + tmp13) * FIX_0_541196100;
        dataptr[16] = DESCALE(z1 + tmp13 * FIX_0_765366865, CONST_BITS + PASS1_BITS);
        dataptr[48] = DESCALE(z1 + tmp12 * (-FIX_1_847759065), CONST_BITS + PASS1_BITS);

        /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
         * cK represents cos(K*pi/16).
         * i0..i3 in the paper are tmp4..tmp7 here.
         */
        z1 = tmp4 + tmp7;
        z2 = tmp5 + tmp6;
        z3 = tmp4 + tmp6;
        z4 = tmp5 + tmp7;
        z5 = (z3 + z4) * FIX_1_175875602; /* sqrt(2) * c3 */

        tmp4 *= FIX_0_298631336; /* sqrt(2) * (-c1+c3+c5-c7 ) */
        tmp5 *= FIX_2_053119869; /* sqrt(2) * ( c1+c3-c5+c7 ) */
        tmp6 *= FIX_3_072711026; /* sqrt(2) * ( c1+c3+c5-c7 ) */
        tmp7 *= FIX_1_501321110; /* sqrt(2) * ( c1+c3-c5-c7 ) */
        z1 *= -FIX_0_899976223;  /* sqrt(2) * ( c7-c3 ) */
        z2 *= -FIX_2_562915447;  /* sqrt(2) * (-c1-c3 ) */
        z3 *= -FIX_1_961570560;  /* sqrt(2) * (-c3-c5 ) */
        z4 *= -FIX_0_390180644;  /* sqrt(2) * ( c5-c3 ) */

        z3 += z5;
        z4 += z5;

        dataptr[56] = (dctcoef) DESCALE(tmp4 + z1 + z3, CONST_BITS + PASS1_BITS);
        dataptr[40] = (dctcoef) DESCALE(tmp5 + z2 + z4, CONST_BITS + PASS1_BITS);
        dataptr[24] = (dctcoef) DESCALE(tmp6 + z2 + z3, CONST_BITS + PASS1_BITS);
        dataptr[8]  = (dctcoef) DESCALE(tmp7 + z1 + z4, CONST_BITS + PASS1_BITS);

        dataptr++; /* advance pointer to next column */
    }

    /* transpose to match zigzag and cqm */
    for( int i = 0; i < 8; i++ )
        for( int j = 0; j < i; j++ )
            XCHG( dctcoef, data[8*i+j], data[8*j+i] );
}

#define W1 2841 /* 2048*sqrt(2)*cos(1*pi/16) */
#define W2 2676 /* 2048*sqrt(2)*cos(2*pi/16) */
#define W3 2408 /* 2048*sqrt(2)*cos(3*pi/16) */
#define W5 1609 /* 2048*sqrt(2)*cos(5*pi/16) */
#define W6 1108 /* 2048*sqrt(2)*cos(6*pi/16) */
#define W7 565  /* 2048*sqrt(2)*cos(7*pi/16) */
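/* W1..W7 are round(2048*sqrt(2)*cos(k*pi/16)). W4 is absent because
 * 2048*sqrt(2)*cos(4*pi/16) is exactly 2048, a pure shift; the constant 181
 * used below is round(256/sqrt(2)), the same rotation at 8-bit precision. */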
static short iclip[1024]; /* clipping table */
static short *iclp;       /* iclip recentered, indexable from -512 to 511 */

static void x264_idct_init_mpeg2( void )
{
    iclp = iclip + 512;
    for( int i = -512; i < 512; i++ )
        iclp[i] = (i < -256) ? -256 : ((i > 255) ? 255 : i);
}

/* row (horizontal) IDCT */
static void idctrow( dctcoef *blk )
{
    int_fast32_t X0, X1, X2, X3, X4, X5, X6, X7, X8;

    /* shortcut: an all-zero AC row collapses to a DC fill */
    if( !( (X1 = blk[4] << 11) | (X2 = blk[6]) | (X3 = blk[2]) |
           (X4 = blk[1]) | (X5 = blk[7]) | (X6 = blk[5]) | (X7 = blk[3]) ) )
    {
        blk[0] = blk[1] = blk[2] = blk[3] = blk[4] = blk[5] = blk[6] =
        blk[7] = blk[0] << 3;
        return;
    }

    X0 = (blk[0] << 11) + 128; /* for proper rounding in the fourth stage */

    /* first stage */
    X8 = W7 * (X4 + X5);
    X4 = X8 + (W1 - W7) * X4;
    X5 = X8 - (W1 + W7) * X5;
    X8 = W3 * (X6 + X7);
    X6 = X8 - (W3 - W5) * X6;
    X7 = X8 - (W3 + W5) * X7;

    /* second stage */
    X8 = X0 + X1;
    X0 -= X1;
    X1 = W6 * (X3 + X2);
    X2 = X1 - (W2 + W6) * X2;
    X3 = X1 + (W2 - W6) * X3;
    X1 = X4 + X6;
    X4 -= X6;
    X6 = X5 + X7;
    X5 -= X7;

    /* third stage */
    X7 = X8 + X3;
    X8 -= X3;
    X3 = X0 + X2;
    X0 -= X2;
    X2 = (181 * (X4 + X5) + 128) >> 8;
    X4 = (181 * (X4 - X5) + 128) >> 8;

    /* fourth stage */
    blk[0] = (X7 + X1) >> 8;
    blk[1] = (X3 + X2) >> 8;
    blk[2] = (X0 + X4) >> 8;
    blk[3] = (X8 + X6) >> 8;
    blk[4] = (X8 - X6) >> 8;
    blk[5] = (X0 - X4) >> 8;
    blk[6] = (X3 - X2) >> 8;
    blk[7] = (X7 - X1) >> 8;
}

/* column (vertical) IDCT */
static void idctcol( dctcoef *blk )
{
    int_fast32_t X0, X1, X2, X3, X4, X5, X6, X7, X8;

    /* shortcut */
    if( !( (X1 = (blk[8 * 4] << 8)) | (X2 = blk[8 * 6]) |
           (X3 = blk[8 * 2]) | (X4 = blk[8 * 1]) |
           (X5 = blk[8 * 7]) | (X6 = blk[8 * 5]) | (X7 = blk[8 * 3]) ) )
    {
        blk[8 * 0] = blk[8 * 1] = blk[8 * 2] =
        blk[8 * 3] = blk[8 * 4] = blk[8 * 5] =
        blk[8 * 6] = blk[8 * 7] = iclp[(blk[8 * 0] + 32) >> 6];
        return;
    }

    X0 = (blk[8 * 0] << 8) + 8192;

    /* first stage */
    X8 = W7 * (X4 + X5) + 4;
    X4 = (X8 + (W1 - W7) * X4) >> 3;
    X5 = (X8 - (W1 + W7) * X5) >> 3;
    X8 = W3 * (X6 + X7) + 4;
    X6 = (X8 - (W3 - W5) * X6) >> 3;
    X7 = (X8 - (W3 + W5) * X7) >> 3;

    /* second stage */
    X8 = X0 + X1;
    X0 -= X1;
    X1 = W6 * (X3 + X2) + 4;
    X2 = (X1 - (W2 + W6) * X2) >> 3;
    X3 = (X1 + (W2 - W6) * X3) >> 3;
    X1 = X4 + X6;
    X4 -= X6;
    X6 = X5 + X7;
    X5 -= X7;

    /* third stage */
    X7 = X8 + X3;
    X8 -= X3;
    X3 = X0 + X2;
    X0 -= X2;
    X2 = (181 * (X4 + X5) + 128) >> 8;
    X4 = (181 * (X4 - X5) + 128) >> 8;

    /* fourth stage */
    blk[8 * 0] = iclp[(X7 + X1) >> 14];
    blk[8 * 1] = iclp[(X3 + X2) >> 14];
    blk[8 * 2] = iclp[(X0 + X4) >> 14];
    blk[8 * 3] = iclp[(X8 + X6) >> 14];
    blk[8 * 4] = iclp[(X8 - X6) >> 14];
    blk[8 * 5] = iclp[(X0 - X4) >> 14];
    blk[8 * 6] = iclp[(X3 - X2) >> 14];
    blk[8 * 7] = iclp[(X7 - X1) >> 14];
}

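/* idctrow keeps extra fractional precision in its output ( <<11 ... >>8 ),
 * which idctcol then consumes together with the final IDCT normalization
 * ( <<8 ... >>14 ) and the iclp[] clamp to the legal residual range. */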
static void add8x8_idct_mpeg2( pixel *p_dst, dctcoef dct[64] )
{
    dctcoef tmp[64];

    /* transpose to match zigzag and cqm */
    for( int i = 0; i < 8; i++ )
        for( int j = 0; j < 8; j++ )
            tmp[i * 8 + j] = dct[j * 8 + i];

    for( int i = 0; i < 8; i++ )
        idctrow( tmp + i * 8 );

    for( int i = 0; i < 8; i++ )
        idctcol( tmp + i );

    for( int i = 0; i < 8; i++ )
        for( int j = 0; j < 8; j++ )
            p_dst[i*FDEC_STRIDE + j] =
                x264_clip_pixel( p_dst[i*FDEC_STRIDE + j] + tmp[i*8 + j] );
}

static void add16x16_idct_mpeg2( pixel *p_dst, dctcoef dct[4][64] )
{
    add8x8_idct_mpeg2( &p_dst[0],               dct[0] );
    add8x8_idct_mpeg2( &p_dst[8],               dct[1] );
    add8x8_idct_mpeg2( &p_dst[8*FDEC_STRIDE+0], dct[2] );
    add8x8_idct_mpeg2( &p_dst[8*FDEC_STRIDE+8], dct[3] );
}

static void sub8x8_dct_mpeg2( dctcoef dct[64], pixel *pix1, pixel *pix2 )
{
    pixel_sub_wxh( dct, 8, pix1, FENC_STRIDE, pix2, FDEC_STRIDE );
    jpeg_fdct_islow( dct );
}

static void sub16x16_dct_mpeg2( dctcoef dct[4][64], pixel *pix1, pixel *pix2 )
{
    sub8x8_dct_mpeg2( dct[0], &pix1[0],               &pix2[0] );
    sub8x8_dct_mpeg2( dct[1], &pix1[8],               &pix2[8] );
    sub8x8_dct_mpeg2( dct[2], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    sub8x8_dct_mpeg2( dct[3], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}

/****************************************************************************
 * x264_dct_init:
 ****************************************************************************/
void x264_dct_init( int cpu, x264_dct_function_t *dctf, int b_mpeg2 )
{
    dctf->sub4x4_dct    = sub4x4_dct;
    dctf->add4x4_idct   = add4x4_idct;

    dctf->sub8x8_dct    = sub8x8_dct;
    dctf->sub8x8_dct_dc = sub8x8_dct_dc;
    dctf->add8x8_idct   = add8x8_idct;
    dctf->add8x8_idct_dc = add8x8_idct_dc;

    dctf->sub8x16_dct_dc = sub8x16_dct_dc;

    dctf->sub16x16_dct  = sub16x16_dct;
    dctf->add16x16_idct = add16x16_idct;
    dctf->add16x16_idct_dc = add16x16_idct_dc;

    dctf->sub8x8_dct8   = sub8x8_dct8;
    dctf->add8x8_idct8  = add8x8_idct8;

    dctf->sub16x16_dct8  = sub16x16_dct8;
    dctf->add16x16_idct8 = add16x16_idct8;

    dctf->dct4x4dc  = dct4x4dc;
    dctf->idct4x4dc = idct4x4dc;

    dctf->dct2x4dc = dct2x4dc;

#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        dctf->sub4x4_dct   = x264_sub4x4_dct_mmx;
        dctf->sub8x8_dct   = x264_sub8x8_dct_mmx;
        dctf->sub16x16_dct = x264_sub16x16_dct_mmx;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        dctf->add4x4_idct     = x264_add4x4_idct_sse2;
        dctf->dct4x4dc        = x264_dct4x4dc_sse2;
        dctf->idct4x4dc       = x264_idct4x4dc_sse2;
        dctf->sub8x8_dct8     = x264_sub8x8_dct8_sse2;
        dctf->sub16x16_dct8   = x264_sub16x16_dct8_sse2;
        dctf->add8x8_idct     = x264_add8x8_idct_sse2;
        dctf->add16x16_idct   = x264_add16x16_idct_sse2;
        dctf->add8x8_idct8    = x264_add8x8_idct8_sse2;
        dctf->add16x16_idct8  = x264_add16x16_idct8_sse2;
        dctf->sub8x8_dct_dc   = x264_sub8x8_dct_dc_sse2;
        dctf->add8x8_idct_dc  = x264_add8x8_idct_dc_sse2;
        dctf->sub8x16_dct_dc  = x264_sub8x16_dct_dc_sse2;
        dctf->add16x16_idct_dc= x264_add16x16_idct_dc_sse2;
    }
    if( cpu&X264_CPU_SSE4 )
    {
        dctf->sub8x8_dct8   = x264_sub8x8_dct8_sse4;
        dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse4;
    }
    if( cpu&X264_CPU_AVX )
    {
        dctf->add4x4_idct     = x264_add4x4_idct_avx;
        dctf->dct4x4dc        = x264_dct4x4dc_avx;
        dctf->idct4x4dc       = x264_idct4x4dc_avx;
        dctf->sub8x8_dct8     = x264_sub8x8_dct8_avx;
        dctf->sub16x16_dct8   = x264_sub16x16_dct8_avx;
        dctf->add8x8_idct     = x264_add8x8_idct_avx;
        dctf->add16x16_idct   = x264_add16x16_idct_avx;
        dctf->add8x8_idct8    = x264_add8x8_idct8_avx;
        dctf->add16x16_idct8  = x264_add16x16_idct8_avx;
        dctf->add8x8_idct_dc  = x264_add8x8_idct_dc_avx;
        dctf->sub8x16_dct_dc  = x264_sub8x16_dct_dc_avx;
        dctf->add16x16_idct_dc= x264_add16x16_idct_dc_avx;
    }
#endif // HAVE_MMX

#else // !HIGH_BIT_DEPTH

#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        dctf->sub4x4_dct    = x264_sub4x4_dct_mmx;
        dctf->add4x4_idct   = x264_add4x4_idct_mmx;
        dctf->dct4x4dc      = x264_dct4x4dc_mmx;
        dctf->idct4x4dc     = x264_idct4x4dc_mmx;
        dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_mmx2;

#if !ARCH_X86_64
        dctf->sub8x8_dct    = x264_sub8x8_dct_mmx;
        dctf->sub16x16_dct  = x264_sub16x16_dct_mmx;
        dctf->add8x8_idct   = x264_add8x8_idct_mmx;
        dctf->add16x16_idct = x264_add16x16_idct_mmx;

        dctf->sub8x8_dct8   = x264_sub8x8_dct8_mmx;
        dctf->sub16x16_dct8 = x264_sub16x16_dct8_mmx;
        dctf->add8x8_idct8  = x264_add8x8_idct8_mmx;
        dctf->add16x16_idct8= x264_add16x16_idct8_mmx;
#endif
    }

    if( cpu&X264_CPU_MMX2 )
    {
        dctf->add8x8_idct_dc   = x264_add8x8_idct_dc_mmx2;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_mmx2;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        dctf->sub8x8_dct8   = x264_sub8x8_dct8_sse2;
        dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse2;
        dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_sse2;
        dctf->sub8x16_dct_dc= x264_sub8x16_dct_dc_sse2;
        dctf->add8x8_idct8  = x264_add8x8_idct8_sse2;
        dctf->add16x16_idct8= x264_add16x16_idct8_sse2;

        if( !(cpu&X264_CPU_SSE2_IS_SLOW) )
        {
            dctf->sub8x8_dct    = x264_sub8x8_dct_sse2;
            dctf->sub16x16_dct  = x264_sub16x16_dct_sse2;
            dctf->add8x8_idct   = x264_add8x8_idct_sse2;
            dctf->add16x16_idct = x264_add16x16_idct_sse2;
            dctf->add16x16_idct_dc = x264_add16x16_idct_dc_sse2;
        }
    }

    if( (cpu&X264_CPU_SSSE3) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_ssse3;
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            dctf->sub4x4_dct    = x264_sub4x4_dct_ssse3;
            dctf->sub8x8_dct    = x264_sub8x8_dct_ssse3;
            dctf->sub16x16_dct  = x264_sub16x16_dct_ssse3;
            dctf->sub8x8_dct8   = x264_sub8x8_dct8_ssse3;
            dctf->sub16x16_dct8 = x264_sub16x16_dct8_ssse3;
            if( !(cpu&X264_CPU_SLOW_PSHUFB) )
            {
                dctf->add8x8_idct_dc   = x264_add8x8_idct_dc_ssse3;
                dctf->add16x16_idct_dc = x264_add16x16_idct_dc_ssse3;
            }
        }
    }

    if( cpu&X264_CPU_SSE4 )
        dctf->add4x4_idct = x264_add4x4_idct_sse4;

    if( cpu&X264_CPU_AVX )
    {
        dctf->add4x4_idct      = x264_add4x4_idct_avx;
        dctf->add8x8_idct      = x264_add8x8_idct_avx;
        dctf->add16x16_idct    = x264_add16x16_idct_avx;
        dctf->add8x8_idct8     = x264_add8x8_idct8_avx;
        dctf->add16x16_idct8   = x264_add16x16_idct8_avx;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_avx;
        dctf->sub8x8_dct       = x264_sub8x8_dct_avx;
        dctf->sub16x16_dct     = x264_sub16x16_dct_avx;
        dctf->sub8x8_dct8      = x264_sub8x8_dct8_avx;
        dctf->sub16x16_dct8    = x264_sub16x16_dct8_avx;
    }

    if( cpu&X264_CPU_XOP )
    {
        dctf->sub8x8_dct   = x264_sub8x8_dct_xop;
        dctf->sub16x16_dct = x264_sub16x16_dct_xop;
    }

    if( cpu&X264_CPU_AVX2 )
    {
        dctf->add8x8_idct      = x264_add8x8_idct_avx2;
        dctf->add16x16_idct    = x264_add16x16_idct_avx2;
        dctf->sub8x8_dct       = x264_sub8x8_dct_avx2;
        dctf->sub16x16_dct     = x264_sub16x16_dct_avx2;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_avx2;
#if ARCH_X86_64
        dctf->sub16x16_dct8    = x264_sub16x16_dct8_avx2;
#endif
    }
#endif // HAVE_MMX

#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        dctf->sub4x4_dct    = x264_sub4x4_dct_altivec;
        dctf->sub8x8_dct    = x264_sub8x8_dct_altivec;
        dctf->sub16x16_dct  = x264_sub16x16_dct_altivec;

        dctf->add4x4_idct   = x264_add4x4_idct_altivec;
        dctf->add8x8_idct   = x264_add8x8_idct_altivec;
        dctf->add16x16_idct = x264_add16x16_idct_altivec;

        dctf->sub8x8_dct8   = x264_sub8x8_dct8_altivec;
        dctf->sub16x16_dct8 = x264_sub16x16_dct8_altivec;

        dctf->add8x8_idct8  = x264_add8x8_idct8_altivec;
        dctf->add16x16_idct8= x264_add16x16_idct8_altivec;
    }
#endif // HAVE_ALTIVEC

#if HAVE_ARMV6
    if( cpu&X264_CPU_NEON )
    {
        dctf->sub4x4_dct       = x264_sub4x4_dct_neon;
        dctf->sub8x8_dct       = x264_sub8x8_dct_neon;
        dctf->sub16x16_dct     = x264_sub16x16_dct_neon;
        dctf->add8x8_idct_dc   = x264_add8x8_idct_dc_neon;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_neon;
        dctf->sub8x8_dct_dc    = x264_sub8x8_dct_dc_neon;
        dctf->dct4x4dc         = x264_dct4x4dc_neon;
        dctf->idct4x4dc        = x264_idct4x4dc_neon;

        dctf->add4x4_idct   = x264_add4x4_idct_neon;
        dctf->add8x8_idct   = x264_add8x8_idct_neon;
        dctf->add16x16_idct = x264_add16x16_idct_neon;

        dctf->sub8x8_dct8   = x264_sub8x8_dct8_neon;
        dctf->sub16x16_dct8 = x264_sub16x16_dct8_neon;

        dctf->add8x8_idct8  = x264_add8x8_idct8_neon;
        dctf->add16x16_idct8= x264_add16x16_idct8_neon;
    }
#endif // HAVE_ARMV6

    if( b_mpeg2 )
    {
        dctf->sub8x8_dct8    = sub8x8_dct_mpeg2;
        dctf->add8x8_idct8   = add8x8_idct_mpeg2;

        dctf->sub16x16_dct8  = sub16x16_dct_mpeg2;
        dctf->add16x16_idct8 = add16x16_idct_mpeg2;

        x264_idct_init_mpeg2();
    }
#endif // HIGH_BIT_DEPTH
}

#define ZIG(i,y,x) level[i] = dct[x*8+y];
#define ZIGZAG8_FRAME\
    ZIG( 0,0,0) ZIG( 1,0,1) ZIG( 2,1,0) ZIG( 3,2,0)\
    ZIG( 4,1,1) ZIG( 5,0,2) ZIG( 6,0,3) ZIG( 7,1,2)\
    ZIG( 8,2,1) ZIG( 9,3,0) ZIG(10,4,0) ZIG(11,3,1)\
    ZIG(12,2,2) ZIG(13,1,3) ZIG(14,0,4) ZIG(15,0,5)\
    ZIG(16,1,4) ZIG(17,2,3) ZIG(18,3,2) ZIG(19,4,1)\
    ZIG(20,5,0) ZIG(21,6,0) ZIG(22,5,1) ZIG(23,4,2)\
    ZIG(24,3,3) ZIG(25,2,4) ZIG(26,1,5) ZIG(27,0,6)\
    ZIG(28,0,7) ZIG(29,1,6) ZIG(30,2,5) ZIG(31,3,4)\
    ZIG(32,4,3) ZIG(33,5,2) ZIG(34,6,1) ZIG(35,7,0)\
    ZIG(36,7,1) ZIG(37,6,2) ZIG(38,5,3) ZIG(39,4,4)\
    ZIG(40,3,5) ZIG(41,2,6) ZIG(42,1,7) ZIG(43,2,7)\
    ZIG(44,3,6) ZIG(45,4,5) ZIG(46,5,4) ZIG(47,6,3)\
    ZIG(48,7,2) ZIG(49,7,3) ZIG(50,6,4) ZIG(51,5,5)\
    ZIG(52,4,6) ZIG(53,3,7) ZIG(54,4,7) ZIG(55,5,6)\
    ZIG(56,6,5) ZIG(57,7,4) ZIG(58,7,5) ZIG(59,6,6)\
    ZIG(60,5,7) ZIG(61,6,7) ZIG(62,7,6) ZIG(63,7,7)

#define ZIGZAG8_FIELD\
    ZIG( 0,0,0) ZIG( 1,1,0) ZIG( 2,2,0) ZIG( 3,0,1)\
    ZIG( 4,1,1) ZIG( 5,3,0) ZIG( 6,4,0) ZIG( 7,2,1)\
    ZIG( 8,0,2) ZIG( 9,3,1) ZIG(10,5,0) ZIG(11,6,0)\
    ZIG(12,7,0) ZIG(13,4,1) ZIG(14,1,2) ZIG(15,0,3)\
    ZIG(16,2,2) ZIG(17,5,1) ZIG(18,6,1) ZIG(19,7,1)\
    ZIG(20,3,2) ZIG(21,1,3) ZIG(22,0,4) ZIG(23,2,3)\
    ZIG(24,4,2) ZIG(25,5,2) ZIG(26,6,2) ZIG(27,7,2)\
    ZIG(28,3,3) ZIG(29,1,4) ZIG(30,0,5) ZIG(31,2,4)\
    ZIG(32,4,3) ZIG(33,5,3) ZIG(34,6,3) ZIG(35,7,3)\
    ZIG(36,3,4) ZIG(37,1,5) ZIG(38,0,6) ZIG(39,2,5)\
    ZIG(40,4,4) ZIG(41,5,4) ZIG(42,6,4) ZIG(43,7,4)\
    ZIG(44,3,5) ZIG(45,1,6) ZIG(46,2,6) ZIG(47,4,5)\
    ZIG(48,5,5) ZIG(49,6,5) ZIG(50,7,5) ZIG(51,3,6)\
    ZIG(52,0,7) ZIG(53,1,7) ZIG(54,4,6) ZIG(55,5,6)\
    ZIG(56,6,6) ZIG(57,7,6) ZIG(58,2,7) ZIG(59,3,7)\
    ZIG(60,4,7) ZIG(61,5,7) ZIG(62,6,7) ZIG(63,7,7)

#define ZIGZAG8_FIELD_MPEG2\
    ZIG( 0,0,0) ZIG( 1,1,0) ZIG( 2,2,0) ZIG( 3,3,0)\
    ZIG( 4,0,1) ZIG( 5,1,1) ZIG( 6,0,2) ZIG( 7,1,2)\
    ZIG( 8,2,1) ZIG( 9,3,1) ZIG(10,4,0) ZIG(11,5,0)\
    ZIG(12,6,0) ZIG(13,7,0) ZIG(14,7,1) ZIG(15,6,1)\
    ZIG(16,5,1) ZIG(17,4,1) ZIG(18,3,2) ZIG(19,2,2)\
    ZIG(20,0,3) ZIG(21,1,3) ZIG(22,0,4) ZIG(23,1,4)\
    ZIG(24,2,3) ZIG(25,3,3) ZIG(26,4,2) ZIG(27,5,2)\
    ZIG(28,6,2) ZIG(29,7,2) ZIG(30,4,3) ZIG(31,5,3)\
    ZIG(32,6,3) ZIG(33,7,3) ZIG(34,2,4) ZIG(35,3,4)\
    ZIG(36,0,5) ZIG(37,1,5) ZIG(38,0,6) ZIG(39,1,6)\
    ZIG(40,2,5) ZIG(41,3,5) ZIG(42,4,4) ZIG(43,5,4)\
    ZIG(44,6,4) ZIG(45,7,4) ZIG(46,4,5) ZIG(47,5,5)\
    ZIG(48,6,5) ZIG(49,7,5) ZIG(50,2,6) ZIG(51,3,6)\
    ZIG(52,0,7) ZIG(53,1,7) ZIG(54,2,7) ZIG(55,3,7)\
    ZIG(56,4,6) ZIG(57,5,6) ZIG(58,6,6) ZIG(59,7,6)\
    ZIG(60,4,7) ZIG(61,5,7) ZIG(62,6,7) ZIG(63,7,7)
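/* ZIGZAG8_FIELD_MPEG2 is the MPEG-2 "alternate scan" (ISO/IEC 13818-2,
 * figure 7-3), typically selected for field-coded material; progressive
 * MPEG-2 content uses the classic zigzag shared with H.264
 * (ZIGZAG8_FRAME). */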
#define ZIGZAG4_FRAME\
    ZIGDC( 0,0,0) ZIG( 1,0,1) ZIG( 2,1,0) ZIG( 3,2,0)\
    ZIG( 4,1,1) ZIG( 5,0,2) ZIG( 6,0,3) ZIG( 7,1,2)\
    ZIG( 8,2,1) ZIG( 9,3,0) ZIG(10,3,1) ZIG(11,2,2)\
    ZIG(12,1,3) ZIG(13,2,3) ZIG(14,3,2) ZIG(15,3,3)

#define ZIGZAG4_FIELD\
    ZIGDC( 0,0,0) ZIG( 1,1,0) ZIG( 2,0,1) ZIG( 3,2,0)\
    ZIG( 4,3,0) ZIG( 5,1,1) ZIG( 6,2,1) ZIG( 7,3,1)\
    ZIG( 8,0,2) ZIG( 9,1,2) ZIG(10,2,2) ZIG(11,3,2)\
    ZIG(12,0,3) ZIG(13,1,3) ZIG(14,2,3) ZIG(15,3,3)

static void zigzag_scan_8x8_frame( dctcoef level[64], dctcoef dct[64] )
{
    ZIGZAG8_FRAME
}

static void zigzag_scan_8x8_field( dctcoef level[64], dctcoef dct[64] )
{
    ZIGZAG8_FIELD
}

static void zigzag_scan_8x8_field_mpeg2( dctcoef level[64], dctcoef dct[64] )
{
    ZIGZAG8_FIELD_MPEG2
}

void zigzag_scan_8x8_cqm( uint8_t level[64], const uint8_t dct[64] )
{
    ZIGZAG8_FRAME
}

#undef ZIG
#define ZIG(i,y,x) level[i] = dct[x*4+y];
#define ZIGDC(i,y,x) ZIG(i,y,x)

static void zigzag_scan_4x4_frame( dctcoef level[16], dctcoef dct[16] )
{
    ZIGZAG4_FRAME
}

static void zigzag_scan_4x4_field( dctcoef level[16], dctcoef dct[16] )
{
    memcpy( level, dct, 2 * sizeof(dctcoef) );
    ZIG(2,0,1) ZIG(3,2,0) ZIG(4,3,0) ZIG(5,1,1)
    memcpy( level+6, dct+6, 10 * sizeof(dctcoef) );
}

#undef ZIG
#define ZIG(i,y,x) {\
    int oe = x+y*FENC_STRIDE;\
    int od = x+y*FDEC_STRIDE;\
    level[i] = p_src[oe] - p_dst[od];\
    nz |= level[i];\
}
#define COPY4x4\
    CPPIXEL_X4( p_dst+0*FDEC_STRIDE, p_src+0*FENC_STRIDE );\
    CPPIXEL_X4( p_dst+1*FDEC_STRIDE, p_src+1*FENC_STRIDE );\
    CPPIXEL_X4( p_dst+2*FDEC_STRIDE, p_src+2*FENC_STRIDE );\
    CPPIXEL_X4( p_dst+3*FDEC_STRIDE, p_src+3*FENC_STRIDE );
#define CPPIXEL_X8(dst,src) ( CPPIXEL_X4(dst,src), CPPIXEL_X4(dst+4,src+4) )
#define COPY8x8\
    CPPIXEL_X8( p_dst+0*FDEC_STRIDE, p_src+0*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+1*FDEC_STRIDE, p_src+1*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+2*FDEC_STRIDE, p_src+2*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+3*FDEC_STRIDE, p_src+3*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+4*FDEC_STRIDE, p_src+4*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+5*FDEC_STRIDE, p_src+5*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+6*FDEC_STRIDE, p_src+6*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+7*FDEC_STRIDE, p_src+7*FENC_STRIDE );

static int zigzag_sub_4x4_frame( dctcoef level[16], const pixel *p_src, pixel *p_dst )
{
    int nz = 0;
    ZIGZAG4_FRAME
    COPY4x4
    return !!nz;
}

static int zigzag_sub_4x4_field( dctcoef level[16], const pixel *p_src, pixel *p_dst )
{
    int nz = 0;
    ZIGZAG4_FIELD
    COPY4x4
    return !!nz;
}

#undef ZIGDC
#define ZIGDC(i,y,x) {\
    int oe = x+y*FENC_STRIDE;\
    int od = x+y*FDEC_STRIDE;\
    *dc = p_src[oe] - p_dst[od];\
    level[0] = 0;\
}

static int zigzag_sub_4x4ac_frame( dctcoef level[16], const pixel *p_src, pixel *p_dst, dctcoef *dc )
{
    int nz = 0;
    ZIGZAG4_FRAME
    COPY4x4
    return !!nz;
}

static int zigzag_sub_4x4ac_field( dctcoef level[16], const pixel *p_src, pixel *p_dst, dctcoef *dc )
{
    int nz = 0;
    ZIGZAG4_FIELD
    COPY4x4
    return !!nz;
}

static int zigzag_sub_8x8_frame( dctcoef level[64], const pixel *p_src, pixel *p_dst )
{
    int nz = 0;
    ZIGZAG8_FRAME
    COPY8x8
    return !!nz;
}

static int zigzag_sub_8x8_field( dctcoef level[64], const pixel *p_src, pixel *p_dst )
{
    int nz = 0;
    ZIGZAG8_FIELD
    COPY8x8
    return !!nz;
}

#undef ZIG
#undef ZIGDC
#undef COPY4x4
#undef COPY8x8

static void zigzag_interleave_8x8_cavlc( dctcoef *dst, dctcoef *src, uint8_t *nnz )
{
    for( int i = 0; i < 4; i++ )
    {
        int nz = 0;
        for( int j = 0; j < 16; j++ )
        {
            nz |= src[i+j*4];
            dst[i*16+j] = src[i+j*4];
        }
        nnz[(i&1) + (i>>1)*8] = !!nz;
    }
}
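/* interleave_8x8_cavlc repacks one 8x8 block's coefficients into the four
 * 4x4 scans CAVLC expects (CAVLC has no 8x8 block syntax) and records the
 * corresponding non-zero flags for context derivation. */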
void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf_progressive, x264_zigzag_function_t *pf_interlaced, int b_mpeg2 )
{
    pf_interlaced->scan_8x8   = zigzag_scan_8x8_field;
    pf_progressive->scan_8x8  = zigzag_scan_8x8_frame;
    pf_interlaced->scan_4x4   = zigzag_scan_4x4_field;
    pf_progressive->scan_4x4  = zigzag_scan_4x4_frame;
    pf_interlaced->sub_8x8    = zigzag_sub_8x8_field;
    pf_progressive->sub_8x8   = zigzag_sub_8x8_frame;
    pf_interlaced->sub_4x4    = zigzag_sub_4x4_field;
    pf_progressive->sub_4x4   = zigzag_sub_4x4_frame;
    pf_interlaced->sub_4x4ac  = zigzag_sub_4x4ac_field;
    pf_progressive->sub_4x4ac = zigzag_sub_4x4ac_frame;

#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_SSE2 )
    {
        pf_interlaced->scan_4x4  = x264_zigzag_scan_4x4_field_sse2;
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_sse2;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2;
    }
    if( cpu&X264_CPU_SSE4 )
        pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_sse4;
    if( cpu&X264_CPU_AVX )
        pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_avx;
#if ARCH_X86_64
    if( cpu&X264_CPU_AVX )
    {
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_avx;
    }
#endif // ARCH_X86_64
#endif // HAVE_MMX
#else // !HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_mmx;
    if( cpu&X264_CPU_MMX2 )
    {
        pf_interlaced->scan_4x4  = x264_zigzag_scan_4x4_field_mmx2;
        pf_interlaced->scan_8x8  = x264_zigzag_scan_8x8_field_mmx2;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_mmx2;
    }
    if( cpu&X264_CPU_SSE2_IS_FAST )
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2;
    if( cpu&X264_CPU_SSSE3 )
    {
        pf_interlaced->sub_4x4   = x264_zigzag_sub_4x4_field_ssse3;
        pf_progressive->sub_4x4  = x264_zigzag_sub_4x4_frame_ssse3;
        pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_ssse3;
        pf_progressive->sub_4x4ac= x264_zigzag_sub_4x4ac_frame_ssse3;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_ssse3;
        if( !(cpu&X264_CPU_SLOW_SHUFFLE) )
            pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_ssse3;
    }
    if( cpu&X264_CPU_AVX )
    {
        pf_interlaced->sub_4x4   = x264_zigzag_sub_4x4_field_avx;
        pf_progressive->sub_4x4  = x264_zigzag_sub_4x4_frame_avx;
#if ARCH_X86_64
        pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_avx;
        pf_progressive->sub_4x4ac= x264_zigzag_sub_4x4ac_frame_avx;
#endif
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx;
    }
    if( cpu&X264_CPU_XOP )
    {
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_xop;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_xop;
        pf_interlaced->scan_8x8  = x264_zigzag_scan_8x8_field_xop;
    }
#endif // HAVE_MMX
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        pf_interlaced->scan_4x4  = x264_zigzag_scan_4x4_field_altivec;
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_altivec;
    }
#endif // HAVE_ALTIVEC
#if HAVE_ARMV6
    if( cpu&X264_CPU_NEON )
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_neon;
#endif // HAVE_ARMV6
#endif // HIGH_BIT_DEPTH
    pf_interlaced->interleave_8x8_cavlc =
    pf_progressive->interleave_8x8_cavlc = zigzag_interleave_8x8_cavlc;
#if HAVE_MMX
#if HIGH_BIT_DEPTH
    if( cpu&X264_CPU_SSE2 )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2;
    }
    if( cpu&X264_CPU_AVX )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx;
    }
#else
    if( cpu&X264_CPU_MMX )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_mmx;
    }
    if( (cpu&X264_CPU_SSE2) && !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SSE2_IS_SLOW)) )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2;
    }

    if( cpu&X264_CPU_AVX )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx;
    }

    if( cpu&X264_CPU_AVX2 )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx2;
    }
#endif // HIGH_BIT_DEPTH
#endif // HAVE_MMX

    /* MPEG-2 TODO: Write SIMD */
    if( b_mpeg2 )
        pf_interlaced->scan_8x8 = zigzag_scan_8x8_field_mpeg2;
}