/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "../dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"
#ifdef CONFIG_DARWIN
#include <sys/sysctl.h>
#else /* CONFIG_DARWIN */
#ifdef __AMIGAOS4__
#include <exec/exec.h>
#include <interfaces/exec.h>
#include <proto/exec.h>
#else /* __AMIGAOS4__ */
#include <signal.h>
#include <setjmp.h>

static sigjmp_buf jmpbuf;
static volatile sig_atomic_t canjump = 0;

static void sigill_handler (int sig)
{
    if (!canjump) {
        signal (sig, SIG_DFL);
        raise (sig);
    }

    canjump = 0;
    siglongjmp (jmpbuf, 1);
}
#endif /* __AMIGAOS4__ */
#endif /* CONFIG_DARWIN */
int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for(i=0;i<h;i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
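/* A minimal scalar sketch (not compiled in) of what sad16_x2_altivec
   computes: the SAD between pix1 and the x-half-pel interpolation of
   pix2. Names here are illustrative only. */
#if 0
static int sad16_x2_scalar(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int x, y, s = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++) {
            /* vec_avg rounds up, hence the +1 */
            int avg = (pix2[x] + pix2[x + 1] + 1) >> 1;
            int d = pix1[x] - avg;
            s += d < 0 ? -d : d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif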
int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /*
       Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, each
       time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]
    */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    for(i=0;i<h;i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    uint8_t *pix3 = pix2 + line_size;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    const_vector unsigned short two = (const_vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /*
       Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, as well
       as some splitting, and vector addition each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts
    */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for(i=0;i<h;i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]      pix3iv: pix3[1]-pix3[16]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /*
           Note that AltiVec does have vec_avg, but this works on vector pairs
           and rounds up. We could do avg(avg(a,b), avg(c,d)), but the rounding
           would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
           Instead, we have to split the pixel vectors into vectors of shorts,
           and do the averaging by hand.
        */

        /* Split the pixel vectors into shorts */
        pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
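/* Worked example of the rounding problem mentioned above: for the four
   pixels 3, 0, 0, 1 the exact rounded average is (3+0+0+1+2)>>2 = 1,
   but chaining the round-to-plus-infinity vec_avg gives
   avg(avg(3,0), avg(0,1)) = avg(2,1) = 2. Widening to shorts and
   computing (a+b+c+d+2)>>2 avoids the double rounding. */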
int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}
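/* Scalar equivalent of the loop above, for reference (not compiled in):
   vec_msum multiplies 16 unsigned bytes element-wise and accumulates the
   products into 4 32-bit partial sums. */
#if 0
static int pix_norm1_scalar(uint8_t *pix, int line_size)
{
    int i, j, s = 0;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++)
            s += pix[j] * pix[j];
        pix += line_size;
    }
    return s;
}
#endif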
/**
 * Sum of Squared Errors for a 8x8 block.
 * It's the sad8_altivec code above w/ squaring added.
 */
int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /*
          Since we want to use unsigned chars, we can take advantage
          of the fact that abs(a-b)^2 = (a-b)^2.
        */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}
/**
 * Sum of Squared Errors for a 16x16 block.
 * It's the sad16_altivec code above w/ squaring added.
 */
int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /*
          Since we want to use unsigned chars, we can take advantage
          of the fact that abs(a-b)^2 = (a-b)^2.
        */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}
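/* Illustrative scalar version of sse16 (not compiled in). Because the
   operands are unsigned chars, |a-b|^2 == (a-b)^2, so squaring the
   max/min difference used above gives the exact squared error. */
#if 0
static int sse16_scalar(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int x, y, s = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++) {
            int d = pix1[x] - pix2[x];
            s += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif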
int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s __attribute__((aligned(16)));

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for(i=0;i<8;i++)
    {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}
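/* The scalar operation being vectorized above, for reference
   (illustrative only, not compiled in): widen one 8-pixel row to
   16-bit coefficients per iteration. */
#if 0
static void get_pixels_scalar(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8 * i + j] = pixels[j];
        pixels += line_size;
    }
}
#endif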
void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
                         const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for(i=0;i<4;i++)
    {
        // Read potentially unaligned pixels
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above... This is a manual
        // unroll.

        // Read potentially unaligned pixels
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}
void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;
    for(i=0; i+7<w; i+=8){
        dst[i+0] += src[i+0];
        dst[i+1] += src[i+1];
        dst[i+2] += src[i+2];
        dst[i+3] += src[i+3];
        dst[i+4] += src[i+4];
        dst[i+5] += src[i+5];
        dst[i+6] += src[i+6];
        dst[i+7] += src[i+7];
    }
    for(; i<w; i++)
        dst[i+0] += src[i+0];
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16 bytes-aligned (guaranteed) */
    /* i counts bytes, so it stays valid for the scalar tail loop below */
    for(i = 0; (i + 15) < w; i += 16)
    {
        vdst = vec_ld(i, (unsigned char*)dst);
        vsrc = vec_ld(i, (unsigned char*)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char*)dst);
    }
    /* if w is not a multiple of 16 */
    for (; (i < w) ; i++)
    {
        dst[i] = dst[i] + src[i];
    }
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;

POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);

    for(i=0; i<h; i++) {
        *((uint32_t*)(block)) = LD32(pixels);
        *((uint32_t*)(block+4)) = LD32(pixels+4);
        *((uint32_t*)(block+8)) = LD32(pixels+8);
        *((uint32_t*)(block+12)) = LD32(pixels+12);
        pixels += line_size;
        block += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
// hand-unrolling the loop by 4 gains about 15%
// minimum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
#if 0
    for(i=0; i<h; i++) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        pixels += line_size;
        block += line_size;
    }
#else
    for(i=0; i<h; i+=4) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(15, (unsigned char*)pixels);
        pixelsv1B = vec_ld(line_size, (unsigned char*)pixels);
        pixelsv2B = vec_ld(15 + line_size, (unsigned char*)pixels);
        pixelsv1C = vec_ld(line_size_2, (unsigned char*)pixels);
        pixelsv2C = vec_ld(15 + line_size_2, (unsigned char*)pixels);
        pixelsv1D = vec_ld(line_size_3, (unsigned char*)pixels);
        pixelsv2D = vec_ld(15 + line_size_3, (unsigned char*)pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels += line_size_4;
        block += line_size_4;
    }
#endif
POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);

#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
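/* Note on the op_avg trick above: per byte, (a|b) - (((a^b)&0xFE)>>1)
   equals (a+b+1)>>1, since a+b = (a|b)+(a&b) and (a&b) = (a|b)-(a^b);
   masking with 0xFEFEFEFE before the shift keeps a byte's low bit from
   leaking into its neighbour. Example on one byte: a=3, b=4:
   (3|4)=7, ((3^4)&0xFE)>>1 = 3, 7-3 = 4 = (3+4+1)>>1. */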
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;

POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);

    for(i=0; i<h; i++) {
        op_avg(*((uint32_t*)(block)),LD32(pixels));
        op_avg(*((uint32_t*)(block+4)),LD32(pixels+4));
        op_avg(*((uint32_t*)(block+8)),LD32(pixels+8));
        op_avg(*((uint32_t*)(block+12)),LD32(pixels+12));
        pixels += line_size;
        block += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);

    for(i=0; i<h; i++) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        blockv = vec_ld(0, block);
        pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
        blockv = vec_avg(blockv,pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels += line_size;
        block += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);

#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 8) == 0) */
void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;
POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
    for (i = 0; i < h; i++) {
        *((uint32_t *) (block)) =
            (((*((uint32_t *) (block))) |
              ((((const struct unaligned_32 *) (pixels))->l))) -
             ((((*((uint32_t *) (block))) ^
                ((((const struct unaligned_32 *) (pixels))->
                  l))) & 0xFEFEFEFEUL) >> 1));
        *((uint32_t *) (block + 4)) =
            (((*((uint32_t *) (block + 4))) |
              ((((const struct unaligned_32 *) (pixels + 4))->l))) -
             ((((*((uint32_t *) (block + 4))) ^
                ((((const struct unaligned_32 *) (pixels + 4))->
                  l))) & 0xFEFEFEFEUL) >> 1));
        pixels += line_size;
        block += line_size;
    }
POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);

    for (i = 0; i < h; i++) {
        /*
           block is 8 bytes-aligned, so we're either in the
           left block (16 bytes-aligned) or in the right block (not)
        */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv = vec_ld(0, block);
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside)
        {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
        }
        else
        {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);

#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
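/* Note: vcprm (from the shared AltiVec compatibility headers) builds a
   vec_perm control vector selecting 32-bit words: indices 0-3 pick words
   from the first operand, s0-s3 from the second. So above, when the
   8-byte block sits in the right half of the loaded vector,
   vcprm(0,1,s0,s1) keeps blockv's left half and lands the pixels in the
   right half, and vcprm(s0,s1,2,3) handles the mirrored case. */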
/* next one assumes that ((line_size % 8) == 0) */
void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;
POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
    for (j = 0; j < 2; j++) {
        int i;
        const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        const uint32_t b =
            (((const struct unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 =
            (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        uint32_t h0 =
            ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;

        pixels += line_size;
        for (i = 0; i < h; i += 2) {
            uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
            uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) =
                h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
            a = (((const struct unaligned_32 *) (pixels))->l);
            b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) =
                h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
        }
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char
        pixelsv1, pixelsv2,
        pixelsavg;
    register vector unsigned char
        blockv, temp1, temp2;
    register vector unsigned short
        pixelssum1, pixelssum2, temp3;
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside)
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        }
        else
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
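/* The reference C variant above averages four neighbouring pixels per
   output byte with 32-bit arithmetic only: each byte is split into its
   low 2 bits (mask 0x03030303) and its high 6 bits (mask 0xFCFCFCFC,
   pre-shifted right by 2), so four bytes are summed per 32-bit add with
   no carry crossing byte lanes. The rounding constant folded into l0 is
   0x02020202 here; the no-rnd variant below uses 0x01010101 instead. */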
/* next one assumes that ((line_size % 8) == 0) */
void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;
POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
    for (j = 0; j < 2; j++) {
        int i;
        const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        const uint32_t b =
            (((const struct unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 =
            (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
        uint32_t h0 =
            ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;

        pixels += line_size;
        for (i = 0; i < h; i += 2) {
            uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
            uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) =
                h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
            a = (((const struct unaligned_32 *) (pixels))->l);
            b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) =
                h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
        }
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char
        pixelsv1, pixelsv2,
        pixelsavg;
    register vector unsigned char
        blockv, temp1, temp2;
    register vector unsigned short
        pixelssum1, pixelssum2, temp3;
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
    register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside)
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        }
        else
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;
POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
    for (j = 0; j < 4; j++) {
        int i;
        const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        const uint32_t b =
            (((const struct unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 =
            (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        uint32_t h0 =
            ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;

        pixels += line_size;
        for (i = 0; i < h; i += 2) {
            uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
            uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) =
                h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
            a = (((const struct unaligned_32 *) (pixels))->l);
            b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) =
                h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
        }
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char
        pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char
        blockv, temp1, temp2;
    register vector unsigned short
        pixelssum1, pixelssum2, temp3,
        pixelssum3, pixelssum4, temp4;
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);

POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;
POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
    for (j = 0; j < 4; j++) {
        int i;
        const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        const uint32_t b =
            (((const struct unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 =
            (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
        uint32_t h0 =
            ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;

        pixels += line_size;
        for (i = 0; i < h; i += 2) {
            uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
            uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) =
                h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
            a = (((const struct unaligned_32 *) (pixels))->l);
            b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) =
                h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
        }
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char
        pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char
        blockv, temp1, temp2;
    register vector unsigned short
        pixelssum1, pixelssum2, temp3,
        pixelssum3, pixelssum4, temp4;
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
    register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);

POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
    int sum;
    register const_vector unsigned char vzero =
        (const_vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
    {
    register const_vector signed short vprod1 =(const_vector signed short)
        AVV( 1,-1, 1,-1, 1,-1, 1,-1);
    register const_vector signed short vprod2 =(const_vector signed short)
        AVV( 1, 1,-1,-1, 1, 1,-1,-1);
    register const_vector signed short vprod3 =(const_vector signed short)
        AVV( 1, 1, 1, 1,-1,-1,-1,-1);
    register const_vector unsigned char perm1 = (const_vector unsigned char)
        AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
            0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
    register const_vector unsigned char perm2 = (const_vector unsigned char)
        AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
            0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
    register const_vector unsigned char perm3 = (const_vector unsigned char)
        AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);

#define ONEITERBUTTERFLY(i, res) \
    { \
    register vector unsigned char src1, src2, srcO; \
    register vector unsigned char dst1, dst2, dstO; \
    register vector signed short srcV, dstV; \
    register vector signed short but0, but1, but2, op1, op2, op3; \
    src1 = vec_ld(stride * i, src); \
    if ((((stride * i) + (unsigned long)src) & 0x0000000F) > 8) \
        src2 = vec_ld((stride * i) + 16, src); \
    srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
    dst1 = vec_ld(stride * i, dst); \
    if ((((stride * i) + (unsigned long)dst) & 0x0000000F) > 8) \
        dst2 = vec_ld((stride * i) + 16, dst); \
    dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
    /* promote the unsigned chars to signed shorts */ \
    /* we're in the 8x8 function, we only care for the first 8 */ \
    srcV = \
        (vector signed short)vec_mergeh((vector signed char)vzero, \
        (vector signed char)srcO); \
    dstV = \
        (vector signed short)vec_mergeh((vector signed char)vzero, \
        (vector signed char)dstO); \
    /* subtractions inside the first butterfly */ \
    but0 = vec_sub(srcV, dstV); \
    op1 = vec_perm(but0, but0, perm1); \
    but1 = vec_mladd(but0, vprod1, op1); \
    op2 = vec_perm(but1, but1, perm2); \
    but2 = vec_mladd(but1, vprod2, op2); \
    op3 = vec_perm(but2, but2, perm3); \
    res = vec_mladd(but2, vprod3, op3); \
    }
    ONEITERBUTTERFLY(0, temp0);
    ONEITERBUTTERFLY(1, temp1);
    ONEITERBUTTERFLY(2, temp2);
    ONEITERBUTTERFLY(3, temp3);
    ONEITERBUTTERFLY(4, temp4);
    ONEITERBUTTERFLY(5, temp5);
    ONEITERBUTTERFLY(6, temp6);
    ONEITERBUTTERFLY(7, temp7);
#undef ONEITERBUTTERFLY
    {
    register vector signed int vsum;
    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
    }
POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
    }
    return sum;
}
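/* One ONEITERBUTTERFLY above is the 1-D 8-point Hadamard transform of a
   difference row, done with three perm+mladd stages; the line0..line7C
   adds/subs then apply the same transform down the columns. A scalar
   sketch of the 1-D butterfly (illustrative only, not compiled in): */
#if 0
static void hadamard8_1d(int16_t row[8])
{
    int i, j, k;
    /* three stages of span-1, span-2 and span-4 add/sub butterflies */
    for (k = 1; k < 8; k <<= 1)
        for (i = 0; i < 8; i += k << 1)
            for (j = i; j < i + k; j++) {
                int a = row[j], b = row[j + k];
                row[j]     = a + b;
                row[j + k] = a - b;
            }
}
#endif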
/*
  16x8 works with 16 elements; it allows us to avoid replicating
  loads, and gives the compiler more room for scheduling.
  It's only used from inside hadamard8_diff16_altivec.

  Unfortunately, it seems gcc-3.3 is a bit dumb, and
  the compiled code has a LOT of spill code; it seems
  gcc (unlike xlc) cannot keep everything in registers
  by itself. The following code includes hand-made
  register allocation. It's not clean, but on
  a 7450 the resulting code is much faster (best case
  falls from 700+ cycles to 550).

  xlc doesn't add spill code, but it doesn't know how to
  schedule for the 7450, and its code isn't much faster than
  gcc-3.3 on the 7450 (but uses 25% fewer instructions...)

  On the 970, the hand-made RA is still a win (around 690
  vs. around 780), but xlc goes to around 660 on the
  regular C code.
*/
static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
    int sum;
    register vector signed short
        temp0 REG_v(v0),
        temp1 REG_v(v1),
        temp2 REG_v(v2),
        temp3 REG_v(v3),
        temp4 REG_v(v4),
        temp5 REG_v(v5),
        temp6 REG_v(v6),
        temp7 REG_v(v7);
    register vector signed short
        temp0S REG_v(v8),
        temp1S REG_v(v9),
        temp2S REG_v(v10),
        temp3S REG_v(v11),
        temp4S REG_v(v12),
        temp5S REG_v(v13),
        temp6S REG_v(v14),
        temp7S REG_v(v15);
    register const_vector unsigned char vzero REG_v(v31)=
        (const_vector unsigned char)vec_splat_u8(0);
    {
    register const_vector signed short vprod1 REG_v(v16)=
        (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
    register const_vector signed short vprod2 REG_v(v17)=
        (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
    register const_vector signed short vprod3 REG_v(v18)=
        (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
    register const_vector unsigned char perm1 REG_v(v19)=
        (const_vector unsigned char)
        AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
            0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
    register const_vector unsigned char perm2 REG_v(v20)=
        (const_vector unsigned char)
        AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
            0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
    register const_vector unsigned char perm3 REG_v(v21)=
        (const_vector unsigned char)
        AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);

#define ONEITERBUTTERFLY(i, res1, res2) \
    { \
    register vector unsigned char src1 REG_v(v22), \
                                  src2 REG_v(v23), \
                                  dst1 REG_v(v24), \
                                  dst2 REG_v(v25), \
                                  srcO REG_v(v22), \
                                  dstO REG_v(v23); \
    \
    register vector signed short srcV REG_v(v24), \
                                 dstV REG_v(v25), \
                                 srcW REG_v(v26), \
                                 dstW REG_v(v27), \
                                 but0 REG_v(v28), \
                                 but0S REG_v(v29), \
                                 op1 REG_v(v30), \
                                 but1 REG_v(v22), \
                                 op1S REG_v(v23), \
                                 but1S REG_v(v24), \
                                 op2 REG_v(v25), \
                                 but2 REG_v(v26), \
                                 op2S REG_v(v27), \
                                 but2S REG_v(v28), \
                                 op3 REG_v(v29), \
                                 op3S REG_v(v30); \
    src1 = vec_ld(stride * i, src); \
    src2 = vec_ld((stride * i) + 16, src); \
    srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
    dst1 = vec_ld(stride * i, dst); \
    dst2 = vec_ld((stride * i) + 16, dst); \
    dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
    /* promote the unsigned chars to signed shorts */ \
    srcV = \
        (vector signed short)vec_mergeh((vector signed char)vzero, \
        (vector signed char)srcO); \
    dstV = \
        (vector signed short)vec_mergeh((vector signed char)vzero, \
        (vector signed char)dstO); \
    srcW = \
        (vector signed short)vec_mergel((vector signed char)vzero, \
        (vector signed char)srcO); \
    dstW = \
        (vector signed short)vec_mergel((vector signed char)vzero, \
        (vector signed char)dstO); \
    /* subtractions inside the first butterfly */ \
    but0 = vec_sub(srcV, dstV); \
    but0S = vec_sub(srcW, dstW); \
    op1 = vec_perm(but0, but0, perm1); \
    but1 = vec_mladd(but0, vprod1, op1); \
    op1S = vec_perm(but0S, but0S, perm1); \
    but1S = vec_mladd(but0S, vprod1, op1S); \
    op2 = vec_perm(but1, but1, perm2); \
    but2 = vec_mladd(but1, vprod2, op2); \
    op2S = vec_perm(but1S, but1S, perm2); \
    but2S = vec_mladd(but1S, vprod2, op2S); \
    op3 = vec_perm(but2, but2, perm3); \
    res1 = vec_mladd(but2, vprod3, op3); \
    op3S = vec_perm(but2S, but2S, perm3); \
    res2 = vec_mladd(but2S, vprod3, op3S); \
    }
    ONEITERBUTTERFLY(0, temp0, temp0S);
    ONEITERBUTTERFLY(1, temp1, temp1S);
    ONEITERBUTTERFLY(2, temp2, temp2S);
    ONEITERBUTTERFLY(3, temp3, temp3S);
    ONEITERBUTTERFLY(4, temp4, temp4S);
    ONEITERBUTTERFLY(5, temp5, temp5S);
    ONEITERBUTTERFLY(6, temp6, temp6S);
    ONEITERBUTTERFLY(7, temp7, temp7S);
#undef ONEITERBUTTERFLY
    {
    register vector signed int vsum;
    register vector signed short line0S, line1S, line2S, line3S, line4S,
                                 line5S, line6S, line7S, line0BS,line2BS,
                                 line1BS,line3BS,line4BS,line6BS,line5BS,
                                 line7BS,line0CS,line4CS,line1CS,line5CS,
                                 line2CS,line6CS,line3CS,line7CS;

    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);

    line0S = vec_add(temp0S, temp1S);
    line1S = vec_sub(temp0S, temp1S);
    line2S = vec_add(temp2S, temp3S);
    line3S = vec_sub(temp2S, temp3S);
    line4S = vec_add(temp4S, temp5S);
    line5S = vec_sub(temp4S, temp5S);
    line6S = vec_add(temp6S, temp7S);
    line7S = vec_sub(temp6S, temp7S);

    line0BS = vec_add(line0S, line2S);
    line2BS = vec_sub(line0S, line2S);
    line1BS = vec_add(line1S, line3S);
    line3BS = vec_sub(line1S, line3S);
    line4BS = vec_add(line4S, line6S);
    line6BS = vec_sub(line4S, line6S);
    line5BS = vec_add(line5S, line7S);
    line7BS = vec_sub(line5S, line7S);

    line0CS = vec_add(line0BS, line4BS);
    line4CS = vec_sub(line0BS, line4BS);
    line1CS = vec_add(line1BS, line5BS);
    line5CS = vec_sub(line1BS, line5BS);
    line2CS = vec_add(line2BS, line6BS);
    line6CS = vec_sub(line2BS, line6BS);
    line3CS = vec_add(line3BS, line7BS);
    line7CS = vec_sub(line3BS, line7BS);

    vsum = vec_sum4s(vec_abs(line0CS), vsum);
    vsum = vec_sum4s(vec_abs(line1CS), vsum);
    vsum = vec_sum4s(vec_abs(line2CS), vsum);
    vsum = vec_sum4s(vec_abs(line3CS), vsum);
    vsum = vec_sum4s(vec_abs(line4CS), vsum);
    vsum = vec_sum4s(vec_abs(line5CS), vsum);
    vsum = vec_sum4s(vec_abs(line6CS), vsum);
    vsum = vec_sum4s(vec_abs(line7CS), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
    }
    }
    return sum;
}
int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
    int score;
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
    score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    if (h == 16) {
        dst += 8 * stride;
        src += 8 * stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
    return score;
}
int has_altivec(void)
{
#ifdef __AMIGAOS4__
    ULONG result = 0;
    extern struct ExecIFace *IExec;

    IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE);
    if (result == VECTORTYPE_ALTIVEC) return 1;
    return 0;
#else /* __AMIGAOS4__ */
#ifdef CONFIG_DARWIN
    int sels[2] = {CTL_HW, HW_VECTORUNIT};
    int has_vu = 0;
    size_t len = sizeof(has_vu);
    int err;

    err = sysctl(sels, 2, &has_vu, &len, NULL, 0);

    if (err == 0) return (has_vu != 0);
#else /* CONFIG_DARWIN */
/* no Darwin, do it the brute-force way */
/* this is borrowed from the libmpeg2 library */
    {
        signal (SIGILL, sigill_handler);
        if (sigsetjmp (jmpbuf, 1)) {
            signal (SIGILL, SIG_DFL);
        } else {
            canjump = 1;

            asm volatile ("mtspr 256, %0\n\t"
                          "vand %%v0, %%v0, %%v0"
                          :
                          : "r" (-1));

            signal (SIGILL, SIG_DFL);
            return 1;
        }
    }
#endif /* CONFIG_DARWIN */
    return 0;
#endif /* __AMIGAOS4__ */
}
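/* Typical use (illustrative): callers probe once at init time, e.g.
 *     if (has_altivec())
 *         dsputil_init_altivec(c, avctx);
 * so the SIGILL probe above only ever runs once, before any AltiVec
 * code path is installed. */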
static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
                                            int blocksize)
{
    int i;
    vector float m, a;
    vector bool int t0, t1;
    const vector unsigned int v_31 = // XXX: = 31; vec_splat_u32 immediates are limited to [-16,15]
        vec_add(vec_add(vec_splat_u32(15),vec_splat_u32(15)),vec_splat_u32(1));
    for(i=0; i<blocksize; i+=4) {
        m = vec_ld(0, mag+i);
        a = vec_ld(0, ang+i);
        t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
        t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
        a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
        t0 = (vector bool int)vec_and(a, t1);
        t1 = (vector bool int)vec_andc(a, t1);
        a = vec_sub(m, (vector float)t1);
        m = vec_add(m, (vector float)t0);
        vec_stl(a, 0, ang+i);
        vec_stl(m, 0, mag+i);
    }
}
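/* Roughly the scalar logic the AltiVec loop above implements, for
   reference (not compiled in; reconstructed from the vector code, so
   treat it as a sketch): */
#if 0
static void vorbis_inverse_coupling_scalar(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float temp = ang[i];
                ang[i]     = mag[i];
                mag[i]    += temp;
            }
        } else {
            if (ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float temp = ang[i];
                ang[i]     = mag[i];
                mag[i]    -= temp;
            }
        }
    }
}
#endif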
/* next one assumes that ((line_size % 8) == 0) */
void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels8_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;
POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
    for (j = 0; j < 2; j++) {
        int i;
        const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        const uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;

        pixels += line_size;
        for (i = 0; i < h; i += 2) {
            uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
            uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) = rnd_avg32(*((uint32_t *) block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
            pixels += line_size;
            block += line_size;
            a = (((const struct unaligned_32 *) (pixels))->l);
            b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) = rnd_avg32(*((uint32_t *) block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
            pixels += line_size;
            block += line_size;
        }
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const_vector unsigned char vczero = (const_vector unsigned char)
        vec_splat_u8(0);
    register const_vector unsigned short vctwo = (const_vector unsigned short)
        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->pix_abs[0][1] = sad16_x2_altivec;
    c->pix_abs[0][2] = sad16_y2_altivec;
    c->pix_abs[0][3] = sad16_xy2_altivec;
    c->pix_abs[0][0] = sad16_altivec;
    c->pix_abs[1][0] = sad8_altivec;
    c->sad[0]= sad16_altivec;
    c->sad[1]= sad8_altivec;
    c->pix_norm1 = pix_norm1_altivec;
    c->sse[1]= sse8_altivec;
    c->sse[0]= sse16_altivec;
    c->pix_sum = pix_sum_altivec;
    c->diff_pixels = diff_pixels_altivec;
    c->get_pixels = get_pixels_altivec;
// next one disabled as it's untested.
#if 0
    c->add_bytes= add_bytes_altivec;
#endif
    c->put_pixels_tab[0][0] = put_pixels16_altivec;
    /* the two functions do the same thing, so use the same code */
    c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
    c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
    c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
    c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
    c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
    c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;

    c->hadamard8_diff[0] = hadamard8_diff16_altivec;
    c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
#ifdef CONFIG_VORBIS_DECODER
    c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
#endif
}