/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
//#define DEBUG_ALIGNMENT
#ifdef DEBUG_ALIGNMENT
#define ASSERT_ALIGNED(ptr) assert(!((unsigned long)ptr&0x0000000F));
#else
#define ASSERT_ALIGNED(ptr) ;
#endif

/* this code assumes that stride % 16 == 0 */
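
/* CHROMA_MC8_ALTIVEC_CORE performs one row of the H.264 chroma bilinear
 * interpolation on 8 pixels at once:
 *   dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride] + D*src[i+stride+1] + 32) >> 6
 * vsrc0/vsrc1 hold the current row, vsrc2/vsrc3 the row below; the 8 results
 * are packed back to bytes and merged into the destination vector through
 * fperm, so only the 8 target bytes of the 16-byte store are modified. */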
#define CHROMA_MC8_ALTIVEC_CORE \
        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc2uc);\
        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc3uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, v32ss);\
        psum = vec_mladd(vB, vsrc1ssH, psum);\
        psum = vec_mladd(vC, vsrc2ssH, psum);\
        psum = vec_mladd(vD, vsrc3ssH, psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8_t)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        vsrc0ssH = vsrc2ssH;\
        vsrc1ssH = vsrc3ssH;\
\
        dst += stride;\
        src += stride;
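
/* Simplified variant for the degenerate cases where either x == 0 or y == 0:
 * the 2-D blend collapses to a 1-D blend between two source vectors with the
 * combined weight vE = vB + vC computed by the caller. */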
#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
\
        vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc0uc);\
        vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc1uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, v32ss);\
        psum = vec_mladd(vE, vsrc1ssH, psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8_t)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        dst += stride;\
        src += stride;
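
/* 8-wide chroma motion compensation with eighth-pel offsets x,y in [0..7].
 * The integer weights follow the H.264 specification:
 *   A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y   (A+B+C+D == 64). */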
void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                    int stride, int h, int x, int y) {
    POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8_t fperm;
    const vec_s32_t vABCD = vec_ld(0, ABCD);
    const vec_s16_t vA = vec_splat((vec_s16_t)vABCD, 1);
    const vec_s16_t vB = vec_splat((vec_s16_t)vABCD, 3);
    const vec_s16_t vC = vec_splat((vec_s16_t)vABCD, 5);
    const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
    LOAD_ZERO;
    const vec_s16_t v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16_t v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
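
    /* AltiVec has no unaligned loads: vec_ld only returns the aligned 16-byte
     * block containing the address. loadSecond is set when the 9 bytes needed
     * per row spill into the next 16-byte block, so a second load plus a
     * vec_perm (with a vec_lvsl permute vector) is required. reallyBadAlign
     * flags the src % 16 == 15 case, where the "+1" row lies entirely inside
     * that second block and the permute can be skipped. */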
    vec_u8_t vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8_t vsrc0uc, vsrc1uc;
    vec_s16_t vsrc0ssH, vsrc1ssH;
    vec_u8_t vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16_t vsrc2ssH, vsrc3ssH, psum;
    vec_u8_t vdst, ppsum, vfdst, fsum;

    POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
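
    /* dst need not be 16-byte aligned (for 8-wide chroma it is typically at
     * least 8-byte aligned). fperm merges the 8 computed bytes into whichever
     * half of the aligned 16-byte destination vector they belong to, so the
     * read-modify-write store in the core macros never disturbs the other
     * 8 bytes. */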
    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
                              0x14, 0x15, 0x16, 0x17,
                              0x08, 0x09, 0x0A, 0x0B,
                              0x0C, 0x0D, 0x0E, 0x0F);
    } else {
        fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
                              0x04, 0x05, 0x06, 0x07,
                              0x18, 0x19, 0x1A, 0x1B,
                              0x1C, 0x1D, 0x1E, 0x1F);
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc0uc);
    vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v,(vec_u8_t)vsrc1uc);

    if (ABCD[3]) {
        if (!loadSecond) {// -> !reallyBadAlign
            for (i = 0 ; i < h ; i++) {
                vsrcCuc = vec_ld(stride + 0, src);
                vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
                vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

                CHROMA_MC8_ALTIVEC_CORE
            }
        } else {
            vec_u8_t vsrcDuc;
            for (i = 0 ; i < h ; i++) {
                vsrcCuc = vec_ld(stride + 0, src);
                vsrcDuc = vec_ld(stride + 16, src);
                vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
                if (reallyBadAlign)
                    vsrc3uc = vsrcDuc;
                else
                    vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

                CHROMA_MC8_ALTIVEC_CORE
            }
        }
    } else {
        const vec_s16_t vE = vec_add(vB, vC);
        if (ABCD[2]) { // x == 0 B == 0
            if (!loadSecond) {// -> !reallyBadAlign
                for (i = 0 ; i < h ; i++) {
                    vsrcCuc = vec_ld(stride + 0, src);
                    vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE

                    vsrc0uc = vsrc1uc;
                }
            } else {
                vec_u8_t vsrcDuc;
                for (i = 0 ; i < h ; i++) {
                    vsrcCuc = vec_ld(0, src);
                    vsrcDuc = vec_ld(15, src);
                    vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE

                    vsrc0uc = vsrc1uc;
                }
            }
        } else { // y == 0 C == 0
            if (!loadSecond) {// -> !reallyBadAlign
                for (i = 0 ; i < h ; i++) {
                    vsrcCuc = vec_ld(0, src);
                    vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
                    vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
                }
            } else {
                vec_u8_t vsrcDuc;
                for (i = 0 ; i < h ; i++) {
                    vsrcCuc = vec_ld(0, src);
                    vsrcDuc = vec_ld(15, src);
                    vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
                    vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
                }
            }
        }
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
}
#undef CHROMA_MC8_ALTIVEC_CORE

/* this code assumes stride % 16 == 0 */
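
/* 16-wide horizontal 6-tap luma half-pel filter. For each output pixel:
 *   dst[i] = clip((src[i-2] + src[i+3] - 5*(src[i-1] + src[i+2])
 *                  + 20*(src[i] + src[i+1]) + 16) >> 5)
 * The 16 pixels of a row are processed as two vectors of eight 16-bit
 * intermediates (the "A" and "B" halves produced by vec_mergeh/vec_mergel). */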
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
    POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
    register int i;

    LOAD_ZERO;
    const vec_u8_t permM2 = vec_lvsl(-2, src);
    const vec_u8_t permM1 = vec_lvsl(-1, src);
    const vec_u8_t permP0 = vec_lvsl(+0, src);
    const vec_u8_t permP1 = vec_lvsl(+1, src);
    const vec_u8_t permP2 = vec_lvsl(+2, src);
    const vec_u8_t permP3 = vec_lvsl(+3, src);
    const vec_s16_t v5ss = vec_splat_s16(5);
    const vec_u16_t v5us = vec_splat_u16(5);
    const vec_s16_t v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_s16_t v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

    vec_u8_t srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;

    register int align = ((((unsigned long)src) - 2) % 16);
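
    /* src is not necessarily aligned; align is the offset of src-2 within its
     * 16-byte block and selects how the six shifted copies of the row
     * (srcM2 .. srcP3) are assembled below. For align <= 10 the generic
     * two-load path with vec_perm covers all 21 bytes needed; cases 11..15
     * are special-cased because one of the shifted vectors coincides exactly
     * with srcR2 and, for 12..15, a third aligned load (srcR3) is required. */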
    vec_s16_t srcP0A, srcP0B, srcP1A, srcP1B,
              srcP2A, srcP2B, srcP3A, srcP3B,
              srcM1A, srcM1B, srcM2A, srcM2B,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
              pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
              psumA, psumB, sumA, sumB;

    vec_u8_t sum, vdst, fsum;

    POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);

    for (i = 0 ; i < 16 ; i ++) {
        vec_u8_t srcR1 = vec_ld(-2, src);
        vec_u8_t srcR2 = vec_ld(14, src);

        switch (align) {
        default: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = vec_perm(srcR1, srcR2, permP3);
        } break;
        case 11: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = srcR2;
        } break;
        case 12: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = srcR2;
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 13: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = srcR2;
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 14: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = srcR2;
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 15: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = srcR2;
            srcP0 = vec_perm(srcR2, srcR3, permP0);
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        }

        srcP0A = (vec_s16_t) vec_mergeh(zero_u8v, srcP0);
        srcP0B = (vec_s16_t) vec_mergel(zero_u8v, srcP0);
        srcP1A = (vec_s16_t) vec_mergeh(zero_u8v, srcP1);
        srcP1B = (vec_s16_t) vec_mergel(zero_u8v, srcP1);

        srcP2A = (vec_s16_t) vec_mergeh(zero_u8v, srcP2);
        srcP2B = (vec_s16_t) vec_mergel(zero_u8v, srcP2);
        srcP3A = (vec_s16_t) vec_mergeh(zero_u8v, srcP3);
        srcP3B = (vec_s16_t) vec_mergel(zero_u8v, srcP3);

        srcM1A = (vec_s16_t) vec_mergeh(zero_u8v, srcM1);
        srcM1B = (vec_s16_t) vec_mergel(zero_u8v, srcM1);
        srcM2A = (vec_s16_t) vec_mergeh(zero_u8v, srcM2);
        srcM2B = (vec_s16_t) vec_mergel(zero_u8v, srcM2);

        sum1A = vec_adds(srcP0A, srcP1A);
        sum1B = vec_adds(srcP0B, srcP1B);
        sum2A = vec_adds(srcM1A, srcP2A);
        sum2B = vec_adds(srcM1B, srcP2B);
        sum3A = vec_adds(srcM2A, srcP3A);
        sum3B = vec_adds(srcM2B, srcP3B);

        pp1A = vec_mladd(sum1A, v20ss, v16ss);
        pp1B = vec_mladd(sum1B, v20ss, v16ss);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        pp3A = vec_add(sum3A, pp1A);
        pp3B = vec_add(sum3B, pp1B);

        psumA = vec_sub(pp3A, pp2A);
        psumB = vec_sub(pp3B, pp2B);

        sumA = vec_sra(psumA, v5us);
        sumB = vec_sra(psumB, v5us);

        sum = vec_packsu(sumA, sumB);

        ASSERT_ALIGNED(dst);
        vdst = vec_ld(0, dst);

        OP_U8_ALTIVEC(fsum, sum, vdst);

        vec_st(fsum, 0, dst);

        src += srcStride;
        dst += dstStride;
    }

    POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
}
/* this code assumes stride % 16 == 0 */
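
/* 16x16 vertical 6-tap luma half-pel filter: the same (1,-5,20,20,-5,1)/32
 * kernel as the horizontal pass, applied down the columns. Six source rows
 * are kept in registers and shifted down by one row per iteration instead of
 * being reloaded. */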
static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
    POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);
    register int i;

    LOAD_ZERO;
    const vec_u8_t perm = vec_lvsl(0, src);
    const vec_s16_t v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_u16_t v5us = vec_splat_u16(5);
    const vec_s16_t v5ss = vec_splat_s16(5);
    const vec_s16_t v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));

    uint8_t *srcbis = src - (srcStride * 2);

    const vec_u8_t srcM2a = vec_ld(0, srcbis);
    const vec_u8_t srcM2b = vec_ld(16, srcbis);
    const vec_u8_t srcM2 = vec_perm(srcM2a, srcM2b, perm);
    // srcbis += srcStride;
    const vec_u8_t srcM1a = vec_ld(0, srcbis += srcStride);
    const vec_u8_t srcM1b = vec_ld(16, srcbis);
    const vec_u8_t srcM1 = vec_perm(srcM1a, srcM1b, perm);
    // srcbis += srcStride;
    const vec_u8_t srcP0a = vec_ld(0, srcbis += srcStride);
    const vec_u8_t srcP0b = vec_ld(16, srcbis);
    const vec_u8_t srcP0 = vec_perm(srcP0a, srcP0b, perm);
    // srcbis += srcStride;
    const vec_u8_t srcP1a = vec_ld(0, srcbis += srcStride);
    const vec_u8_t srcP1b = vec_ld(16, srcbis);
    const vec_u8_t srcP1 = vec_perm(srcP1a, srcP1b, perm);
    // srcbis += srcStride;
    const vec_u8_t srcP2a = vec_ld(0, srcbis += srcStride);
    const vec_u8_t srcP2b = vec_ld(16, srcbis);
    const vec_u8_t srcP2 = vec_perm(srcP2a, srcP2b, perm);
    // srcbis += srcStride;

    vec_s16_t srcM2ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcM2);
    vec_s16_t srcM2ssB = (vec_s16_t) vec_mergel(zero_u8v, srcM2);
    vec_s16_t srcM1ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcM1);
    vec_s16_t srcM1ssB = (vec_s16_t) vec_mergel(zero_u8v, srcM1);
    vec_s16_t srcP0ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcP0);
    vec_s16_t srcP0ssB = (vec_s16_t) vec_mergel(zero_u8v, srcP0);
    vec_s16_t srcP1ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcP1);
    vec_s16_t srcP1ssB = (vec_s16_t) vec_mergel(zero_u8v, srcP1);
    vec_s16_t srcP2ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcP2);
    vec_s16_t srcP2ssB = (vec_s16_t) vec_mergel(zero_u8v, srcP2);

    vec_s16_t pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
              psumA, psumB, sumA, sumB,
              srcP3ssA, srcP3ssB,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;

    vec_u8_t sum, vdst, fsum, srcP3a, srcP3b, srcP3;

    POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);

    for (i = 0 ; i < 16 ; i++) {
        srcP3a = vec_ld(0, srcbis += srcStride);
        srcP3b = vec_ld(16, srcbis);
        srcP3 = vec_perm(srcP3a, srcP3b, perm);
        srcP3ssA = (vec_s16_t) vec_mergeh(zero_u8v, srcP3);
        srcP3ssB = (vec_s16_t) vec_mergel(zero_u8v, srcP3);
        // srcbis += srcStride;

        sum1A = vec_adds(srcP0ssA, srcP1ssA);
        sum1B = vec_adds(srcP0ssB, srcP1ssB);
        sum2A = vec_adds(srcM1ssA, srcP2ssA);
        sum2B = vec_adds(srcM1ssB, srcP2ssB);
        sum3A = vec_adds(srcM2ssA, srcP3ssA);
        sum3B = vec_adds(srcM2ssB, srcP3ssB);

        srcM2ssA = srcM1ssA;
        srcM2ssB = srcM1ssB;
        srcM1ssA = srcP0ssA;
        srcM1ssB = srcP0ssB;
        srcP0ssA = srcP1ssA;
        srcP0ssB = srcP1ssB;
        srcP1ssA = srcP2ssA;
        srcP1ssB = srcP2ssB;
        srcP2ssA = srcP3ssA;
        srcP2ssB = srcP3ssB;

        pp1A = vec_mladd(sum1A, v20ss, v16ss);
        pp1B = vec_mladd(sum1B, v20ss, v16ss);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        pp3A = vec_add(sum3A, pp1A);
        pp3B = vec_add(sum3B, pp1B);

        psumA = vec_sub(pp3A, pp2A);
        psumB = vec_sub(pp3B, pp2B);

        sumA = vec_sra(psumA, v5us);
        sumB = vec_sra(psumB, v5us);

        sum = vec_packsu(sumA, sumB);

        ASSERT_ALIGNED(dst);
        vdst = vec_ld(0, dst);

        OP_U8_ALTIVEC(fsum, sum, vdst);

        vec_st(fsum, 0, dst);

        dst += dstStride;
    }

    POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
}
/* this code assumes stride % 16 == 0 *and* tmp is properly aligned */
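
/* Centre (half-pel in both directions) case: the horizontal 6-tap filter is
 * first applied to 21 rows and the raw 16-bit sums (no rounding, no shift)
 * are stored to tmp; the vertical 6-tap filter is then run over tmp in 32-bit
 * precision, with the combined rounding constant 512 and a final >> 10. */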
static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
    POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1);
    register int i;
    LOAD_ZERO;
    const vec_u8_t permM2 = vec_lvsl(-2, src);
    const vec_u8_t permM1 = vec_lvsl(-1, src);
    const vec_u8_t permP0 = vec_lvsl(+0, src);
    const vec_u8_t permP1 = vec_lvsl(+1, src);
    const vec_u8_t permP2 = vec_lvsl(+2, src);
    const vec_u8_t permP3 = vec_lvsl(+3, src);
    const vec_s16_t v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
    const vec_u32_t v10ui = vec_splat_u32(10);
    const vec_s16_t v5ss = vec_splat_s16(5);
    const vec_s16_t v1ss = vec_splat_s16(1);
    const vec_s32_t v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
    const vec_u32_t v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));
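
    /* The intermediate values in tmp can exceed 16 bits once multiplied by 20,
     * so the second pass widens to 32 bits. The +512 and the >> 10 combine the
     * rounding and normalisation of both passes (32 * 32 = 1024). */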
    register int align = ((((unsigned long)src) - 2) % 16);

    vec_s16_t srcP0A, srcP0B, srcP1A, srcP1B,
              srcP2A, srcP2B, srcP3A, srcP3B,
              srcM1A, srcM1B, srcM2A, srcM2B,
              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
              pp1A, pp1B, pp2A, pp2B, psumA, psumB;

    const vec_u8_t mperm = (const vec_u8_t)
        AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
            0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
    int16_t *tmpbis = tmp;

    vec_s16_t tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
              tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
              tmpP2ssA, tmpP2ssB;

    vec_s32_t pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
              pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
              pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
              ssumAe, ssumAo, ssumBe, ssumBo;
    vec_u8_t fsum, sumv, sum, vdst;
    vec_s16_t ssume, ssumo;

    POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
    src -= (2 * srcStride);
    for (i = 0 ; i < 21 ; i ++) {
        vec_u8_t srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
        vec_u8_t srcR1 = vec_ld(-2, src);
        vec_u8_t srcR2 = vec_ld(14, src);

        switch (align) {
        default: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = vec_perm(srcR1, srcR2, permP3);
        } break;
        case 11: {
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = vec_perm(srcR1, srcR2, permP2);
            srcP3 = srcR2;
        } break;
        case 12: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = vec_perm(srcR1, srcR2, permP1);
            srcP2 = srcR2;
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 13: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = vec_perm(srcR1, srcR2, permP0);
            srcP1 = srcR2;
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 14: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = vec_perm(srcR1, srcR2, permM1);
            srcP0 = srcR2;
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        case 15: {
            vec_u8_t srcR3 = vec_ld(30, src);
            srcM2 = vec_perm(srcR1, srcR2, permM2);
            srcM1 = srcR2;
            srcP0 = vec_perm(srcR2, srcR3, permP0);
            srcP1 = vec_perm(srcR2, srcR3, permP1);
            srcP2 = vec_perm(srcR2, srcR3, permP2);
            srcP3 = vec_perm(srcR2, srcR3, permP3);
        } break;
        }

        srcP0A = (vec_s16_t) vec_mergeh(zero_u8v, srcP0);
        srcP0B = (vec_s16_t) vec_mergel(zero_u8v, srcP0);
        srcP1A = (vec_s16_t) vec_mergeh(zero_u8v, srcP1);
        srcP1B = (vec_s16_t) vec_mergel(zero_u8v, srcP1);

        srcP2A = (vec_s16_t) vec_mergeh(zero_u8v, srcP2);
        srcP2B = (vec_s16_t) vec_mergel(zero_u8v, srcP2);
        srcP3A = (vec_s16_t) vec_mergeh(zero_u8v, srcP3);
        srcP3B = (vec_s16_t) vec_mergel(zero_u8v, srcP3);

        srcM1A = (vec_s16_t) vec_mergeh(zero_u8v, srcM1);
        srcM1B = (vec_s16_t) vec_mergel(zero_u8v, srcM1);
        srcM2A = (vec_s16_t) vec_mergeh(zero_u8v, srcM2);
        srcM2B = (vec_s16_t) vec_mergel(zero_u8v, srcM2);

        sum1A = vec_adds(srcP0A, srcP1A);
        sum1B = vec_adds(srcP0B, srcP1B);
        sum2A = vec_adds(srcM1A, srcP2A);
        sum2B = vec_adds(srcM1B, srcP2B);
        sum3A = vec_adds(srcM2A, srcP3A);
        sum3B = vec_adds(srcM2B, srcP3B);

        pp1A = vec_mladd(sum1A, v20ss, sum3A);
        pp1B = vec_mladd(sum1B, v20ss, sum3B);

        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);

        psumA = vec_sub(pp1A, pp2A);
        psumB = vec_sub(pp1B, pp2B);

        vec_st(psumA, 0, tmp);
        vec_st(psumB, 16, tmp);

        src += srcStride;
        tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
    }
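
    /* Second pass: vertical 6-tap filter over the 16-bit intermediates in
     * tmp. Five rows are preloaded; the sixth is fetched inside the loop. */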
    tmpM2ssA = vec_ld(0, tmpbis);
    tmpM2ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpM1ssA = vec_ld(0, tmpbis);
    tmpM1ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP0ssA = vec_ld(0, tmpbis);
    tmpP0ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP1ssA = vec_ld(0, tmpbis);
    tmpP1ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;
    tmpP2ssA = vec_ld(0, tmpbis);
    tmpP2ssB = vec_ld(16, tmpbis);
    tmpbis += tmpStride;

    for (i = 0 ; i < 16 ; i++) {
        const vec_s16_t tmpP3ssA = vec_ld(0, tmpbis);
        const vec_s16_t tmpP3ssB = vec_ld(16, tmpbis);

        const vec_s16_t sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
        const vec_s16_t sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
        const vec_s16_t sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
        const vec_s16_t sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
        const vec_s16_t sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
        const vec_s16_t sum3B = vec_adds(tmpM2ssB, tmpP3ssB);

        tmpbis += tmpStride;

        tmpM2ssA = tmpM1ssA;
        tmpM2ssB = tmpM1ssB;
        tmpM1ssA = tmpP0ssA;
        tmpM1ssB = tmpP0ssB;
        tmpP0ssA = tmpP1ssA;
        tmpP0ssB = tmpP1ssB;
        tmpP1ssA = tmpP2ssA;
        tmpP1ssB = tmpP2ssB;
        tmpP2ssA = tmpP3ssA;
        tmpP2ssB = tmpP3ssB;
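
        /* The products no longer fit in 16 bits, so each term is expanded
         * into even/odd 32-bit halves with vec_mule/vec_mulo and only
         * re-interleaved (via mperm) after the final shift and pack. */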
        pp1Ae = vec_mule(sum1A, v20ss);
        pp1Ao = vec_mulo(sum1A, v20ss);
        pp1Be = vec_mule(sum1B, v20ss);
        pp1Bo = vec_mulo(sum1B, v20ss);

        pp2Ae = vec_mule(sum2A, v5ss);
        pp2Ao = vec_mulo(sum2A, v5ss);
        pp2Be = vec_mule(sum2B, v5ss);
        pp2Bo = vec_mulo(sum2B, v5ss);
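
        /* sum3 only needs to be widened (multiplied by 1): the even elements
         * are obtained by an arithmetic shift right of 16 on the 32-bit view,
         * the odd ones by vec_mulo with a vector of ones. */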
        pp3Ae = vec_sra((vec_s32_t)sum3A, v16ui);
        pp3Ao = vec_mulo(sum3A, v1ss);
        pp3Be = vec_sra((vec_s32_t)sum3B, v16ui);
        pp3Bo = vec_mulo(sum3B, v1ss);

        pp1cAe = vec_add(pp1Ae, v512si);
        pp1cAo = vec_add(pp1Ao, v512si);
        pp1cBe = vec_add(pp1Be, v512si);
        pp1cBo = vec_add(pp1Bo, v512si);

        pp32Ae = vec_sub(pp3Ae, pp2Ae);
        pp32Ao = vec_sub(pp3Ao, pp2Ao);
        pp32Be = vec_sub(pp3Be, pp2Be);
        pp32Bo = vec_sub(pp3Bo, pp2Bo);

        sumAe = vec_add(pp1cAe, pp32Ae);
        sumAo = vec_add(pp1cAo, pp32Ao);
        sumBe = vec_add(pp1cBe, pp32Be);
        sumBo = vec_add(pp1cBo, pp32Bo);

        ssumAe = vec_sra(sumAe, v10ui);
        ssumAo = vec_sra(sumAo, v10ui);
        ssumBe = vec_sra(sumBe, v10ui);
        ssumBo = vec_sra(sumBo, v10ui);

        ssume = vec_packs(ssumAe, ssumBe);
        ssumo = vec_packs(ssumAo, ssumBo);

        sumv = vec_packsu(ssume, ssumo);
        sum = vec_perm(sumv, sumv, mperm);

        ASSERT_ALIGNED(dst);
        vdst = vec_ld(0, dst);

        OP_U8_ALTIVEC(fsum, sum, vdst);

        vec_st(fsum, 0, dst);

        dst += dstStride;
    }
    POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);