/*****************************************************************************
 * mc.c: ppc motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2013 x264 project
 *
 * Authors: Eric Petit <eric.petit@lapsus.org>
 *          Guillaume Poirier <gpoirier@mplayerhq.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdarg.h>

#include "x264.h"
#include "common/common.h"
#include "common/mc.h"
#include "mc.h"
#include "ppccommon.h"

#if !HIGH_BIT_DEPTH
typedef void (*pf_mc_t)( uint8_t *src, intptr_t i_src,
                         uint8_t *dst, intptr_t i_dst, int i_height );

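/* Half-pel reference plane lookup, indexed by qpel_idx = ((mvy&3)<<2)+(mvx&3).
 * src[0..3] hold the fullpel, horizontal, vertical and centre half-pel planes;
 * quarter-pel positions are synthesized by averaging the two planes these
 * tables select. */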
static const uint8_t hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const uint8_t hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};

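/* 6-tap H.264 luma half-pel filter (1,-5,20,20,-5,1); i_pix_next is the
 * distance between taps (the stride for vertical filtering, 1 for horizontal). */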
static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
{
    return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] +
           pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] +
           pix[ 3*i_pix_next];
}
static inline int x264_tapfilter1( uint8_t *pix )
{
    return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] +
           pix[ 3];
}

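/* pixel_avg2: rounding average of two half-pel planes; this is how the
 * quarter-pel positions are produced in mc_luma/get_ref below. */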
static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst,  intptr_t i_dst,
                                               uint8_t *src1, intptr_t i_src1,
                                               uint8_t *src2, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < 4; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  intptr_t i_dst,
                                               uint8_t *src1, intptr_t i_src1,
                                               uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    PREP_LOAD;
    PREP_STORE8;
    PREP_LOAD_SRC( src1 );
    PREP_LOAD_SRC( src2 );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
        VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
        src1v = vec_avg( src1v, src2v );
        VEC_STORE8( src1v, dst );

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  intptr_t i_dst,
                                                uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    PREP_LOAD;
    PREP_LOAD_SRC( src1 );
    PREP_LOAD_SRC( src2 );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
        VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
        src1v = vec_avg( src1v, src2v );
        vec_st(src1v, 0, dst);

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  intptr_t i_dst,
                                                uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
{
    x264_pixel_avg2_w16_altivec(dst, i_dst, src1, i_src1, src2, i_height);
    x264_pixel_avg2_w4_altivec(dst+16, i_dst, src1+16, i_src1, src2+16, i_height);
}

/* mc_copy: plain c */

#define MC_COPY( name, a )                                \
static void name( uint8_t *dst, intptr_t i_dst,           \
                  uint8_t *src, intptr_t i_src, int i_height ) \
{                                                         \
    int y;                                                \
    for( y = 0; y < i_height; y++ )                       \
    {                                                     \
        memcpy( dst, src, a );                            \
        src += i_src;                                     \
        dst += i_dst;                                     \
    }                                                     \
}
MC_COPY( x264_mc_copy_w4_altivec,  4  )
MC_COPY( x264_mc_copy_w8_altivec,  8  )

static void x264_mc_copy_w16_altivec( uint8_t *dst, intptr_t i_dst,
                                      uint8_t *src, intptr_t i_src, int i_height )
{
    vec_u8_t cpyV;
    PREP_LOAD;
    PREP_LOAD_SRC( src );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
        vec_st(cpyV, 0, dst);

        src += i_src;
        dst += i_dst;
    }
}

static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, intptr_t i_dst,
                                              uint8_t *src, intptr_t i_src, int i_height )
{
    for( int y = 0; y < i_height; ++y )
    {
        vec_u8_t cpyV = vec_ld( 0, src );
        vec_st(cpyV, 0, dst);

        src += i_src;
        dst += i_dst;
    }
}

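/* mc_luma: fetch a WxH luma prediction for a fractional mv (mvx,mvy) in
 * quarter-pel units.  When the quarter-pel position requires it
 * (qpel_idx & 5, i.e. an odd fractional part in x or y), two half-pel planes
 * are averaged; weighted prediction is then applied if enabled. */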
static void mc_luma_altivec( uint8_t *dst,    intptr_t i_dst_stride,
                             uint8_t *src[4], intptr_t i_src_stride,
                             int mvx, int mvy,
                             int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);

        switch( i_width )
        {
            case 4:
                x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 8:
                x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 16:
            default:
                x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
        }
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
    }
    else if( weight->weightfn )
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
    else
    {
        switch( i_width )
        {
            case 4:
                x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
            case 8:
                x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
            case 16:
                x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
        }
    }
}

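/* get_ref: like mc_luma, but when no interpolation or weighting is needed it
 * returns a pointer straight into the reference plane (updating *i_dst_stride)
 * instead of copying. */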
static uint8_t *get_ref_altivec( uint8_t *dst,   intptr_t *i_dst_stride,
                                 uint8_t *src[4], intptr_t i_src_stride,
                                 int mvx, int mvy,
                                 int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        switch( i_width )
        {
            case 4:
                x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 8:
                x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 12:
            case 16:
            default:
                x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 20:
                x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
        }
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

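/* mc_chroma: bilinear 1/8-pel chroma interpolation on interleaved (NV12-style)
 * UV data.  The four corner weights cA..cD sum to 64, hence the +32 rounding
 * term and the >>6 normalization. */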
static void mc_chroma_2xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                           uint8_t *src, intptr_t i_src_stride,
                           int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx&0x07;
    int d8y = mvy&0x07;

    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        dstu[0] = ( cA*src[0] + cB*src[2] + cC*srcp[0] + cD*srcp[2] + 32 ) >> 6;
        dstv[0] = ( cA*src[1] + cB*src[3] + cC*srcp[1] + cD*srcp[3] + 32 ) >> 6;
        dstu[1] = ( cA*src[2] + cB*src[4] + cC*srcp[2] + cD*srcp[4] + 32 ) >> 6;
        dstv[1] = ( cA*src[3] + cB*src[5] + cC*srcp[3] + cD*srcp[5] + 32 ) >> 6;

        src  += i_src_stride;
        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}

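/* Vectorized 4xh chroma: the same bilinear filter with the four weights
 * splatted into vectors.  perm0v/perm1v pick the low bytes of the interleaved
 * 16-bit results apart again: bytes 1,5,9,13 are the U samples, bytes
 * 3,7,11,15 the V samples. */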
static void mc_chroma_altivec_4xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                                   uint8_t *src, intptr_t i_src_stride,
                                   int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t    src2v_8, dstuv, dstvv;
    vec_u16_t   src0v_16, src1v_16, src2v_16, src3v_16, dstv16;
    vec_u16_t   shiftv, k32v;

    static const vec_u8_t perm0v = CV(1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13);
    static const vec_u8_t perm1v = CV(3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15);

    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );
    k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
    shiftv  = vec_splat_u16( 6 );

    VEC_LOAD( src, src2v_8, 9, vec_u8_t, src );
    src2v_16 = vec_u8_to_u16( src2v_8 );
    src3v_16 = vec_u8_to_u16( vec_sld( src2v_8, src2v_8, 2 ) );

    for( int y = 0; y < i_height; y += 2 )
    {
        src0v_16 = src2v_16;
        src1v_16 = src3v_16;
        VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
        src2v_16 = vec_u8_to_u16( src2v_8 );
        src3v_16 = vec_u8_to_u16( vec_sld( src2v_8, src2v_8, 2 ) );

        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );

        dstv16 = vec_sr( dstv16, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;

        src0v_16 = src2v_16;
        src1v_16 = src3v_16;
        VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
        src2v_16 = vec_u8_to_u16( src2v_8 );
        src3v_16 = vec_u8_to_u16( vec_sld( src2v_8, src2v_8, 2 ) );

        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );

        dstv16 = vec_sr( dstv16, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}

static void mc_chroma_altivec_8xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                                   uint8_t *src, intptr_t i_src_stride,
                                   int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE8;
    vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t    src0v_8, src1v_8, src2v_8, src3v_8;
    vec_u8_t    dstuv, dstvv;
    vec_u16_t   src0v_16h, src1v_16h, src2v_16h, src3v_16h, dstv_16h;
    vec_u16_t   src0v_16l, src1v_16l, src2v_16l, src3v_16l, dstv_16l;
    vec_u16_t   shiftv, k32v;

    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );
    k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
    shiftv  = vec_splat_u16( 6 );

    static const vec_u8_t perm0v = CV(1,5,9,13,17,21,25,29,0,0,0,0,0,0,0,0);
    static const vec_u8_t perm1v = CV(3,7,11,15,19,23,27,31,0,0,0,0,0,0,0,0);

    VEC_LOAD( src, src2v_8, 16, vec_u8_t, src );
    VEC_LOAD( src+16, src3v_8, 2, vec_u8_t, src );
    src3v_8 = vec_sld( src2v_8, src3v_8, 2 );

    for( int y = 0; y < i_height; y += 2 )
    {
        src0v_8 = src2v_8;
        src1v_8 = src3v_8;
        VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
        VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );

        src3v_8 = vec_sld( src2v_8, src3v_8, 2 );

        src0v_16h = vec_u8_to_u16_h( src0v_8 );
        src0v_16l = vec_u8_to_u16_l( src0v_8 );
        src1v_16h = vec_u8_to_u16_h( src1v_8 );
        src1v_16l = vec_u8_to_u16_l( src1v_8 );
        src2v_16h = vec_u8_to_u16_h( src2v_8 );
        src2v_16l = vec_u8_to_u16_l( src2v_8 );
        src3v_16h = vec_u8_to_u16_h( src3v_8 );
        src3v_16l = vec_u8_to_u16_l( src3v_8 );

        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );

        dstv_16h = vec_sr( dstv_16h, shiftv );
        dstv_16l = vec_sr( dstv_16l, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );

        VEC_STORE8( dstuv, dstu );
        VEC_STORE8( dstvv, dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;

        src0v_8 = src2v_8;
        src1v_8 = src3v_8;
        VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
        VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );

        src3v_8 = vec_sld( src2v_8, src3v_8, 2 );

        src0v_16h = vec_u8_to_u16_h( src0v_8 );
        src0v_16l = vec_u8_to_u16_l( src0v_8 );
        src1v_16h = vec_u8_to_u16_h( src1v_8 );
        src1v_16l = vec_u8_to_u16_l( src1v_8 );
        src2v_16h = vec_u8_to_u16_h( src2v_8 );
        src2v_16l = vec_u8_to_u16_l( src2v_8 );
        src3v_16h = vec_u8_to_u16_h( src3v_8 );
        src3v_16l = vec_u8_to_u16_l( src3v_8 );

        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );

        dstv_16h = vec_sr( dstv_16h, shiftv );
        dstv_16l = vec_sr( dstv_16l, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );

        VEC_STORE8( dstuv, dstu );
        VEC_STORE8( dstvv, dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}

static void mc_chroma_altivec( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                               uint8_t *src, intptr_t i_src_stride,
                               int mvx, int mvy, int i_width, int i_height )
{
    if( i_width == 8 )
        mc_chroma_altivec_8xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else if( i_width == 4 )
        mc_chroma_altivec_4xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else
        mc_chroma_2xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                       mvx, mvy, i_height );
}

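/* HPEL_FILTER_1 evaluates the 6-tap sum a-5*b+20*c on 16-bit lanes using only
 * adds and shifts; HPEL_FILTER_2 evaluates the same sum pre-divided by 16
 * ((a-5*b+20*c)/16) so the central filter's 16-bit intermediates stay in
 * range. */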
#define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );   /* (a-b) */          \
    t2v = vec_sub( t2v, t3v );   /* (b-c) */          \
    t2v = vec_sl(  t2v, twov );  /* (b-c)*4 */        \
    t1v = vec_sub( t1v, t2v );   /* a-5*b+4*c */      \
    t3v = vec_sl(  t3v, fourv ); /* 16*c */           \
    t1v = vec_add( t1v, t3v );   /* a-5*b+20*c */     \
}

#define HPEL_FILTER_2( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );  /* (a-b) */           \
    t1v = vec_sra( t1v, twov ); /* (a-b)/4 */         \
    t1v = vec_sub( t1v, t2v );  /* (a-b)/4-b */       \
    t1v = vec_add( t1v, t3v );  /* (a-b)/4-b+c */     \
    t1v = vec_sra( t1v, twov ); /* ((a-b)/4-b+c)/4 */ \
    t1v = vec_add( t1v, t3v );  /* ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 */ \
}

#define HPEL_FILTER_HORIZONTAL()                             \
{                                                            \
    VEC_LOAD_G( &src[x- 2+i_stride*y], src1v, 16, vec_u8_t); \
    VEC_LOAD_G( &src[x+14+i_stride*y], src6v, 16, vec_u8_t); \
                                                             \
    src2v = vec_sld( src1v, src6v,  1 );                     \
    src3v = vec_sld( src1v, src6v,  2 );                     \
    src4v = vec_sld( src1v, src6v,  3 );                     \
    src5v = vec_sld( src1v, src6v,  4 );                     \
    src6v = vec_sld( src1v, src6v,  5 );                     \
                                                             \
    temp1v = vec_u8_to_s16_h( src1v );                       \
    temp2v = vec_u8_to_s16_h( src2v );                       \
    temp3v = vec_u8_to_s16_h( src3v );                       \
    temp4v = vec_u8_to_s16_h( src4v );                       \
    temp5v = vec_u8_to_s16_h( src5v );                       \
    temp6v = vec_u8_to_s16_h( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest1v = vec_add( temp1v, sixteenv );                    \
    dest1v = vec_sra( dest1v, fivev );                       \
                                                             \
    temp1v = vec_u8_to_s16_l( src1v );                       \
    temp2v = vec_u8_to_s16_l( src2v );                       \
    temp3v = vec_u8_to_s16_l( src3v );                       \
    temp4v = vec_u8_to_s16_l( src4v );                       \
    temp5v = vec_u8_to_s16_l( src5v );                       \
    temp6v = vec_u8_to_s16_l( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest2v = vec_add( temp1v, sixteenv );                    \
    dest2v = vec_sra( dest2v, fivev );                       \
                                                             \
    destv = vec_packsu( dest1v, dest2v );                    \
                                                             \
    VEC_STORE16( destv, &dsth[x+i_stride*y], dsth );         \
}

#define HPEL_FILTER_VERTICAL()                                    \
{                                                                 \
    VEC_LOAD( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src ); \
                                                                  \
    temp1v = vec_u8_to_s16_h( src1v );                            \
    temp2v = vec_u8_to_s16_h( src2v );                            \
    temp3v = vec_u8_to_s16_h( src3v );                            \
    temp4v = vec_u8_to_s16_h( src4v );                            \
    temp5v = vec_u8_to_s16_h( src5v );                            \
    temp6v = vec_u8_to_s16_h( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                        \
                   temp4v, temp5v, temp6v );                      \
                                                                  \
    dest1v = vec_add( temp1v, sixteenv );                         \
    dest1v = vec_sra( dest1v, fivev );                            \
                                                                  \
    temp4v = vec_u8_to_s16_l( src1v );                            \
    temp5v = vec_u8_to_s16_l( src2v );                            \
    temp6v = vec_u8_to_s16_l( src3v );                            \
    temp7v = vec_u8_to_s16_l( src4v );                            \
    temp8v = vec_u8_to_s16_l( src5v );                            \
    temp9v = vec_u8_to_s16_l( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp4v, temp5v, temp6v,                        \
                   temp7v, temp8v, temp9v );                      \
                                                                  \
    dest2v = vec_add( temp4v, sixteenv );                         \
    dest2v = vec_sra( dest2v, fivev );                            \
                                                                  \
    destv = vec_packsu( dest1v, dest2v );                         \
                                                                  \
    VEC_STORE16( destv, &dstv[x+i_stride*y], dsth );              \
}

#define HPEL_FILTER_CENTRAL()                           \
{                                                       \
    temp1v = vec_sld( tempav, tempbv, 12 );             \
    temp2v = vec_sld( tempav, tempbv, 14 );             \
    temp3v = tempbv;                                    \
    temp4v = vec_sld( tempbv, tempcv,  2 );             \
    temp5v = vec_sld( tempbv, tempcv,  4 );             \
    temp6v = vec_sld( tempbv, tempcv,  6 );             \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest1v = vec_add( temp1v, thirtytwov );             \
    dest1v = vec_sra( dest1v, sixv );                   \
                                                        \
    temp1v = vec_sld( tempbv, tempcv, 12 );             \
    temp2v = vec_sld( tempbv, tempcv, 14 );             \
    temp3v = tempcv;                                    \
    temp4v = vec_sld( tempcv, tempdv,  2 );             \
    temp5v = vec_sld( tempcv, tempdv,  4 );             \
    temp6v = vec_sld( tempcv, tempdv,  6 );             \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest2v = vec_add( temp1v, thirtytwov );             \
    dest2v = vec_sra( dest2v, sixv );                   \
                                                        \
    destv = vec_packsu( dest1v, dest2v );               \
                                                        \
    VEC_STORE16( destv, &dstc[x-16+i_stride*y], dsth ); \
}

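/* One-pass half-pel filter: for each 16-pixel strip of each row, produce the
 * horizontal (dsth), vertical (dstv) and centre (dstc) half-pel planes; the
 * centre plane reuses the vertical filter's 16-bit intermediates via
 * HPEL_FILTER_2. */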
void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
                               intptr_t i_stride, int i_width, int i_height, int16_t *buf )
{
    vec_u8_t destv;
    vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
    vec_s16_t dest1v, dest2v;
    vec_s16_t temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v, temp8v, temp9v;
    vec_s16_t tempav, tempbv, tempcv, tempdv, tempev;

    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE16;
    PREP_STORE16_DST( dsth );
    LOAD_ZERO;

    vec_u16_t twov, fourv, fivev, sixv;
    vec_s16_t sixteenv, thirtytwov;
    vec_u16_u temp_u;

    temp_u.s[0]=2;
    twov = vec_splat( temp_u.v, 0 );
    temp_u.s[0]=4;
    fourv = vec_splat( temp_u.v, 0 );
    temp_u.s[0]=5;
    fivev = vec_splat( temp_u.v, 0 );
    temp_u.s[0]=6;
    sixv = vec_splat( temp_u.v, 0 );
    temp_u.s[0]=16;
    sixteenv = (vec_s16_t)vec_splat( temp_u.v, 0 );
    temp_u.s[0]=32;
    thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );

    for( int y = 0; y < i_height; y++ )
    {
        int x = 0;

        /* horizontal_filter */
        HPEL_FILTER_HORIZONTAL();

        /* vertical_filter */
        HPEL_FILTER_VERTICAL();

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = vec_splat( temp1v, 0 ); /* first only */
        tempdv = temp1v;
        tempev = temp4v;

        for( x = 16; x < i_width; x+=16 )
        {
            /* horizontal_filter */
            HPEL_FILTER_HORIZONTAL();

            /* vertical_filter */
            HPEL_FILTER_VERTICAL();

            /* central_filter */
            tempav = tempcv;
            tempbv = tempdv;
            tempcv = tempev;
            tempdv = temp1v;
            tempev = temp4v;

            HPEL_FILTER_CENTRAL();
        }

        /* Partial vertical filter */
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src );

        temp1v = vec_u8_to_s16_h( src1v );
        temp2v = vec_u8_to_s16_h( src2v );
        temp3v = vec_u8_to_s16_h( src3v );
        temp4v = vec_u8_to_s16_h( src4v );
        temp5v = vec_u8_to_s16_h( src5v );
        temp6v = vec_u8_to_s16_h( src6v );

        HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = tempev;
        tempdv = temp1v;
        /* tempev is not used */

        HPEL_FILTER_CENTRAL();
    }
}

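/* Downsample to half resolution while generating four planes offset by (0,0),
 * (1/2,0), (0,1/2) and (1/2,1/2) in half-pel units; each output pixel is a
 * rounding average of the corresponding 2x2 neighbourhood. */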
static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
                                            intptr_t src_stride, intptr_t dst_stride, int width, int height )
{
    int w = width >> 4;
    int end = (width & 15);
    vec_u8_t src0v, src1v, src2v;
    vec_u8_t lv, hv, src1p1v;
    vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
    static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );

    for( int y = 0; y < height; y++ )
    {
        int x;
        uint8_t *src1 = src0+src_stride;
        uint8_t *src2 = src1+src_stride;

        src0v = vec_ld(0, src0);
        src1v = vec_ld(0, src1);
        src2v = vec_ld(0, src2);

        avg0v = vec_avg(src0v, src1v);
        avg1v = vec_avg(src1v, src2v);

        for( x = 0; x < w; x++ )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+2), src0);
            src1p1v = vec_ld(16*(x*2+2), src1);
            avghp1v = vec_avg(lv, src1p1v);

            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);

            avg0v = avghp1v;

            hv = vec_ld(16*(x*2+1), src2);
            avghv = vec_avg(src1v, hv);

            hv = vec_ld(16*(x*2+2), src2);
            avghp1v = vec_avg(src1p1v, hv);

            avgleftv = vec_avg(vec_sld(avg1v, avghv, 1), avg1v);
            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);

            avg1v = avghp1v;
        }
        if( end )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+1), src2);
            avghp1v = vec_avg(src1v, lv);

            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(vec_sld(avg1v, avghp1v, 1), avg1v);

            lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
            hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);

            vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
            vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
            vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dsth);
            vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dsth);

            lv = vec_sld(lv, lv, 8);
            hv = vec_sld(hv, hv, 8);

            vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dstv);
            vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dstv);
            vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dstc);
            vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dstc);
        }

        src0 += src_stride*2;
        dst0 += dst_stride;
        dsth += dst_stride;
        dstv += dst_stride;
        dstc += dst_stride;
    }
}

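/* Weighted prediction: dst = clip8( ((src * i_scale + round) >> i_denom)
 * + i_offset ), with the round/shift step skipped when i_denom == 0. */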
static void mc_weight_w2_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 2, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 2, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
        }
    }
}
static void mc_weight_w4_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 4, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 4, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
        }
    }
}
static void mc_weight_w8_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE8;
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 8, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            VEC_STORE8( srcv, dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 8, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            VEC_STORE8( srcv, dst );
        }
    }
}
static void mc_weight_w16_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                   const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u8_t srcv;
    vec_s16_t weight_lv, weight_hv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 16, vec_u8_t, src );
            weight_hv = vec_u8_to_s16_h( srcv );
            weight_lv = vec_u8_to_s16_l( srcv );

            weight_hv = vec_mladd( weight_hv, scalev, roundv );
            weight_lv = vec_mladd( weight_lv, scalev, roundv );
            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
            weight_hv = vec_add( weight_hv, offsetv );
            weight_lv = vec_add( weight_lv, offsetv );

            srcv = vec_packsu( weight_hv, weight_lv );
            vec_st( srcv, 0, dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 16, vec_u8_t, src );
            weight_hv = vec_u8_to_s16_h( srcv );
            weight_lv = vec_u8_to_s16_l( srcv );

            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
            weight_lv = vec_mladd( weight_lv, scalev, offsetv );

            srcv = vec_packsu( weight_hv, weight_lv );
            vec_st( srcv, 0, dst );
        }
    }
}
static void mc_weight_w20_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                   const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD_SRC( src );
    vec_u8_t src_1v, src_2v, src_3v;
    vec_s16_t weight_lv, weight_hv, weight_3v;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            src_1v = vec_ld( 0,  src );
            src_2v = vec_ld( 16, src );
            src_3v = vec_ld( 19, src );
            src_1v = vec_perm( src_1v, src_2v, _src_ );
            src_3v = vec_perm( src_2v, src_3v, _src_ );
            weight_hv = vec_u8_to_s16_h( src_1v );
            weight_lv = vec_u8_to_s16_l( src_1v );
            weight_3v = vec_u8_to_s16_h( src_3v );

            weight_hv = vec_mladd( weight_hv, scalev, roundv );
            weight_lv = vec_mladd( weight_lv, scalev, roundv );
            weight_3v = vec_mladd( weight_3v, scalev, roundv );
            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
            weight_3v = vec_sra( weight_3v, (vec_u16_t)denomv );
            weight_hv = vec_add( weight_hv, offsetv );
            weight_lv = vec_add( weight_lv, offsetv );
            weight_3v = vec_add( weight_3v, offsetv );

            src_1v = vec_packsu( weight_hv, weight_lv );
            src_3v = vec_packsu( weight_3v, zero_s16v );
            vec_st( src_1v, 0, dst );
            vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            src_1v = vec_ld( 0,  src );
            src_2v = vec_ld( 16, src );
            src_3v = vec_ld( 19, src );
            src_1v = vec_perm( src_1v, src_2v, _src_ );
            src_3v = vec_perm( src_2v, src_3v, _src_ );
            weight_hv = vec_u8_to_s16_h( src_1v );
            weight_lv = vec_u8_to_s16_l( src_1v );
            weight_3v = vec_u8_to_s16_h( src_3v );

            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
            weight_lv = vec_mladd( weight_lv, scalev, offsetv );
            weight_3v = vec_mladd( weight_3v, scalev, offsetv );

            src_1v = vec_packsu( weight_hv, weight_lv );
            src_3v = vec_packsu( weight_3v, zero_s16v );
            vec_st( src_1v, 0, dst );
            vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
        }
    }
}

static weight_fn_t x264_mc_weight_wtab_altivec[6] =
{
    mc_weight_w2_altivec,
    mc_weight_w4_altivec,
    mc_weight_w8_altivec,
    mc_weight_w16_altivec,
    mc_weight_w16_altivec,
    mc_weight_w20_altivec,
};

#endif // !HIGH_BIT_DEPTH

void x264_mc_altivec_init( x264_mc_functions_t *pf )
{
#if !HIGH_BIT_DEPTH
    pf->mc_luma   = mc_luma_altivec;
    pf->get_ref   = get_ref_altivec;
    pf->mc_chroma = mc_chroma_altivec;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_altivec;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_altivec;

    pf->hpel_filter = x264_hpel_filter_altivec;
    pf->frame_init_lowres_core = frame_init_lowres_core_altivec;

    pf->weight = x264_mc_weight_wtab_altivec;
#endif // !HIGH_BIT_DEPTH
}