/*
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/dsputil.c
 */

#include "simple_idct.h"
#include "mpegvideo.h"

void ff_spatial_dwt(int *buffer, int width, int height, int stride, int type, int decomposition_count);
void vorbis_inverse_coupling(float *mag, float *ang, int blocksize);
void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len);
void ff_lpc_compute_autocorr(const int32_t *data, int len, int lag, double *autoc);
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp);
void ff_ea_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block);

uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
uint32_t ff_squareTbl[512] = {0, };

// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
#define pb_7f (~0UL/255 * 0x7f)
#define pb_80 (~0UL/255 * 0x80)
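/* Example of how these expand (illustration only): with a 32-bit unsigned
 * long, ~0UL/255 == 0x01010101, so pb_7f == 0x7f7f7f7f and pb_80 ==
 * 0x80808080; with a 64-bit unsigned long the same expressions give
 * 0x7f7f7f7f7f7f7f7f and 0x8080808080808080, i.e. the byte replicated into
 * every lane of the native word. */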
const uint8_t ff_zigzag_direct[64] = {
     0,  1,  8, 16,  9,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

/* Specific zigzag scan for 248 idct. NOTE that unlike the
   specification, we interleave the fields */
const uint8_t ff_zigzag248_direct[64] = {
     0,  8,  1,  9, 16, 24,  2, 10,
    17, 25, 32, 40, 48, 56, 33, 41,
    18, 26,  3, 11,  4, 12, 19, 27,
    34, 42, 49, 57, 50, 58, 35, 43,
    20, 28,  5, 13,  6, 14, 21, 29,
    36, 44, 51, 59, 52, 60, 37, 45,
    22, 30,  7, 15, 23, 31, 38, 46,
    53, 61, 54, 62, 39, 47, 55, 63,
};
/* inverse of zigzag_direct + 1, not permuted, for the MMX quantizer */
DECLARE_ALIGNED_16(uint16_t, inv_zigzag_direct16[64]);

const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};
const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};
/* a*inverse[b]>>32 == a/b for all 0<=a<=16909558 && 2<=b<=256
 * for a>16909558, is an overestimate by less than 1 part in 1<<24 */
const uint32_t ff_inverse[257]={
            0, 4294967295U,2147483648U,1431655766, 1073741824,  858993460,  715827883,  613566757,
    536870912,  477218589,  429496730,  390451573,  357913942,  330382100,  306783379,  286331154,
    268435456,  252645136,  238609295,  226050911,  214748365,  204522253,  195225787,  186737709,
    178956971,  171798692,  165191050,  159072863,  153391690,  148102321,  143165577,  138547333,
    134217728,  130150525,  126322568,  122713352,  119304648,  116080198,  113025456,  110127367,
    107374183,  104755300,  102261127,   99882961,   97612894,   95443718,   93368855,   91382283,
     89478486,   87652394,   85899346,   84215046,   82595525,   81037119,   79536432,   78090315,
     76695845,   75350304,   74051161,   72796056,   71582789,   70409300,   69273667,   68174085,
     67108864,   66076420,   65075263,   64103990,   63161284,   62245903,   61356676,   60492498,
     59652324,   58835169,   58040099,   57266231,   56512728,   55778797,   55063684,   54366675,
     53687092,   53024288,   52377650,   51746594,   51130564,   50529028,   49941481,   49367441,
     48806447,   48258060,   47721859,   47197443,   46684428,   46182445,   45691142,   45210183,
     44739243,   44278014,   43826197,   43383509,   42949673,   42524429,   42107523,   41698712,
     41297763,   40904451,   40518560,   40139882,   39768216,   39403370,   39045158,   38693400,
     38347923,   38008561,   37675152,   37347542,   37025581,   36709123,   36398028,   36092163,
     35791395,   35495598,   35204650,   34918434,   34636834,   34359739,   34087043,   33818641,
     33554432,   33294321,   33038210,   32786010,   32537632,   32292988,   32051995,   31814573,
     31580642,   31350127,   31122952,   30899046,   30678338,   30460761,   30246249,   30034737,
     29826162,   29620465,   29417585,   29217465,   29020050,   28825284,   28633116,   28443493,
     28256364,   28071682,   27889399,   27709467,   27531842,   27356480,   27183338,   27012373,
     26843546,   26676816,   26512144,   26349493,   26188825,   26030105,   25873297,   25718368,
     25565282,   25414008,   25264514,   25116768,   24970741,   24826401,   24683721,   24542671,
     24403224,   24265352,   24129030,   23994231,   23860930,   23729102,   23598722,   23469767,
     23342214,   23216040,   23091223,   22967740,   22845571,   22724695,   22605092,   22486740,
     22369622,   22253717,   22139007,   22025474,   21913099,   21801865,   21691755,   21582751,
     21474837,   21367997,   21262215,   21157475,   21053762,   20951060,   20849356,   20748635,
     20648882,   20550083,   20452226,   20355296,   20259280,   20164166,   20069941,   19976593,
     19884108,   19792477,   19701685,   19611723,   19522579,   19434242,   19346700,   19259944,
     19173962,   19088744,   19004281,   18920561,   18837576,   18755316,   18673771,   18592933,
     18512791,   18433337,   18354562,   18276457,   18199014,   18122225,   18046082,   17970575,
     17895698,   17821442,   17747799,   17674763,   17602325,   17530479,   17459217,   17388532,
     17318417,   17248865,   17179870,   17111424,   17043522,   16976156,   16909321,   16843010,
};
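/* Worked example of the property above (illustration only): ff_inverse[3] ==
 * 1431655766 == ceil(2^32 / 3), so for a == 1000:
 *   (uint32_t)(((uint64_t)1000 * 1431655766) >> 32) == 333 == 1000 / 3.
 * This is the multiply-and-shift division used by the FASTDIV() macro
 * elsewhere in FFmpeg. */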
/* Input permutation for the simple_idct_mmx */
static const uint8_t simple_mmx_permutation[64]={
    0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
    0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
    0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
    0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
    0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
    0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
    0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
    0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};

static const uint8_t idct_sse2_row_perm[8] = {0, 4, 1, 5, 2, 6, 3, 7};
void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
    st->scantable= src_scantable;
        j = src_scantable[i];
        st->permutated[i] = permutation[j];
        j = st->permutated[i];
        st->raster_end[i]= end;
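/* Summary of the (partly elided) loops above: permutated[i] is the scan
 * pattern remapped through the IDCT implementation's input permutation, and
 * raster_end[i] is the highest permuted index seen among the first i+1 scan
 * positions, which lets decoders bound their coefficient loops. */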
static int pix_sum_c(uint8_t * pix, int line_size)
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
        pix += line_size - 16;

static int pix_norm1_c(uint8_t * pix, int line_size)
    uint32_t *sq = ff_squareTbl + 256;

    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
#if LONG_MAX > 2147483647
            register uint64_t x=*(uint64_t*)pix;
            s += sq[(x>>8)&0xff];
            s += sq[(x>>16)&0xff];
            s += sq[(x>>24)&0xff];
            s += sq[(x>>32)&0xff];
            s += sq[(x>>40)&0xff];
            s += sq[(x>>48)&0xff];
            s += sq[(x>>56)&0xff];
#else
            register uint32_t x=*(uint32_t*)pix;
            s += sq[(x>>8)&0xff];
            s += sq[(x>>16)&0xff];
            s += sq[(x>>24)&0xff];
            x=*(uint32_t*)(pix+4);
            s += sq[(x>>8)&0xff];
            s += sq[(x>>16)&0xff];
            s += sq[(x>>24)&0xff];
#endif
        pix += line_size - 16;

static void bswap_buf(uint32_t *dst, const uint32_t *src, int w){
    for(i=0; i+8<=w; i+=8){
        dst[i+0]= bswap_32(src[i+0]);
        dst[i+1]= bswap_32(src[i+1]);
        dst[i+2]= bswap_32(src[i+2]);
        dst[i+3]= bswap_32(src[i+3]);
        dst[i+4]= bswap_32(src[i+4]);
        dst[i+5]= bswap_32(src[i+5]);
        dst[i+6]= bswap_32(src[i+6]);
        dst[i+7]= bswap_32(src[i+7]);
        dst[i+0]= bswap_32(src[i+0]);

static int sse4_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
    uint32_t *sq = ff_squareTbl + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];

static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
    uint32_t *sq = ff_squareTbl + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];

static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
    uint32_t *sq = ff_squareTbl + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[ 0] - pix2[ 0]];
        s += sq[pix1[ 1] - pix2[ 1]];
        s += sq[pix1[ 2] - pix2[ 2]];
        s += sq[pix1[ 3] - pix2[ 3]];
        s += sq[pix1[ 4] - pix2[ 4]];
        s += sq[pix1[ 5] - pix2[ 5]];
        s += sq[pix1[ 6] - pix2[ 6]];
        s += sq[pix1[ 7] - pix2[ 7]];
        s += sq[pix1[ 8] - pix2[ 8]];
        s += sq[pix1[ 9] - pix2[ 9]];
        s += sq[pix1[10] - pix2[10]];
        s += sq[pix1[11] - pix2[11]];
        s += sq[pix1[12] - pix2[12]];
        s += sq[pix1[13] - pix2[13]];
        s += sq[pix1[14] - pix2[14]];
        s += sq[pix1[15] - pix2[15]];
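/* Note on the sq pointer used by the sse*_c functions above: ff_squareTbl is
 * assumed to be initialized elsewhere so that ff_squareTbl[256 + d] == d*d
 * for d in -256..255; biasing the pointer by +256 lets sq[pix1[k] - pix2[k]]
 * square a signed byte difference without a branch or an abs(). */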
#if CONFIG_SNOW_ENCODER //dwt is in snow.c
static inline int w_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int w, int h, int type){
    const int dec_count= w==8 ? 3 : 4;
    static const int scale[2][2][4][4]={
            {268, 239, 239, 213},
            // 9/7 16x16 or 32x32 dec=4
            {344, 310, 310, 280},
            {275, 245, 245, 218},
            // 5/3 16x16 or 32x32 dec=4
            {352, 317, 317, 286},

    for (i = 0; i < h; i++) {
        for (j = 0; j < w; j+=4) {
            tmp[32*i+j+0] = (pix1[j+0] - pix2[j+0])<<4;
            tmp[32*i+j+1] = (pix1[j+1] - pix2[j+1])<<4;
            tmp[32*i+j+2] = (pix1[j+2] - pix2[j+2])<<4;
            tmp[32*i+j+3] = (pix1[j+3] - pix2[j+3])<<4;

    ff_spatial_dwt(tmp, w, h, 32, type, dec_count);

    for(level=0; level<dec_count; level++){
        for(ori= level ? 1 : 0; ori<4; ori++){
            int size= w>>(dec_count-level);
            int sx= (ori&1) ? size : 0;
            int stride= 32<<(dec_count-level);
            int sy= (ori&2) ? stride>>1 : 0;

            for(i=0; i<size; i++){
                for(j=0; j<size; j++){
                    int v= tmp[sx + sy + i*stride + j] * scale[type][dec_count-3][level][ori];

static int w53_8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
    return w_c(v, pix1, pix2, line_size, 8, h, 1);
}

static int w97_8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
    return w_c(v, pix1, pix2, line_size, 8, h, 0);
}

static int w53_16_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
    return w_c(v, pix1, pix2, line_size, 16, h, 1);
}

static int w97_16_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
    return w_c(v, pix1, pix2, line_size, 16, h, 0);
}

int w53_32_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
    return w_c(v, pix1, pix2, line_size, 32, h, 1);
}

int w97_32_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
    return w_c(v, pix1, pix2, line_size, 32, h, 0);
}
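/* Note on the wrappers above: w53_* select type 1 (the integer 5/3 wavelet)
 * and w97_* type 0 (the 9/7 wavelet), matching the scale[][] comments, with
 * the suffix giving the block width passed to w_c(). The result is a
 * subband-weighted sum of wavelet coefficients of the block difference,
 * i.e. a perceptually weighted distortion metric for the Snow encoder. */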
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;

    last_line = buf + (height - 1) * wrap;
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);

    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);

        memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
/**
 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
 * @param buf destination buffer
 * @param src source buffer
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block
 * @param block_h height of block
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
                         int src_x, int src_y, int w, int h){
    int start_y, start_x, end_y, end_x;

        src+= (h-1-src_y)*linesize;
    }else if(src_y<=-block_h){
        src+= (1-block_h-src_y)*linesize;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x);

    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);

    // copy existing part
    for(y=start_y; y<end_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= src[x + y*linesize];

    for(y=0; y<start_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= buf[x + start_y*linesize];

    for(y=end_y; y<block_h; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= buf[x + (end_y-1)*linesize];

    for(y=0; y<block_h; y++){
        for(x=0; x<start_x; x++){
            buf[x + y*linesize]= buf[start_x + y*linesize];

        for(x=end_x; x<block_w; x++){
            buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
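/* Typical call pattern (sketch only, with hypothetical variable names): when
 * a motion vector makes a block straddle the picture border, copy it through
 * a scratch buffer first and read from that instead:
 *
 *   if (src_x < 0 || src_y < 0 ||
 *       src_x + block_w > w || src_y + block_h > h) {
 *       ff_emulated_edge_mc(edge_buf, frame + src_y*linesize + src_x,
 *                           linesize, block_w, block_h, src_x, src_y, w, h);
 *       ptr = edge_buf;   // now safe to read block_w x block_h samples
 *   }
 */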
static void get_pixels_c(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
    /* read the pixels */
        block[0] = pixels[0];
        block[1] = pixels[1];
        block[2] = pixels[2];
        block[3] = pixels[3];
        block[4] = pixels[4];
        block[5] = pixels[5];
        block[6] = pixels[6];
        block[7] = pixels[7];

static void diff_pixels_c(DCTELEM *restrict block, const uint8_t *s1,
                          const uint8_t *s2, int stride){
    /* read the pixels */
        block[0] = s1[0] - s2[0];
        block[1] = s1[1] - s2[1];
        block[2] = s1[2] - s2[2];
        block[3] = s1[3] - s2[3];
        block[4] = s1[4] - s2[4];
        block[5] = s1[5] - s2[5];
        block[6] = s1[6] - s2[6];
        block[7] = s1[7] - s2[7];
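/* Note on the cm pointer used by the clamped copies below: ff_cropTbl is
 * assumed to be filled elsewhere so that ff_cropTbl[MAX_NEG_CROP + v] ==
 * av_clip_uint8(v); biasing the pointer by MAX_NEG_CROP makes cm[v] a
 * branch-free clamp of v to 0..255, which is what IDCT output (DCTELEMs that
 * may undershoot or overshoot) needs before being stored as pixels. */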
static void put_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
        pixels[0] = cm[block[0]];
        pixels[1] = cm[block[1]];
        pixels[2] = cm[block[2]];
        pixels[3] = cm[block[3]];
        pixels[4] = cm[block[4]];
        pixels[5] = cm[block[5]];
        pixels[6] = cm[block[6]];
        pixels[7] = cm[block[7]];

static void put_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels,
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
        pixels[0] = cm[block[0]];
        pixels[1] = cm[block[1]];
        pixels[2] = cm[block[2]];
        pixels[3] = cm[block[3]];

static void put_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels,
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
        pixels[0] = cm[block[0]];
        pixels[1] = cm[block[1]];

static void put_signed_pixels_clamped_c(const DCTELEM *block,
                                        uint8_t *restrict pixels,
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            else if (*block > 127)
                *pixels = (uint8_t)(*block + 128);
        pixels += (line_size - 8);

static void add_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
        pixels[0] = cm[pixels[0] + block[0]];
        pixels[1] = cm[pixels[1] + block[1]];
        pixels[2] = cm[pixels[2] + block[2]];
        pixels[3] = cm[pixels[3] + block[3]];
        pixels[4] = cm[pixels[4] + block[4]];
        pixels[5] = cm[pixels[5] + block[5]];
        pixels[6] = cm[pixels[6] + block[6]];
        pixels[7] = cm[pixels[7] + block[7]];

static void add_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels,
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
        pixels[0] = cm[pixels[0] + block[0]];
        pixels[1] = cm[pixels[1] + block[1]];
        pixels[2] = cm[pixels[2] + block[2]];
        pixels[3] = cm[pixels[3] + block[3]];

static void add_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels,
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
        pixels[0] = cm[pixels[0] + block[0]];
        pixels[1] = cm[pixels[1] + block[1]];

static void add_pixels8_c(uint8_t *restrict pixels, DCTELEM *block, int line_size)
        pixels[0] += block[0];
        pixels[1] += block[1];
        pixels[2] += block[2];
        pixels[3] += block[3];
        pixels[4] += block[4];
        pixels[5] += block[5];
        pixels[6] += block[6];
        pixels[7] += block[7];

static void add_pixels4_c(uint8_t *restrict pixels, DCTELEM *block, int line_size)
        pixels[0] += block[0];
        pixels[1] += block[1];
        pixels[2] += block[2];
        pixels[3] += block[3];

static int sum_abs_dctelem_c(DCTELEM *block)
        sum+= FFABS(block[i]);

#if 0
#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
        OP(*((uint64_t*)block), AV_RN64(pixels));\
static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
        const uint64_t a= AV_RN64(pixels          );\
        const uint64_t b= AV_RN64(pixels+line_size);\
        OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
        const uint64_t a= AV_RN64(pixels          );\
        const uint64_t b= AV_RN64(pixels+line_size);\
        OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        uint64_t l0=  (a&0x0303030303030303ULL)\
                    + (b&0x0303030303030303ULL)\
                    + 0x0202020202020202ULL;\
        uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
                   + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        for(i=0; i<h; i+=2){\
            uint64_t a= AV_RN64(pixels  );\
            uint64_t b= AV_RN64(pixels+1);\
            l1=  (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL);\
            h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
              + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
            OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
            a= AV_RN64(pixels  );\
            b= AV_RN64(pixels+1);\
            l0=  (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL)\
               + 0x0202020202020202ULL;\
            h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
              + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
            OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        uint64_t l0=  (a&0x0303030303030303ULL)\
                    + (b&0x0303030303030303ULL)\
                    + 0x0101010101010101ULL;\
        uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
                   + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        for(i=0; i<h; i+=2){\
            uint64_t a= AV_RN64(pixels  );\
            uint64_t b= AV_RN64(pixels+1);\
            l1=  (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL);\
            h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
              + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
            OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
            a= AV_RN64(pixels  );\
            b= AV_RN64(pixels+1);\
            l0=  (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL)\
               + 0x0101010101010101ULL;\
            h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
              + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
            OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
CALL_2X_PIXELS(OPNAME ## _pixels16_c    , OPNAME ## _pixels_c    , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8)

#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
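/* Why op_avg works (note): per byte, a+b == (a^b) + 2*(a&b) and
 * (a|b) == (a&b) + (a^b), so (a|b) - (((a^b) & 0xFE..FE) >> 1) equals
 * (a&b) + ceil((a^b)/2) == (a+b+1)>>1 in every byte lane; the 0xFE mask keeps
 * the per-byte shift from leaking a bit into the lane below. The no_rnd
 * variants above use (a&b) + (((a^b) & 0xFE..FE) >> 1) instead, which is the
 * rounding-down average (a+b)>>1. */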
#else // 64 bit variant

#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
        OP(*((uint16_t*)(block  )), AV_RN16(pixels  ));\
static void OPNAME ## _pixels4_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
        OP(*((uint32_t*)(block  )), AV_RN32(pixels  ));\
static void OPNAME ## _pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
        OP(*((uint32_t*)(block  )), AV_RN32(pixels  ));\
        OP(*((uint32_t*)(block+4)), AV_RN32(pixels+4));\
static inline void OPNAME ## _no_rnd_pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_c(block, pixels, line_size, h);\
static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
        a= AV_RN32(&src1[i*src_stride1  ]);\
        b= AV_RN32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), no_rnd_avg32(a, b));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), no_rnd_avg32(a, b));\
static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
        a= AV_RN32(&src1[i*src_stride1  ]);\
        b= AV_RN32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), rnd_avg32(a, b));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), rnd_avg32(a, b));\
static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
        a= AV_RN32(&src1[i*src_stride1  ]);\
        b= AV_RN32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), rnd_avg32(a, b));\
static inline void OPNAME ## _pixels2_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
        a= AV_RN16(&src1[i*src_stride1  ]);\
        b= AV_RN16(&src2[i*src_stride2  ]);\
        OP(*((uint16_t*)&dst[i*dst_stride  ]), rnd_avg32(a, b));\
static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    OPNAME ## _pixels8_l2(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
    OPNAME ## _pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    OPNAME ## _no_rnd_pixels8_l2(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
    OPNAME ## _no_rnd_pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
static inline void OPNAME ## _no_rnd_pixels8_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
static inline void OPNAME ## _pixels8_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
static inline void OPNAME ## _no_rnd_pixels8_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
static inline void OPNAME ## _pixels8_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
static inline void OPNAME ## _pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0=  (a&0x03030303UL)\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0=  (a&0x03030303UL)\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
static inline void OPNAME ## _pixels4_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels4_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
static inline void OPNAME ## _pixels4_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels4_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
static inline void OPNAME ## _pixels2_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels2_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
static inline void OPNAME ## _pixels2_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels2_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
static inline void OPNAME ## _no_rnd_pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0=  (a&0x03030303UL)\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0=  (a&0x03030303UL)\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
static inline void OPNAME ## _pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    OPNAME ## _pixels8_l4(dst  , src1  , src2  , src3  , src4  , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    OPNAME ## _pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
static inline void OPNAME ## _no_rnd_pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    OPNAME ## _no_rnd_pixels8_l4(dst  , src1  , src2  , src3  , src4  , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    OPNAME ## _no_rnd_pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
static inline void OPNAME ## _pixels2_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    int i, a0, b0, a1, b1;\
    for(i=0; i<h; i+=2){\
        block[0]= (a1+a0)>>2; /* FIXME non put */\
        block[1]= (b1+b0)>>2;\
        block[0]= (a1+a0)>>2;\
        block[1]= (b1+b0)>>2;\
static inline void OPNAME ## _pixels4_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0=  (a&0x03030303UL)\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1=  (a&0x03030303UL)\
               + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0=  (a&0x03030303UL)\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
static inline void OPNAME ## _pixels8_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    for(j=0; j<2; j++){\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0=  (a&0x03030303UL)\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1=  (a&0x03030303UL)\
               + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0=  (a&0x03030303UL)\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
static inline void OPNAME ## _no_rnd_pixels8_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    for(j=0; j<2; j++){\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0=  (a&0x03030303UL)\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1=  (a&0x03030303UL)\
               + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0=  (a&0x03030303UL)\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
CALL_2X_PIXELS(OPNAME ## _pixels16_c    , OPNAME ## _pixels8_c    , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels8_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels8_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels8_xy2_c, 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_c    , OPNAME ## _pixels8_c        , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels8_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels8_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels8_xy2_c, 8)\

#define op_avg(a, b) a = rnd_avg32(a, b)
#endif
#define op_put(a, b) a = b
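/* Note: rnd_avg32()/no_rnd_avg32() are assumed to be the 32-bit versions of
 * the same or/and-xor byte-averaging trick (defined in dsputil.h, outside
 * this excerpt); op_put is a plain store and op_avg blends the new value
 * with what is already in the destination, as needed for bidirectional
 * motion compensation. */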
#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)

static void put_no_rnd_pixels16_l2_c(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
    put_no_rnd_pixels16_l2(dst, a, b, stride, stride, stride, h);
}

static void put_no_rnd_pixels8_l2_c(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
    put_no_rnd_pixels8_l2(dst, a, b, stride, stride, stride, h);
}

static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
    const int A=(16-x16)*(16-y16);
    const int B=(   x16)*(16-y16);
    const int C=(16-x16)*(   y16);
    const int D=(   x16)*(   y16);

        dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
        dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
        dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
        dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
        dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
        dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
        dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
        dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
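/* Sanity check for the bilinear weights above:
 * A+B+C+D == (16-x16)*(16-y16) + x16*(16-y16) + (16-x16)*y16 + x16*y16
 *         == 16*16 == 256,
 * so the >>8 renormalizes exactly; rounder is typically 128 (127 when
 * no-rounding prediction is in effect) so the shift rounds to nearest. */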
void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
              int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
    const int s= 1<<shift;

        for(x=0; x<8; x++){ //XXX FIXME optimize
            int src_x, src_y, frac_x, frac_y, index;

            frac_x= src_x&(s-1);
            frac_y= src_y&(s-1);

            if((unsigned)src_x < width){
                if((unsigned)src_y < height){
                    index= src_x + src_y*stride;
                    dst[y*stride + x]= ( ( src[index         ]*(s-frac_x)
                                         + src[index       +1]*   frac_x )*(s-frac_y)
                                       + ( src[index+stride  ]*(s-frac_x)
                                         + src[index+stride+1]*   frac_x )*   frac_y
                    index= src_x + av_clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= ( ( src[index         ]*(s-frac_x)
                                         + src[index       +1]*   frac_x )*s
                if((unsigned)src_y < height){
                    index= av_clip(src_x, 0, width) + src_y*stride;
                    dst[y*stride + x]= ( ( src[index         ]*(s-frac_y)
                                         + src[index+stride  ]*   frac_y )*s
                    index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= src[index];
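/* Summary of the four branches above: (src_x, src_y) is the integer source
 * position produced by the (partly elided) affine transform and
 * frac_x/frac_y are the sub-pel remainders in [0, s). Fully inside the
 * picture, a 2x2 bilinear interpolation is done; with one coordinate out of
 * range the position is clipped and the filter degenerates to 1-D; with both
 * outside, the nearest border sample is used directly. */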
static inline void put_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    case 2: put_pixels2_c (dst, src, stride, height); break;
    case 4: put_pixels4_c (dst, src, stride, height); break;
    case 8: put_pixels8_c (dst, src, stride, height); break;
    case 16:put_pixels16_c(dst, src, stride, height); break;

static inline void put_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (683*(2*src[j] + src[j+1] + 1)) >> 11;

static inline void put_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (683*(src[j] + 2*src[j+1] + 1)) >> 11;

static inline void put_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (683*(2*src[j] + src[j+stride] + 1)) >> 11;

static inline void put_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (2731*(4*src[j] + 3*src[j+1] + 3*src[j+stride] + 2*src[j+stride+1] + 6)) >> 15;

static inline void put_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (2731*(3*src[j] + 2*src[j+1] + 4*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15;

static inline void put_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (683*(src[j] + 2*src[j+stride] + 1)) >> 11;

static inline void put_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (2731*(3*src[j] + 4*src[j+1] + 2*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15;

static inline void put_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (2731*(2*src[j] + 3*src[j+1] + 3*src[j+stride] + 4*src[j+stride+1] + 6)) >> 15;

static inline void avg_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    case 2: avg_pixels2_c (dst, src, stride, height); break;
    case 4: avg_pixels4_c (dst, src, stride, height); break;
    case 8: avg_pixels8_c (dst, src, stride, height); break;
    case 16:avg_pixels16_c(dst, src, stride, height); break;

static inline void avg_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((683*(2*src[j] + src[j+1] + 1)) >> 11) + 1) >> 1;

static inline void avg_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((683*(src[j] + 2*src[j+1] + 1)) >> 11) + 1) >> 1;

static inline void avg_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((683*(2*src[j] + src[j+stride] + 1)) >> 11) + 1) >> 1;

static inline void avg_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((2731*(4*src[j] + 3*src[j+1] + 3*src[j+stride] + 2*src[j+stride+1] + 6)) >> 15) + 1) >> 1;

static inline void avg_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((2731*(3*src[j] + 2*src[j+1] + 4*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15) + 1) >> 1;

static inline void avg_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((683*(src[j] + 2*src[j+stride] + 1)) >> 11) + 1) >> 1;

static inline void avg_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((2731*(3*src[j] + 4*src[j+1] + 2*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15) + 1) >> 1;

static inline void avg_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((2731*(2*src[j] + 3*src[j+1] + 3*src[j+stride] + 4*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
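/* The magic constants above, decoded: 683 == ceil(2^11 / 3) and
 * 2731 == ceil(2^15 / 12), so for example
 *   (683*(2*a + b + 1)) >> 11                ~= (2*a + b + 1) / 3
 *   (2731*(4*a + 3*b + 3*c + 2*d + 6)) >> 15 ~= (4*a + 3*b + 3*c + 2*d + 6) / 12
 * i.e. fixed-point divisions by 3 and 12 for third-pel ("tpel")
 * interpolation as used by the SVQ3 decoder. */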
#define TPEL_WIDTH(width)\
static void put_tpel_pixels ## width ## _mc00_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc00_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc10_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc10_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc20_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc20_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc01_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc01_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc11_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc11_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc21_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc21_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc02_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc02_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc12_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc12_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc22_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc22_c(dst, src, stride, width, height);}
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
\
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
\
static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
\
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
\
static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
\
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            OP(dst[4], (A*src[4] + E*src[step+4]));\
            OP(dst[5], (A*src[5] + E*src[step+5]));\
            OP(dst[6], (A*src[6] + E*src[step+6]));\
            OP(dst[7], (A*src[7] + E*src[step+7]));\

#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)

H264_CHROMA_MC(put_       , op_put)
H264_CHROMA_MC(avg_       , op_avg)
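/* Weight check for the chroma filters above: A+B+C+D == 8*8 == 64 for any
 * x,y in 0..7, which is why op_put rounds with (b + 32) >> 6 and op_avg
 * additionally averages with the existing dst value, rounding up. When
 * D == 0 (x or y is zero), the partly elided fast path collapses the filter
 * to two taps along one axis, selected by step == C ? stride : 1. */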
static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
    const int A=(8-x)*(8-y);
    const int B=(  x)*(8-y);
    const int C=(8-x)*(  y);
    const int D=(  x)*(  y);

    assert(x<8 && y<8 && x>=0 && y>=0);

        dst[0] = (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6;
        dst[1] = (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6;
        dst[2] = (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6;
        dst[3] = (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6;
        dst[4] = (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6;
        dst[5] = (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6;
        dst[6] = (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6;
        dst[7] = (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6;

static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
    const int A=(8-x)*(8-y);
    const int B=(  x)*(8-y);
    const int C=(8-x)*(  y);
    const int D=(  x)*(  y);

    assert(x<8 && y<8 && x>=0 && y>=0);

        dst[0] = avg2(dst[0], ((A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6));
        dst[1] = avg2(dst[1], ((A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6));
        dst[2] = avg2(dst[2], ((A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6));
        dst[3] = avg2(dst[3], ((A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6));
        dst[4] = avg2(dst[4], ((A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6));
        dst[5] = avg2(dst[5], ((A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6));
        dst[6] = avg2(dst[6], ((A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6));
        dst[7] = avg2(dst[7], ((A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6));
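/* Note on the filters generated below: the MPEG-4 half-pel lowpass is the
 * symmetric 8-tap (-1, 3, -6, 20, 20, -6, 3, -1), whose taps sum to 32; the
 * rounding, normalization and clamping (e.g. cm[(x + 16) >> 5] in the
 * rounding variant) are supplied by the OP/RND macros at instantiation time,
 * outside this excerpt. Near block edges the outer taps are mirrored back
 * inside (note the reuse of src[8] / src[16]) instead of reading past the
 * block. */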
1734 #define QPEL_MC(r, OPNAME, RND, OP) \
1735 static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1736 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
1740 OP(dst[0], (src[0]+src[1])*20 - (src[0]+src[2])*6 + (src[1]+src[3])*3 - (src[2]+src[4]));\
1741 OP(dst[1], (src[1]+src[2])*20 - (src[0]+src[3])*6 + (src[0]+src[4])*3 - (src[1]+src[5]));\
1742 OP(dst[2], (src[2]+src[3])*20 - (src[1]+src[4])*6 + (src[0]+src[5])*3 - (src[0]+src[6]));\
1743 OP(dst[3], (src[3]+src[4])*20 - (src[2]+src[5])*6 + (src[1]+src[6])*3 - (src[0]+src[7]));\
1744 OP(dst[4], (src[4]+src[5])*20 - (src[3]+src[6])*6 + (src[2]+src[7])*3 - (src[1]+src[8]));\
1745 OP(dst[5], (src[5]+src[6])*20 - (src[4]+src[7])*6 + (src[3]+src[8])*3 - (src[2]+src[8]));\
1746 OP(dst[6], (src[6]+src[7])*20 - (src[5]+src[8])*6 + (src[4]+src[8])*3 - (src[3]+src[7]));\
1747 OP(dst[7], (src[7]+src[8])*20 - (src[6]+src[8])*6 + (src[5]+src[7])*3 - (src[4]+src[6]));\
1753 static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1755 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
1759 const int src0= src[0*srcStride];\
1760 const int src1= src[1*srcStride];\
1761 const int src2= src[2*srcStride];\
1762 const int src3= src[3*srcStride];\
1763 const int src4= src[4*srcStride];\
1764 const int src5= src[5*srcStride];\
1765 const int src6= src[6*srcStride];\
1766 const int src7= src[7*srcStride];\
1767 const int src8= src[8*srcStride];\
1768 OP(dst[0*dstStride], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
1769 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
1770 OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
1771 OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
1772 OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
1773 OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
1774 OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
1775 OP(dst[7*dstStride], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
1781 static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1782 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
1787 OP(dst[ 0], (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]));\
1788 OP(dst[ 1], (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]));\
1789 OP(dst[ 2], (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]));\
1790 OP(dst[ 3], (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]));\
1791 OP(dst[ 4], (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]));\
1792 OP(dst[ 5], (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]));\
1793 OP(dst[ 6], (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]));\
1794 OP(dst[ 7], (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]));\
1795 OP(dst[ 8], (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]));\
1796 OP(dst[ 9], (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]));\
1797 OP(dst[10], (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]));\
1798 OP(dst[11], (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]));\
1799 OP(dst[12], (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]));\
1800 OP(dst[13], (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]));\
1801 OP(dst[14], (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]));\
1802 OP(dst[15], (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]));\
1808 static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1809 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
1814 const int src0= src[0*srcStride];\
1815 const int src1= src[1*srcStride];\
1816 const int src2= src[2*srcStride];\
1817 const int src3= src[3*srcStride];\
1818 const int src4= src[4*srcStride];\
1819 const int src5= src[5*srcStride];\
1820 const int src6= src[6*srcStride];\
1821 const int src7= src[7*srcStride];\
1822 const int src8= src[8*srcStride];\
1823 const int src9= src[9*srcStride];\
1824 const int src10= src[10*srcStride];\
1825 const int src11= src[11*srcStride];\
1826 const int src12= src[12*srcStride];\
1827 const int src13= src[13*srcStride];\
1828 const int src14= src[14*srcStride];\
1829 const int src15= src[15*srcStride];\
1830 const int src16= src[16*srcStride];\
1831 OP(dst[ 0*dstStride], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
1832 OP(dst[ 1*dstStride], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
1833 OP(dst[ 2*dstStride], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
1834 OP(dst[ 3*dstStride], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
1835 OP(dst[ 4*dstStride], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
1836 OP(dst[ 5*dstStride], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
1837 OP(dst[ 6*dstStride], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
1838 OP(dst[ 7*dstStride], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
1839 OP(dst[ 8*dstStride], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
1840 OP(dst[ 9*dstStride], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
1841 OP(dst[10*dstStride], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
1842 OP(dst[11*dstStride], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
1843 OP(dst[12*dstStride], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
1844 OP(dst[13*dstStride], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
1845 OP(dst[14*dstStride], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
1846 OP(dst[15*dstStride], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
1852 static void OPNAME ## qpel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
1853 OPNAME ## pixels8_c(dst, src, stride, 8);\
1856 static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
1858 put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
1859 OPNAME ## pixels8_l2(dst, src, half, stride, stride, 8, 8);\
1862 static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
1863 OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
1866 static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
1868 put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
1869 OPNAME ## pixels8_l2(dst, src+1, half, stride, stride, 8, 8);\
1872 static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
1873 uint8_t full[16*9];\
1875 copy_block9(full, src, 16, stride, 9);\
1876 put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
1877 OPNAME ## pixels8_l2(dst, full, half, stride, 16, 8, 8);\
1880 static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
1881 uint8_t full[16*9];\
1882 copy_block9(full, src, 16, stride, 9);\
1883 OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
1886 static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
1887 uint8_t full[16*9];\
1889 copy_block9(full, src, 16, stride, 9);\
1890 put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
1891 OPNAME ## pixels8_l2(dst, full+16, half, stride, 16, 8, 8);\
1893 void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
1894 uint8_t full[16*9];\
1897 uint8_t halfHV[64];\
1898 copy_block9(full, src, 16, stride, 9);\
1899 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1900 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
1901 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1902 OPNAME ## pixels8_l4(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
1904 static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
1905 uint8_t full[16*9];\
1907 uint8_t halfHV[64];\
1908 copy_block9(full, src, 16, stride, 9);\
1909 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1910 put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
1911 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1912 OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
1914 void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
1915 uint8_t full[16*9];\
1918 uint8_t halfHV[64];\
1919 copy_block9(full, src, 16, stride, 9);\
1920 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1921 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
1922 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1923 OPNAME ## pixels8_l4(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
1925 static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
1926 uint8_t full[16*9];\
1928 uint8_t halfHV[64];\
1929 copy_block9(full, src, 16, stride, 9);\
1930 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1931 put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
1932 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1933 OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
1935 void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
1936 uint8_t full[16*9];\
1937 uint8_t halfH[72];\
1938 uint8_t halfV[64];\
1939 uint8_t halfHV[64];\
1940 copy_block9(full, src, 16, stride, 9);\
1941 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1942 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
1943 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1944 OPNAME ## pixels8_l4(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
1946 static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
1947 uint8_t full[16*9];\
1948 uint8_t halfH[72];\
1949 uint8_t halfHV[64];\
1950 copy_block9(full, src, 16, stride, 9);\
1951 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1952 put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
1953 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1954 OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
1956 void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
1957 uint8_t full[16*9];\
1958 uint8_t halfH[72];\
1959 uint8_t halfV[64];\
1960 uint8_t halfHV[64];\
1961 copy_block9(full, src, 16, stride, 9);\
1962 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
1963 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
1964 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1965 OPNAME ## pixels8_l4(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
1967 static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
1968 uint8_t full[16*9];\
1969 uint8_t halfH[72];\
1970 uint8_t halfHV[64];\
1971 copy_block9(full, src, 16, stride, 9);\
1972 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1973 put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
1974 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1975 OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
1977 static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
1978 uint8_t halfH[72];\
1979 uint8_t halfHV[64];\
1980 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
1981 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1982 OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
1984 static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
1985 uint8_t halfH[72];\
1986 uint8_t halfHV[64];\
1987 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
1988 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1989 OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
1991 void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
1992 uint8_t full[16*9];\
1993 uint8_t halfH[72];\
1994 uint8_t halfV[64];\
1995 uint8_t halfHV[64];\
1996 copy_block9(full, src, 16, stride, 9);\
1997 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1998 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
1999 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
2000 OPNAME ## pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);\
2002 static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
2003 uint8_t full[16*9];\
2004 uint8_t halfH[72];\
2005 copy_block9(full, src, 16, stride, 9);\
2006 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
2007 put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
2008 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
2010 void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
2011 uint8_t full[16*9];\
2012 uint8_t halfH[72];\
2013 uint8_t halfV[64];\
2014 uint8_t halfHV[64];\
2015 copy_block9(full, src, 16, stride, 9);\
2016 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
2017 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
2018 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
2019 OPNAME ## pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);\
2021 static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
2022 uint8_t full[16*9];\
2023 uint8_t halfH[72];\
2024 copy_block9(full, src, 16, stride, 9);\
2025 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
2026 put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
2027 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
2029 static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
2030 uint8_t halfH[72];\
2031 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
2032 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
2034 static void OPNAME ## qpel16_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
2035 OPNAME ## pixels16_c(dst, src, stride, 16);\
2038 static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
2039 uint8_t half[256];\
2040 put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
2041 OPNAME ## pixels16_l2(dst, src, half, stride, stride, 16, 16);\
2044 static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
2045 OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
2048 static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
2049 uint8_t half[256];\
2050 put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
2051 OPNAME ## pixels16_l2(dst, src+1, half, stride, stride, 16, 16);\
2054 static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
2055 uint8_t full[24*17];\
2056 uint8_t half[256];\
2057 copy_block17(full, src, 24, stride, 17);\
2058 put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
2059 OPNAME ## pixels16_l2(dst, full, half, stride, 24, 16, 16);\
2062 static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
2063 uint8_t full[24*17];\
2064 copy_block17(full, src, 24, stride, 17);\
2065 OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
2068 static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
2069 uint8_t full[24*17];\
2070 uint8_t half[256];\
2071 copy_block17(full, src, 24, stride, 17);\
2072 put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
2073 OPNAME ## pixels16_l2(dst, full+24, half, stride, 24, 16, 16);\
2075 void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
2076 uint8_t full[24*17];\
2077 uint8_t halfH[272];\
2078 uint8_t halfV[256];\
2079 uint8_t halfHV[256];\
2080 copy_block17(full, src, 24, stride, 17);\
2081 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2082 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
2083 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2084 OPNAME ## pixels16_l4(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
2086 static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
2087 uint8_t full[24*17];\
2088 uint8_t halfH[272];\
2089 uint8_t halfHV[256];\
2090 copy_block17(full, src, 24, stride, 17);\
2091 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2092 put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
2093 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2094 OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
2096 void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
2097 uint8_t full[24*17];\
2098 uint8_t halfH[272];\
2099 uint8_t halfV[256];\
2100 uint8_t halfHV[256];\
2101 copy_block17(full, src, 24, stride, 17);\
2102 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2103 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
2104 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2105 OPNAME ## pixels16_l4(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
2107 static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
2108 uint8_t full[24*17];\
2109 uint8_t halfH[272];\
2110 uint8_t halfHV[256];\
2111 copy_block17(full, src, 24, stride, 17);\
2112 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2113 put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
2114 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2115 OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
2117 void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
2118 uint8_t full[24*17];\
2119 uint8_t halfH[272];\
2120 uint8_t halfV[256];\
2121 uint8_t halfHV[256];\
2122 copy_block17(full, src, 24, stride, 17);\
2123 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2124 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
2125 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2126 OPNAME ## pixels16_l4(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
2128 static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
2129 uint8_t full[24*17];\
2130 uint8_t halfH[272];\
2131 uint8_t halfHV[256];\
2132 copy_block17(full, src, 24, stride, 17);\
2133 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2134 put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
2135 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2136 OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
2138 void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
2139 uint8_t full[24*17];\
2140 uint8_t halfH[272];\
2141 uint8_t halfV[256];\
2142 uint8_t halfHV[256];\
2143 copy_block17(full, src, 24, stride, 17);\
2144 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
2145 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
2146 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2147 OPNAME ## pixels16_l4(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
2149 static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
2150 uint8_t full[24*17];\
2151 uint8_t halfH[272];\
2152 uint8_t halfHV[256];\
2153 copy_block17(full, src, 24, stride, 17);\
2154 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2155 put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
2156 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2157 OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
2159 static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
2160 uint8_t halfH[272];\
2161 uint8_t halfHV[256];\
2162 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
2163 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2164 OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
2166 static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
2167 uint8_t halfH[272];\
2168 uint8_t halfHV[256];\
2169 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
2170 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2171 OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
2173 void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
2174 uint8_t full[24*17];\
2175 uint8_t halfH[272];\
2176 uint8_t halfV[256];\
2177 uint8_t halfHV[256];\
2178 copy_block17(full, src, 24, stride, 17);\
2179 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2180 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
2181 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2182 OPNAME ## pixels16_l2(dst, halfV, halfHV, stride, 16, 16, 16);\
2184 static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
2185 uint8_t full[24*17];\
2186 uint8_t halfH[272];\
2187 copy_block17(full, src, 24, stride, 17);\
2188 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2189 put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
2190 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
2192 void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
2193 uint8_t full[24*17];\
2194 uint8_t halfH[272];\
2195 uint8_t halfV[256];\
2196 uint8_t halfHV[256];\
2197 copy_block17(full, src, 24, stride, 17);\
2198 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2199 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
2200 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
2201 OPNAME ## pixels16_l2(dst, halfV, halfHV, stride, 16, 16, 16);\
2203 static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
2204 uint8_t full[24*17];\
2205 uint8_t halfH[272];\
2206 copy_block17(full, src, 24, stride, 17);\
2207 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
2208 put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
2209 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
2211 static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
2212 uint8_t halfH[272];\
2213 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
2214 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
2217 #define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
2218 #define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
2219 #define op_put(a, b) a = cm[((b) + 16)>>5]
2220 #define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
2222 QPEL_MC(0, put_ , _ , op_put)
2223 QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
2224 QPEL_MC(0, avg_ , _ , op_avg)
2225 //QPEL_MC(1, avg_no_rnd , _ , op_avg)
2227 #undef op_avg_no_rnd
2229 #undef op_put_no_rnd
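/* Illustrative sketch (not from the original source): the QPEL_MC half-pel
 * kernels above implement the MPEG-4 8-tap filter with taps
 * (-1, 3, -6, 20, 20, -6, 3, -1), which sum to 32. The op_put/op_avg macros
 * add 16 (15 for the no-rounding variants) and shift right by 5, i.e. they
 * divide by 32 with rounding and clip through ff_cropTbl; near the block edge
 * the 16-tall vertical loop mirrors the border samples instead of reading
 * past src16. One horizontally filtered half-sample in plain C, assuming
 * src[-3]..src[4] are addressable:
 */
static av_unused int mpeg4_qpel_half_sample_sketch(const uint8_t *src)
{
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    const int sum = (src[ 0] + src[1])*20
                  - (src[-1] + src[2])*6
                  + (src[-2] + src[3])*3
                  - (src[-3] + src[4]);
    return cm[(sum + 16) >> 5]; /* rounded /32, clipped to 0..255 */
}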
2232 #define H264_LOWPASS(OPNAME, OP, OP2) \
2233 static av_unused void OPNAME ## h264_qpel2_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2235 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
2239 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
2240 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
2246 static av_unused void OPNAME ## h264_qpel2_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2248 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
2252 const int srcB= src[-2*srcStride];\
2253 const int srcA= src[-1*srcStride];\
2254 const int src0= src[0 *srcStride];\
2255 const int src1= src[1 *srcStride];\
2256 const int src2= src[2 *srcStride];\
2257 const int src3= src[3 *srcStride];\
2258 const int src4= src[4 *srcStride];\
2259 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
2260 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
2266 static av_unused void OPNAME ## h264_qpel2_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
2269 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
2271 src -= 2*srcStride;\
2272 for(i=0; i<h+5; i++)\
2274 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]);\
2275 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]);\
2279 tmp -= tmpStride*(h+5-2);\
2282 const int tmpB= tmp[-2*tmpStride];\
2283 const int tmpA= tmp[-1*tmpStride];\
2284 const int tmp0= tmp[0 *tmpStride];\
2285 const int tmp1= tmp[1 *tmpStride];\
2286 const int tmp2= tmp[2 *tmpStride];\
2287 const int tmp3= tmp[3 *tmpStride];\
2288 const int tmp4= tmp[4 *tmpStride];\
2289 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
2290 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
2295 static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2297 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
2301 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
2302 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
2303 OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
2304 OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
2310 static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2312 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
2316 const int srcB= src[-2*srcStride];\
2317 const int srcA= src[-1*srcStride];\
2318 const int src0= src[0 *srcStride];\
2319 const int src1= src[1 *srcStride];\
2320 const int src2= src[2 *srcStride];\
2321 const int src3= src[3 *srcStride];\
2322 const int src4= src[4 *srcStride];\
2323 const int src5= src[5 *srcStride];\
2324 const int src6= src[6 *srcStride];\
2325 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
2326 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
2327 OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
2328 OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
2334 static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
2337 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
2339 src -= 2*srcStride;\
2340 for(i=0; i<h+5; i++)\
2342 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]);\
2343 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]);\
2344 tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]);\
2345 tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]);\
2349 tmp -= tmpStride*(h+5-2);\
2352 const int tmpB= tmp[-2*tmpStride];\
2353 const int tmpA= tmp[-1*tmpStride];\
2354 const int tmp0= tmp[0 *tmpStride];\
2355 const int tmp1= tmp[1 *tmpStride];\
2356 const int tmp2= tmp[2 *tmpStride];\
2357 const int tmp3= tmp[3 *tmpStride];\
2358 const int tmp4= tmp[4 *tmpStride];\
2359 const int tmp5= tmp[5 *tmpStride];\
2360 const int tmp6= tmp[6 *tmpStride];\
2361 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
2362 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
2363 OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
2364 OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
2370 static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2372 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
2376 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
2377 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
2378 OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
2379 OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
2380 OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
2381 OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
2382 OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
2383 OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
2389 static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2391 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
2395 const int srcB= src[-2*srcStride];\
2396 const int srcA= src[-1*srcStride];\
2397 const int src0= src[0 *srcStride];\
2398 const int src1= src[1 *srcStride];\
2399 const int src2= src[2 *srcStride];\
2400 const int src3= src[3 *srcStride];\
2401 const int src4= src[4 *srcStride];\
2402 const int src5= src[5 *srcStride];\
2403 const int src6= src[6 *srcStride];\
2404 const int src7= src[7 *srcStride];\
2405 const int src8= src[8 *srcStride];\
2406 const int src9= src[9 *srcStride];\
2407 const int src10=src[10*srcStride];\
2408 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
2409 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
2410 OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
2411 OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
2412 OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
2413 OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
2414 OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
2415 OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
2421 static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
2424 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
2426 src -= 2*srcStride;\
2427 for(i=0; i<h+5; i++)\
2429 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]);\
2430 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]);\
2431 tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]);\
2432 tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]);\
2433 tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]);\
2434 tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]);\
2435 tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]);\
2436 tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]);\
2440 tmp -= tmpStride*(h+5-2);\
2443 const int tmpB= tmp[-2*tmpStride];\
2444 const int tmpA= tmp[-1*tmpStride];\
2445 const int tmp0= tmp[0 *tmpStride];\
2446 const int tmp1= tmp[1 *tmpStride];\
2447 const int tmp2= tmp[2 *tmpStride];\
2448 const int tmp3= tmp[3 *tmpStride];\
2449 const int tmp4= tmp[4 *tmpStride];\
2450 const int tmp5= tmp[5 *tmpStride];\
2451 const int tmp6= tmp[6 *tmpStride];\
2452 const int tmp7= tmp[7 *tmpStride];\
2453 const int tmp8= tmp[8 *tmpStride];\
2454 const int tmp9= tmp[9 *tmpStride];\
2455 const int tmp10=tmp[10*tmpStride];\
2456 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
2457 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
2458 OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
2459 OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
2460 OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
2461 OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
2462 OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
2463 OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
2469 static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2470 OPNAME ## h264_qpel8_v_lowpass(dst , src , dstStride, srcStride);\
2471 OPNAME ## h264_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride);\
2472 src += 8*srcStride;\
2473 dst += 8*dstStride;\
2474 OPNAME ## h264_qpel8_v_lowpass(dst , src , dstStride, srcStride);\
2475 OPNAME ## h264_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride);\
2478 static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2479 OPNAME ## h264_qpel8_h_lowpass(dst , src , dstStride, srcStride);\
2480 OPNAME ## h264_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride);\
2481 src += 8*srcStride;\
2482 dst += 8*dstStride;\
2483 OPNAME ## h264_qpel8_h_lowpass(dst , src , dstStride, srcStride);\
2484 OPNAME ## h264_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride);\
2487 static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
2488 OPNAME ## h264_qpel8_hv_lowpass(dst , tmp , src , dstStride, tmpStride, srcStride);\
2489 OPNAME ## h264_qpel8_hv_lowpass(dst+8, tmp+8, src+8, dstStride, tmpStride, srcStride);\
2490 src += 8*srcStride;\
2491 dst += 8*dstStride;\
2492 OPNAME ## h264_qpel8_hv_lowpass(dst , tmp , src , dstStride, tmpStride, srcStride);\
2493 OPNAME ## h264_qpel8_hv_lowpass(dst+8, tmp+8, src+8, dstStride, tmpStride, srcStride);\
2496 #define H264_MC(OPNAME, SIZE) \
2497 static void OPNAME ## h264_qpel ## SIZE ## _mc00_c (uint8_t *dst, uint8_t *src, int stride){\
2498 OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
2501 static void OPNAME ## h264_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
2502 uint8_t half[SIZE*SIZE];\
2503 put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
2504 OPNAME ## pixels ## SIZE ## _l2(dst, src, half, stride, stride, SIZE, SIZE);\
2507 static void OPNAME ## h264_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
2508 OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
2511 static void OPNAME ## h264_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
2512 uint8_t half[SIZE*SIZE];\
2513 put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
2514 OPNAME ## pixels ## SIZE ## _l2(dst, src+1, half, stride, stride, SIZE, SIZE);\
2517 static void OPNAME ## h264_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
2518 uint8_t full[SIZE*(SIZE+5)];\
2519 uint8_t * const full_mid= full + SIZE*2;\
2520 uint8_t half[SIZE*SIZE];\
2521 copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
2522 put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
2523 OPNAME ## pixels ## SIZE ## _l2(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
2526 static void OPNAME ## h264_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
2527 uint8_t full[SIZE*(SIZE+5)];\
2528 uint8_t * const full_mid= full + SIZE*2;\
2529 copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
2530 OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
2533 static void OPNAME ## h264_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
2534 uint8_t full[SIZE*(SIZE+5)];\
2535 uint8_t * const full_mid= full + SIZE*2;\
2536 uint8_t half[SIZE*SIZE];\
2537 copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
2538 put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
2539 OPNAME ## pixels ## SIZE ## _l2(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
2542 static void OPNAME ## h264_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
2543 uint8_t full[SIZE*(SIZE+5)];\
2544 uint8_t * const full_mid= full + SIZE*2;\
2545 uint8_t halfH[SIZE*SIZE];\
2546 uint8_t halfV[SIZE*SIZE];\
2547 put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
2548 copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
2549 put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
2550 OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
2553 static void OPNAME ## h264_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
2554 uint8_t full[SIZE*(SIZE+5)];\
2555 uint8_t * const full_mid= full + SIZE*2;\
2556 uint8_t halfH[SIZE*SIZE];\
2557 uint8_t halfV[SIZE*SIZE];\
2558 put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
2559 copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
2560 put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
2561 OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
2564 static void OPNAME ## h264_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
2565 uint8_t full[SIZE*(SIZE+5)];\
2566 uint8_t * const full_mid= full + SIZE*2;\
2567 uint8_t halfH[SIZE*SIZE];\
2568 uint8_t halfV[SIZE*SIZE];\
2569 put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
2570 copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
2571 put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
2572 OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
2575 static void OPNAME ## h264_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
2576 uint8_t full[SIZE*(SIZE+5)];\
2577 uint8_t * const full_mid= full + SIZE*2;\
2578 uint8_t halfH[SIZE*SIZE];\
2579 uint8_t halfV[SIZE*SIZE];\
2580 put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
2581 copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
2582 put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
2583 OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
2586 static void OPNAME ## h264_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
2587 int16_t tmp[SIZE*(SIZE+5)];\
2588 OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
2591 static void OPNAME ## h264_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
2592 int16_t tmp[SIZE*(SIZE+5)];\
2593 uint8_t halfH[SIZE*SIZE];\
2594 uint8_t halfHV[SIZE*SIZE];\
2595 put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
2596 put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
2597 OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
2600 static void OPNAME ## h264_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
2601 int16_t tmp[SIZE*(SIZE+5)];\
2602 uint8_t halfH[SIZE*SIZE];\
2603 uint8_t halfHV[SIZE*SIZE];\
2604 put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
2605 put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
2606 OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
2609 static void OPNAME ## h264_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
2610 uint8_t full[SIZE*(SIZE+5)];\
2611 uint8_t * const full_mid= full + SIZE*2;\
2612 int16_t tmp[SIZE*(SIZE+5)];\
2613 uint8_t halfV[SIZE*SIZE];\
2614 uint8_t halfHV[SIZE*SIZE];\
2615 copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
2616 put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
2617 put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
2618 OPNAME ## pixels ## SIZE ## _l2(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
2621 static void OPNAME ## h264_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
2622 uint8_t full[SIZE*(SIZE+5)];\
2623 uint8_t * const full_mid= full + SIZE*2;\
2624 int16_t tmp[SIZE*(SIZE+5)];\
2625 uint8_t halfV[SIZE*SIZE];\
2626 uint8_t halfHV[SIZE*SIZE];\
2627 copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
2628 put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
2629 put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
2630 OPNAME ## pixels ## SIZE ## _l2(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
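/* Illustrative note: in the H264_MC name grid, mcXY encodes the quarter-pel
 * motion vector phase: X is the horizontal and Y the vertical sub-position in
 * quarter pixels (0..3). mc00 is the full-sample copy, mc20/mc02 are the pure
 * half-sample filters, mc22 filters in both dimensions through the int16_t
 * tmp[] plane, and the remaining positions average two planes with
 * pixels ## SIZE ## _l2(). */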
2633 #define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
2634 //#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
2635 #define op_put(a, b) a = cm[((b) + 16)>>5]
2636 #define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
2637 #define op2_put(a, b) a = cm[((b) + 512)>>10]
2639 H264_LOWPASS(put_ , op_put, op2_put)
2640 H264_LOWPASS(avg_ , op_avg, op2_avg)
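/* Illustrative sketch (not from the original source): the H264_LOWPASS
 * kernels use the H.264 six-tap filter (1, -5, 20, 20, -5, 1), whose taps sum
 * to 32, so the one-dimensional ops round with (b + 16) >> 5. The hv path
 * keeps the unnormalized intermediate in int16_t and normalizes only once at
 * the end, hence OP2's (b + 512) >> 10 (32 * 32 = 1024). One half-sample in
 * plain C:
 */
static av_unused int h264_six_tap_sketch(const uint8_t *src)
{
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    const int sum = (src[0] + src[1])*20 - (src[-1] + src[2])*5 + (src[-2] + src[3]);
    return cm[(sum + 16) >> 5]; /* half-sample between src[0] and src[1] */
}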
2655 #define op_scale1(x) block[x] = av_clip_uint8( (block[x]*weight + offset) >> log2_denom )
2656 #define op_scale2(x) dst[x] = av_clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
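/* Illustrative sketch (not from the original source): op_scale1/op_scale2
 * above implement H.264 explicit weighted (bi-)prediction and are expanded by
 * the H264_WEIGHT macro below. The offset is pre-scaled by log2_denom with a
 * rounding term folded in, so the per-pixel work reduces to one
 * multiply-add-shift:
 */
static av_unused uint8_t h264_weight_pixel_sketch(uint8_t pix, int log2_denom,
                                                  int weight, int offset)
{
    offset <<= log2_denom;                 /* same pre-scaling as the macro */
    if (log2_denom)
        offset += 1 << (log2_denom - 1);   /* round to nearest */
    return av_clip_uint8((pix * weight + offset) >> log2_denom);
}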
2657 #define H264_WEIGHT(W,H) \
2658 static void weight_h264_pixels ## W ## x ## H ## _c(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
2660 offset <<= log2_denom; \
2661 if(log2_denom) offset += 1<<(log2_denom-1); \
2662 for(y=0; y<H; y++, block += stride){ \
2665 if(W==2) continue; \
2668 if(W==4) continue; \
2673 if(W==8) continue; \
2684 static void biweight_h264_pixels ## W ## x ## H ## _c(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
2686 offset = ((offset + 1) | 1) << log2_denom; \
2687 for(y=0; y<H; y++, dst += stride, src += stride){ \
2690 if(W==2) continue; \
2693 if(W==4) continue; \
2698 if(W==8) continue; \
2725 static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
2726 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
2730 dst[0]= cm[(9*(src[0] + src[1]) - (src[-1] + src[2]) + 8)>>4];
2731 dst[1]= cm[(9*(src[1] + src[2]) - (src[ 0] + src[3]) + 8)>>4];
2732 dst[2]= cm[(9*(src[2] + src[3]) - (src[ 1] + src[4]) + 8)>>4];
2733 dst[3]= cm[(9*(src[3] + src[4]) - (src[ 2] + src[5]) + 8)>>4];
2734 dst[4]= cm[(9*(src[4] + src[5]) - (src[ 3] + src[6]) + 8)>>4];
2735 dst[5]= cm[(9*(src[5] + src[6]) - (src[ 4] + src[7]) + 8)>>4];
2736 dst[6]= cm[(9*(src[6] + src[7]) - (src[ 5] + src[8]) + 8)>>4];
2737 dst[7]= cm[(9*(src[7] + src[8]) - (src[ 6] + src[9]) + 8)>>4];
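/* Illustrative note: the WMV2 mspel kernel above is the 4-tap filter
 * (-1, 9, 9, -1); its taps sum to 16, hence the "+ 8) >> 4" round-to-nearest
 * before clipping through ff_cropTbl. wmv2_mspel8_v_lowpass() below applies
 * the same kernel down the columns. */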
2743 #if CONFIG_CAVS_DECODER
2745 void ff_cavsdsp_init(DSPContext* c, AVCodecContext *avctx);
2747 void ff_put_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
2748 put_pixels8_c(dst, src, stride, 8);
2750 void ff_avg_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
2751 avg_pixels8_c(dst, src, stride, 8);
2753 void ff_put_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
2754 put_pixels16_c(dst, src, stride, 16);
2756 void ff_avg_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
2757 avg_pixels16_c(dst, src, stride, 16);
2759 #endif /* CONFIG_CAVS_DECODER */
2761 void ff_mlp_init(DSPContext* c, AVCodecContext *avctx);
2763 #if CONFIG_VC1_DECODER
2765 void ff_vc1dsp_init(DSPContext* c, AVCodecContext *avctx);
2767 void ff_put_vc1_mspel_mc00_c(uint8_t *dst, uint8_t *src, int stride, int rnd) {
2768 put_pixels8_c(dst, src, stride, 8);
2770 void ff_avg_vc1_mspel_mc00_c(uint8_t *dst, uint8_t *src, int stride, int rnd) {
2771 avg_pixels8_c(dst, src, stride, 8);
2773 #endif /* CONFIG_VC1_DECODER */
2775 void ff_intrax8dsp_init(DSPContext* c, AVCodecContext *avctx);
2778 void ff_h264dspenc_init(DSPContext* c, AVCodecContext *avctx);
2780 #if CONFIG_RV30_DECODER
2781 void ff_rv30dsp_init(DSPContext* c, AVCodecContext *avctx);
2782 #endif /* CONFIG_RV30_DECODER */
2784 #if CONFIG_RV40_DECODER
2785 static void put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){
2786 put_pixels16_xy2_c(dst, src, stride, 16);
2788 static void avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){
2789 avg_pixels16_xy2_c(dst, src, stride, 16);
2791 static void put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){
2792 put_pixels8_xy2_c(dst, src, stride, 8);
2794 static void avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){
2795 avg_pixels8_xy2_c(dst, src, stride, 8);
2798 void ff_rv40dsp_init(DSPContext* c, AVCodecContext *avctx);
2799 #endif /* CONFIG_RV40_DECODER */
2801 static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
2802 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
2806 const int src_1= src[ -srcStride];
2807 const int src0 = src[0 ];
2808 const int src1 = src[ srcStride];
2809 const int src2 = src[2*srcStride];
2810 const int src3 = src[3*srcStride];
2811 const int src4 = src[4*srcStride];
2812 const int src5 = src[5*srcStride];
2813 const int src6 = src[6*srcStride];
2814 const int src7 = src[7*srcStride];
2815 const int src8 = src[8*srcStride];
2816 const int src9 = src[9*srcStride];
2817 dst[0*dstStride]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
2818 dst[1*dstStride]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
2819 dst[2*dstStride]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
2820 dst[3*dstStride]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
2821 dst[4*dstStride]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
2822 dst[5*dstStride]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
2823 dst[6*dstStride]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
2824 dst[7*dstStride]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
2830 static void put_mspel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){
2831 put_pixels8_c(dst, src, stride, 8);
2834 static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){
2835 uint8_t half[64];
2836 wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
2837 put_pixels8_l2(dst, src, half, stride, stride, 8, 8);
2840 static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){
2841 wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
2844 static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){
2845 uint8_t half[64];
2846 wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
2847 put_pixels8_l2(dst, src+1, half, stride, stride, 8, 8);
2850 static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){
2851 wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
2854 static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){
2855 uint8_t halfH[88];
2856 uint8_t halfV[64];
2857 uint8_t halfHV[64];
2858 wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
2859 wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
2860 wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
2861 put_pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);
2863 static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){
2864 uint8_t halfH[88];
2865 uint8_t halfV[64];
2866 uint8_t halfHV[64];
2867 wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
2868 wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
2869 wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
2870 put_pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);
2872 static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){
2873 uint8_t halfH[88];
2874 wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
2875 wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
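/* Illustrative sketch (not from the original source): the pixels*_l2() helpers
 * used by the mc composition functions above are generated earlier in this
 * file; they are assumed to form the rounded average of two prediction planes,
 * roughly:
 */
static av_unused void pixels8_l2_sketch(uint8_t *dst, const uint8_t *src1,
                                        const uint8_t *src2, int dst_stride,
                                        int src_stride1, int src_stride2, int h)
{
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (src1[x] + src2[x] + 1) >> 1; /* rounded average */
        dst  += dst_stride;
        src1 += src_stride1;
        src2 += src_stride2;
    }
}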
2878 static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale){
2879 if(CONFIG_ANY_H263) {
2881 const int strength= ff_h263_loop_filter_strength[qscale];
2885 int p0= src[x-2*stride];
2886 int p1= src[x-1*stride];
2887 int p2= src[x+0*stride];
2888 int p3= src[x+1*stride];
2889 int d = (p0 - p3 + 4*(p2 - p1)) / 8;
2891 if (d<-2*strength) d1= 0;
2892 else if(d<- strength) d1=-2*strength - d;
2893 else if(d< strength) d1= d;
2894 else if(d< 2*strength) d1= 2*strength - d;
2899 if(p1&256) p1= ~(p1>>31);
2900 if(p2&256) p2= ~(p2>>31);
2902 src[x-1*stride] = p1;
2903 src[x+0*stride] = p2;
2907 d2= av_clip((p0-p3)/4, -ad1, ad1);
2909 src[x-2*stride] = p0 - d2;
2910 src[x+ stride] = p3 + d2;
2915 static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){
2916 if(CONFIG_ANY_H263) {
2918 const int strength= ff_h263_loop_filter_strength[qscale];
2922 int p0= src[y*stride-2];
2923 int p1= src[y*stride-1];
2924 int p2= src[y*stride+0];
2925 int p3= src[y*stride+1];
2926 int d = (p0 - p3 + 4*(p2 - p1)) / 8;
2928 if (d<-2*strength) d1= 0;
2929 else if(d<- strength) d1=-2*strength - d;
2930 else if(d< strength) d1= d;
2931 else if(d< 2*strength) d1= 2*strength - d;
2936 if(p1&256) p1= ~(p1>>31);
2937 if(p2&256) p2= ~(p2>>31);
2939 src[y*stride-1] = p1;
2940 src[y*stride+0] = p2;
2944 d2= av_clip((p0-p3)/4, -ad1, ad1);
2946 src[y*stride-2] = p0 - d2;
2947 src[y*stride+1] = p3 + d2;
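/* Illustrative sketch (not from the original source): both H.263 loop filters
 * above compute d = (p0 - p3 + 4*(p2 - p1)) / 8 across the edge and then
 * soft-limit it with a tent-shaped function of the quantizer-dependent
 * strength, so small discontinuities are smoothed fully while strong (real)
 * edges are left alone:
 */
static av_unused int h263_d1_ramp_sketch(int d, int strength)
{
    if      (d < -2*strength) return 0;
    else if (d <   -strength) return -2*strength - d;
    else if (d <    strength) return d;
    else if (d <  2*strength) return  2*strength - d;
    else                      return 0;
}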
2952 static void h261_loop_filter_c(uint8_t *src, int stride){
2957 temp[x ] = 4*src[x ];
2958 temp[x + 7*8] = 4*src[x + 7*stride];
2962 xy = y * stride + x;
2964 temp[yz] = src[xy - stride] + 2*src[xy] + src[xy + stride];
2969 src[ y*stride] = (temp[ y*8] + 2)>>2;
2970 src[7+y*stride] = (temp[7+y*8] + 2)>>2;
2972 xy = y * stride + x;
2974 src[xy] = (temp[yz-1] + 2*temp[yz] + temp[yz+1] + 8)>>4;
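/* Illustrative note: h261_loop_filter_c() is a separable (1, 2, 1) smoother.
 * The first pass filters vertically into temp[] (rows 0 and 7 are stored at
 * 4x so they bypass the filter), the edge columns are written back with only
 * the vertical gain undone ((x + 2) >> 2), and the interior gets the
 * horizontal pass on top, undoing the combined 16x gain with (x + 8) >> 4. */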
2979 static inline void h264_loop_filter_luma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0)
2982 for( i = 0; i < 4; i++ ) {
2987 for( d = 0; d < 4; d++ ) {
2988 const int p0 = pix[-1*xstride];
2989 const int p1 = pix[-2*xstride];
2990 const int p2 = pix[-3*xstride];
2991 const int q0 = pix[0];
2992 const int q1 = pix[1*xstride];
2993 const int q2 = pix[2*xstride];
2995 if( FFABS( p0 - q0 ) < alpha &&
2996 FFABS( p1 - p0 ) < beta &&
2997 FFABS( q1 - q0 ) < beta ) {
3002 if( FFABS( p2 - p0 ) < beta ) {
3003 pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
3006 if( FFABS( q2 - q0 ) < beta ) {
3007 pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
3011 i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
3012 pix[-xstride] = av_clip_uint8( p0 + i_delta ); /* p0' */
3013 pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */
3019 static void h264_v_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
3021 h264_loop_filter_luma_c(pix, stride, 1, alpha, beta, tc0);
3023 static void h264_h_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
3025 h264_loop_filter_luma_c(pix, 1, stride, alpha, beta, tc0);
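/* Illustrative note: one kernel serves both edge directions by swapping the
 * stride roles. For a horizontal edge (the _v_ filter) the p/q samples lie
 * stride apart (xstride = stride) and the loop walks along the edge one byte
 * at a time; for a vertical edge (_h_) it is the reverse. tc0[] supplies one
 * clipping threshold per 4-pixel segment; negative entries are assumed to
 * mark segments that the (elided) loop header leaves untouched. */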
3028 static inline void h264_loop_filter_luma_intra_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta)
3031 for( d = 0; d < 16; d++ ) {
3032 const int p2 = pix[-3*xstride];
3033 const int p1 = pix[-2*xstride];
3034 const int p0 = pix[-1*xstride];
3036 const int q0 = pix[ 0*xstride];
3037 const int q1 = pix[ 1*xstride];
3038 const int q2 = pix[ 2*xstride];
3040 if( FFABS( p0 - q0 ) < alpha &&
3041 FFABS( p1 - p0 ) < beta &&
3042 FFABS( q1 - q0 ) < beta ) {
3044 if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
3045 if( FFABS( p2 - p0 ) < beta)
3047 const int p3 = pix[-4*xstride];
3049 pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
3050 pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
3051 pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
3054 pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
3056 if( FFABS( q2 - q0 ) < beta)
3058 const int q3 = pix[3*xstride];
3060 pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
3061 pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
3062 pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
3065 pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
3069 pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
3070 pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
3076 static void h264_v_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
3078 h264_loop_filter_luma_intra_c(pix, stride, 1, alpha, beta);
3080 static void h264_h_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
3082 h264_loop_filter_luma_intra_c(pix, 1, stride, alpha, beta);
3085 static inline void h264_loop_filter_chroma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0)
3088 for( i = 0; i < 4; i++ ) {
3089 const int tc = tc0[i];
3094 for( d = 0; d < 2; d++ ) {
3095 const int p0 = pix[-1*xstride];
3096 const int p1 = pix[-2*xstride];
3097 const int q0 = pix[0];
3098 const int q1 = pix[1*xstride];
3100 if( FFABS( p0 - q0 ) < alpha &&
3101 FFABS( p1 - p0 ) < beta &&
3102 FFABS( q1 - q0 ) < beta ) {
3104 int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
3106 pix[-xstride] = av_clip_uint8( p0 + delta ); /* p0' */
3107 pix[0] = av_clip_uint8( q0 - delta ); /* q0' */
3113 static void h264_v_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
3115 h264_loop_filter_chroma_c(pix, stride, 1, alpha, beta, tc0);
3117 static void h264_h_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
3119 h264_loop_filter_chroma_c(pix, 1, stride, alpha, beta, tc0);
3122 static inline void h264_loop_filter_chroma_intra_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta)
3125 for( d = 0; d < 8; d++ ) {
3126 const int p0 = pix[-1*xstride];
3127 const int p1 = pix[-2*xstride];
3128 const int q0 = pix[0];
3129 const int q1 = pix[1*xstride];
3131 if( FFABS( p0 - q0 ) < alpha &&
3132 FFABS( p1 - p0 ) < beta &&
3133 FFABS( q1 - q0 ) < beta ) {
3135 pix[-xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */
3136 pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */
3141 static void h264_v_loop_filter_chroma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
3143 h264_loop_filter_chroma_intra_c(pix, stride, 1, alpha, beta);
3145 static void h264_h_loop_filter_chroma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
3147 h264_loop_filter_chroma_intra_c(pix, 1, stride, alpha, beta);
3150 static inline int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
3156 s += abs(pix1[0] - pix2[0]);
3157 s += abs(pix1[1] - pix2[1]);
3158 s += abs(pix1[2] - pix2[2]);
3159 s += abs(pix1[3] - pix2[3]);
3160 s += abs(pix1[4] - pix2[4]);
3161 s += abs(pix1[5] - pix2[5]);
3162 s += abs(pix1[6] - pix2[6]);
3163 s += abs(pix1[7] - pix2[7]);
3164 s += abs(pix1[8] - pix2[8]);
3165 s += abs(pix1[9] - pix2[9]);
3166 s += abs(pix1[10] - pix2[10]);
3167 s += abs(pix1[11] - pix2[11]);
3168 s += abs(pix1[12] - pix2[12]);
3169 s += abs(pix1[13] - pix2[13]);
3170 s += abs(pix1[14] - pix2[14]);
3171 s += abs(pix1[15] - pix2[15]);
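/* Illustrative note: the _x2/_y2/_xy2 SAD variants below match against the
 * half-pel interpolated reference. avg2()/avg4() are defined earlier in this
 * file and are assumed to be the usual rounded averages,
 * avg2(a,b) = (a+b+1)>>1 and avg4(a,b,c,d) = (a+b+c+d+2)>>2, i.e. exactly the
 * interpolation the motion compensation itself would perform. */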
3178 static int pix_abs16_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
3184 s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
3185 s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
3186 s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
3187 s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
3188 s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
3189 s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
3190 s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
3191 s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
3192 s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
3193 s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
3194 s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
3195 s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
3196 s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
3197 s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
3198 s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
3199 s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
3206 static int pix_abs16_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
3209 uint8_t *pix3 = pix2 + line_size;
3213 s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
3214 s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
3215 s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
3216 s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
3217 s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
3218 s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
3219 s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
3220 s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
3221 s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
3222 s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
3223 s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
3224 s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
3225 s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
3226 s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
3227 s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
3228 s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
3236 static int pix_abs16_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
3239 uint8_t *pix3 = pix2 + line_size;
3243 s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
3244 s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
3245 s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
3246 s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
3247 s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
3248 s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
3249 s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
3250 s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
3251 s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
3252 s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
3253 s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
3254 s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
3255 s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
3256 s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
3257 s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
3258 s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
3266 static inline int pix_abs8_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
3272 s += abs(pix1[0] - pix2[0]);
3273 s += abs(pix1[1] - pix2[1]);
3274 s += abs(pix1[2] - pix2[2]);
3275 s += abs(pix1[3] - pix2[3]);
3276 s += abs(pix1[4] - pix2[4]);
3277 s += abs(pix1[5] - pix2[5]);
3278 s += abs(pix1[6] - pix2[6]);
3279 s += abs(pix1[7] - pix2[7]);
3286 static int pix_abs8_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
3292 s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
3293 s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
3294 s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
3295 s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
3296 s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
3297 s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
3298 s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
3299 s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
3306 static int pix_abs8_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
3309 uint8_t *pix3 = pix2 + line_size;
3313 s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
3314 s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
3315 s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
3316 s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
3317 s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
3318 s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
3319 s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
3320 s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
3328 static int pix_abs8_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
3331 uint8_t *pix3 = pix2 + line_size;
3335 s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
3336 s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
3337 s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
3338 s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
3339 s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
3340 s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
3341 s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
3342 s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
3350 static int nsse16_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
3351 MpegEncContext *c = v;
3357 for(x=0; x<16; x++){
3358 score1+= (s1[x ] - s2[x ])*(s1[x ] - s2[x ]);
3361 for(x=0; x<15; x++){
3362 score2+= FFABS( s1[x ] - s1[x +stride]
3363 - s1[x+1] + s1[x+1+stride])
3364 -FFABS( s2[x ] - s2[x +stride]
3365 - s2[x+1] + s2[x+1+stride]);
3372 if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
3373 else return score1 + FFABS(score2)*8;
3376 static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
3377 MpegEncContext *c = v;
3384 score1+= (s1[x ] - s2[x ])*(s1[x ] - s2[x ]);
3388 score2+= FFABS( s1[x ] - s1[x +stride]
3389 - s1[x+1] + s1[x+1+stride])
3390 -FFABS( s2[x ] - s2[x +stride]
3391 - s2[x+1] + s2[x+1+stride]);
3398 if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
3399 else return score1 + FFABS(score2)*8;
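/* Illustrative note: nsse16_c()/nsse8_c() compute a noise-shaped SSE: score1
 * is the plain sum of squared errors, while score2 accumulates the difference
 * between the 2x2 gradient patterns of the two blocks, so distortion that
 * changes texture costs more than flat noise of the same energy. The blend
 * factor is avctx->nsse_weight, defaulting to 8 when no context is passed. */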
3402 static int try_8x8basis_c(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
3406 for(i=0; i<8*8; i++){
3407 int b= rem[i] + ((basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT));
3410 assert(-512<b && b<512);
3412 sum += (w*b)*(w*b)>>4;
3417 static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale){
3420 for(i=0; i<8*8; i++){
3421 rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
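/* Illustrative note: try_8x8basis_c() estimates the weighted squared error of
 * the residual if 'scale' times one basis function were added: basis[] is kept
 * at BASIS_SHIFT precision and the shift converts it, with rounding, to the
 * RECON_SHIFT domain of rem[], while weight[] biases the error measure per
 * coefficient. add_8x8basis_c() then applies the same update for real once
 * the encoder's search has found a beneficial scale. */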
3426 * Permutes an 8x8 block.
3427 * @param block the block which will be permuted according to the given permutation vector
3428 * @param permutation the permutation vector
3429 * @param last the last non-zero coefficient in scantable order, used to speed up the permutation
3430 * @param scantable the scantable in use; it is only used to speed up the permutation, the block is
3431 * not (inverse) permuted to scantable order!
3433 void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last)
3439 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3441 for(i=0; i<=last; i++){
3442 const int j= scantable[i];
3447 for(i=0; i<=last; i++){
3448 const int j= scantable[i];
3449 const int perm_j= permutation[j];
3450 block[perm_j]= temp[j];
3454 static int zero_cmp(void *s, uint8_t *a, uint8_t *b, int stride, int h){
3458 void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
3461 memset(cmp, 0, sizeof(void*)*6);
3469 cmp[i]= c->hadamard8_diff[i];
3475 cmp[i]= c->dct_sad[i];
3478 cmp[i]= c->dct264_sad[i];
3481 cmp[i]= c->dct_max[i];
3484 cmp[i]= c->quant_psnr[i];
3504 #if CONFIG_SNOW_ENCODER
3513 av_log(NULL, AV_LOG_ERROR,"internal error in cmp function selection\n");
3518 static void clear_block_c(DCTELEM *block)
3520 memset(block, 0, sizeof(DCTELEM)*64);
3524 * memset(blocks, 0, sizeof(DCTELEM)*6*64)
3526 static void clear_blocks_c(DCTELEM *blocks)
3528 memset(blocks, 0, sizeof(DCTELEM)*6*64);
3531 static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
3533 for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
3534 long a = *(long*)(src+i);
3535 long b = *(long*)(dst+i);
3536 *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
3539 dst[i+0] += src[i+0];
3542 static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
3544 for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
3545 long a = *(long*)(src1+i);
3546 long b = *(long*)(src2+i);
3547 *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
3550 dst[i] = src1[i]+src2[i];
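/* Illustrative note: add_bytes_c()/add_bytes_l2_c() add packed bytes one
 * machine word at a time without SIMD. Masking both operands with pb_7f keeps
 * each per-byte sum in 7 bits so no carry can cross a byte boundary, and the
 * final XOR with ((a^b) & pb_80) restores each byte's top bit, which plain
 * addition computes modulo 256 anyway. A two-byte example with 16-bit words:
 *
 *   a = 0x80ff, b = 0x8001                      (byte pairs 80,ff and 80,01)
 *   (a & 0x7f7f) + (b & 0x7f7f)  =  0x007f + 0x0001  =  0x0080
 *   (a ^ b) & 0x8080             =  0x00fe & 0x8080  =  0x0080
 *   0x0080 ^ 0x0080              =  0x0000  ==  bytewise (80+80, ff+01) & 0xff
 */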
3553 static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
3555 #if !HAVE_FAST_UNALIGNED
3556 if((long)src2 & (sizeof(long)-1)){
3557 for(i=0; i+7<w; i+=8){
3558 dst[i+0] = src1[i+0]-src2[i+0];
3559 dst[i+1] = src1[i+1]-src2[i+1];
3560 dst[i+2] = src1[i+2]-src2[i+2];
3561 dst[i+3] = src1[i+3]-src2[i+3];
3562 dst[i+4] = src1[i+4]-src2[i+4];
3563 dst[i+5] = src1[i+5]-src2[i+5];
3564 dst[i+6] = src1[i+6]-src2[i+6];
3565 dst[i+7] = src1[i+7]-src2[i+7];
3569 for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
3570 long a = *(long*)(src1+i);
3571 long b = *(long*)(src2+i);
3572 *(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
3575 dst[i+0] = src1[i+0]-src2[i+0];
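/* Illustrative note: diff_bytes_c() is the subtraction mirror of the trick
 * above: (a | pb_80) forces every byte's top bit so a per-byte borrow can
 * never propagate into the next byte, (b & pb_7f) clears the subtrahend's top
 * bits, and the XOR with ((a^b^pb_80) & pb_80) reconstructs the correct sign
 * bit of each result byte. The byte-wise pre-loop is only compiled in when
 * the platform lacks fast unaligned loads and src2 is misaligned. */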
3578 static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1, const uint8_t *diff, int w, int *left, int *left_top){
3586 l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
3595 static void sub_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top){
3603 const int pred= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF);
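/* Illustrative note: the HuffYUV median predictor above predicts each byte as
 * mid_pred(left, top, left + top - topleft); sub_hfyu_median_prediction_c()
 * stores the residue at encode time and add_hfyu_median_prediction_c() adds
 * it back at decode time. *left and *left_top carry the predictor state
 * across calls. */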
3613 static int add_hfyu_left_prediction_c(uint8_t *dst, const uint8_t *src, int w, int acc){
3616 for(i=0; i<w-1; i++){