/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"
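
/* OP_PUT stores the packed result as-is; OP_AVG first blends it with the
 * bytes already at the destination (pavgb, average with rounding), which
 * yields the avg_ motion-compensation variants. */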
#define OP_PUT(S,D)
#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"

/** Add the rounder from mm7 to mm3 and mm4, then shift both right by SHIFT */
#define NORMALIZE_MMX(SHIFT)                                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */      \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */      \
     "psraw     "SHIFT", %%mm3         \n\t"                    \
     "psraw     "SHIFT", %%mm4         \n\t"

#define TRANSFER_DO_PACK(OP)                    \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     OP((%2), %%mm3)                            \
     "movq      %%mm3, (%2)            \n\t"

#define TRANSFER_DONT_PACK(OP)                  \
     OP(0(%2), %%mm3)                           \
     OP(8(%2), %%mm4)                           \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"

/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)  "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

/** Compute the rounder 32-r or 8-r and unpack it to mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"

#define SHIFT2_LINE(OFF, R0,R1,R2,R3)           \
    "paddw     %%mm"#R2", %%mm"#R1"    \n\t"    \
    "movd      (%0,%3), %%mm"#R0"      \n\t"    \
    "pmullw    %%mm6, %%mm"#R1"        \n\t"    \
    "punpcklbw %%mm0, %%mm"#R0"        \n\t"    \
    "movd      (%0,%2), %%mm"#R3"      \n\t"    \
    "psubw     %%mm"#R0", %%mm"#R1"    \n\t"    \
    "punpcklbw %%mm0, %%mm"#R3"        \n\t"    \
    "paddw     %%mm7, %%mm"#R1"        \n\t"    \
    "psubw     %%mm"#R3", %%mm"#R1"    \n\t"    \
    "psraw     %4, %%mm"#R1"           \n\t"    \
    "movq      %%mm"#R1", "#OFF"(%1)   \n\t"    \
    "add       %2, %0                  \n\t"
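
/* Reference model of the vertical pass below (illustration only, not part
 * of the original code): per pixel it evaluates the VC-1 half-pel filter
 * at 16-bit precision,
 *     dst[x] = (-src[x-s] + 9*src[x] + 9*src[x+s] - src[x+2*s] + rnd) >> shift
 * with s = stride, writing 8 rows of 12 int16 (dst stride 24 bytes), of
 * which 11 are later consumed by a horizontal pass. mm0 is assumed to be
 * zeroed by the caller (see vc1_mspel_mc). */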
/** Sacrificing mm6 makes it possible to pipeline loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, x86_reg stride,
                                       int rnd, int64_t shift)
{
    __asm__ volatile(
        "mov       $3, %%"REG_c"           \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq      "MANGLE(ff_pw_9)", %%mm6 \n\t"
        "1:                                \n\t"
        "movd      (%0), %%mm2             \n\t"
        "add       %2, %0                  \n\t"
        "movd      (%0), %%mm3             \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "punpcklbw %%mm0, %%mm3            \n\t"
        SHIFT2_LINE(  0, 1, 2, 3, 4)
        SHIFT2_LINE( 24, 2, 3, 4, 1)
        SHIFT2_LINE( 48, 3, 4, 1, 2)
        SHIFT2_LINE( 72, 4, 1, 2, 3)
        SHIFT2_LINE( 96, 1, 2, 3, 4)
        SHIFT2_LINE(120, 2, 3, 4, 1)
        SHIFT2_LINE(144, 3, 4, 1, 2)
        SHIFT2_LINE(168, 4, 1, 2, 3)
        "sub       %6, %0                  \n\t"
        "add       $8, %1                  \n\t"
        "dec       %%"REG_c"               \n\t"
        "jnz 1b                            \n\t"
        : "+r"(src), "+r"(dst)
        : "r"(stride), "r"(-2*stride),
          "m"(shift), "m"(rnd), "r"(9*stride-4)
        : "%"REG_c, "memory"
    );
}

/**
 * Data is already unpacked, so some operations can be performed directly
 * on memory operands.
 */
#define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
                                             const int16_t *src, int rnd)\
{\
    int h = 8;\
\
    src -= 1;\
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
    __asm__ volatile(\
        LOAD_ROUNDER_MMX("%4")\
        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"\
        "movq      "MANGLE(ff_pw_9)", %%mm5 \n\t"\
        "1:                                \n\t"\
        "movq      2*0+0(%1), %%mm1        \n\t"\
        "movq      2*0+8(%1), %%mm2        \n\t"\
        "movq      2*1+0(%1), %%mm3        \n\t"\
        "movq      2*1+8(%1), %%mm4        \n\t"\
        "paddw     2*3+0(%1), %%mm1        \n\t"\
        "paddw     2*3+8(%1), %%mm2        \n\t"\
        "paddw     2*2+0(%1), %%mm3        \n\t"\
        "paddw     2*2+8(%1), %%mm4        \n\t"\
        "pmullw    %%mm5, %%mm3            \n\t"\
        "pmullw    %%mm5, %%mm4            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t"\
        "psubw     %%mm2, %%mm4            \n\t"\
        NORMALIZE_MMX("$7")\
        /* Remove bias */\
        "paddw     %%mm6, %%mm3            \n\t"\
        "paddw     %%mm6, %%mm4            \n\t"\
        TRANSFER_DO_PACK(OP)\
        "add       $24, %1                 \n\t"\
        "add       %3, %2                  \n\t"\
        "decl      %0                      \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(h), "+r" (src),  "+r" (dst)\
        : "r"(stride), "m"(rnd)\
        : "memory"\
    );\
}

VC1_HOR_16b_SHIFT2(OP_PUT, put_)
VC1_HOR_16b_SHIFT2(OP_AVG, avg_)
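
/* Scalar model of the horizontal pass above (illustration only): with in[]
 * the 16-bit intermediates from a vertical pass, each output byte is
 *     clip_uint8(((-in[x-1] + 9*in[x] + 9*in[x+1] - in[x+2] + rnd') >> 7) + 128)
 * where rnd' folds the rounding constant and the -16*1024 bias adjustment
 * applied to rnd before the loop. */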


/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for the *9 factor.
 */
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
                                     x86_reg stride, int rnd, x86_reg offset)\
{\
    rnd = 8-rnd;\
    __asm__ volatile(\
        "mov       $8, %%"REG_c"           \n\t"\
        LOAD_ROUNDER_MMX("%5")\
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"\
        "1:                                \n\t"\
        "movd      0(%0   ), %%mm3         \n\t"\
        "movd      4(%0   ), %%mm4         \n\t"\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "add       %2, %0                  \n\t"\
        "punpcklbw %%mm0, %%mm3            \n\t"\
        "punpcklbw %%mm0, %%mm4            \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "paddw     %%mm1, %%mm3            \n\t"\
        "paddw     %%mm2, %%mm4            \n\t"\
        "movd      0(%0,%3), %%mm1         \n\t"\
        "movd      4(%0,%3), %%mm2         \n\t"\
        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/\
        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/\
        NORMALIZE_MMX("$4")\
        "packuswb  %%mm4, %%mm3            \n\t"\
        OP((%1), %%mm3)\
        "movq      %%mm3, (%1)             \n\t"\
        "add       %6, %0                  \n\t"\
        "add       %4, %1                  \n\t"\
        "dec       %%"REG_c"               \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(src),  "+r"(dst)\
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
          "g"(stride-offset)\
        : "%"REG_c, "memory"\
    );\
}

VC1_SHIFT2(OP_PUT, put_)
VC1_SHIFT2(OP_AVG, avg_)
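
/* Scalar model of VC1_SHIFT2 (illustration only): a single-pass 1/2 shift
 * along the direction selected by offset (1 = horizontal, stride =
 * vertical), producing
 *     dst[x] = clip_uint8((-p[x-o] + 9*p[x] + 9*p[x+o] - p[x+2*o] + 8 - r) >> 4)
 * where o = offset and r is the caller's rounding bias. */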

/**
 * Filter coefficients made global to allow access by all 1/4 and 3/4 shift
 * interpolation functions.
 */
DECLARE_ASM_CONST(16, uint64_t, ff_pw_53) = 0x0035003500350035ULL;
DECLARE_ASM_CONST(16, uint64_t, ff_pw_18) = 0x0012001200120012ULL;
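
/* 53 and 18 are two of the four bicubic tap magnitudes; the remaining
 * factors 3 and 4 are produced inside MSPEL_FILTER13_CORE from ff_pw_3
 * and a left shift, respectively. */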

/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ    "movd 1" when the data must still be unpacked, or "movq 2"
 *                when it is already unpacked to 16 bits.
 * @param A1      Address of 1st tap (beware of unpacked/packed).
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)       \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                   \
     "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                   \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                 \
     "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                 \
     "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */               \
     "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */               \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1       \n\t" /* 4* */                  \
     "psllw     $2, %%mm2       \n\t" /* 4* */                  \
     "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */            \
     "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */            \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                 \
     "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                 \
     "paddw     %%mm1, %%mm3    \n\t" /* -4,53,18,-3 */         \
     "paddw     %%mm2, %%mm4    \n\t" /* -4,53,18,-3 */
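
/* Per 16-bit lane the core therefore accumulates
 *     acc = -3*A1 + 18*A2 + 53*A3 - 4*A4
 * i.e. the VC-1 bicubic quarter-pel kernel: (-4,53,18,-3)/64 for the 1/4
 * shift and its mirror (-3,18,53,-4)/64 for the 3/4 shift, depending on
 * the tap addresses passed as A1..A4. */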

/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,      \
                                 x86_reg src_stride,                    \
                                 int rnd, int64_t shift)                \
{                                                                       \
    int h = 8;                                                          \
    src -= src_stride;                                                  \
    __asm__ volatile(                                                   \
        LOAD_ROUNDER_MMX("%5")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"                       \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"                       \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)       \
        NORMALIZE_MMX("%6")                                             \
        TRANSFER_DONT_PACK(OP_PUT)                                      \
        /* Last 3 (in fact 4) bytes on the line */                      \
        "movd      8+"A1", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "movq      %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm1, %%mm1    \n\t"                                \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */                       \
        "movd      8+"A2", %%mm3   \n\t"                                \
        DO_UNPACK("%%mm3")                                              \
        "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                      \
        "psubw     %%mm1, %%mm3    \n\t" /*18,-3 */                     \
        "movd      8+"A3", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                      \
        "paddw     %%mm1, %%mm3    \n\t" /*53,18,-3 */                  \
        "movd      8+"A4", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "psllw     $2, %%mm1       \n\t" /* 4* */                       \
        "psubw     %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm7, %%mm3    \n\t"                                \
        "psraw     %6, %%mm3       \n\t"                                \
        "movq      %%mm3, 16(%2)   \n\t"                                \
        "add       %3, %1          \n\t"                                \
        "add       $24, %2         \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(src_stride), "r"(3*src_stride),                           \
          "m"(rnd), "m"(shift)                                          \
        : "memory"                                                      \
    );                                                                  \
}

/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here the taps are 16-bit data, so the parameters passed as A1 to A4
 * are simple constant offsets.
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME)        \
static void                                                             \
OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,    \
                                 const int16_t *src, int rnd)           \
{                                                                       \
    int h = 8;                                                          \
    src -= 1;                                                           \
    rnd -= (-4+53+18-3)*256; /* Add -256 bias */                        \
    __asm__ volatile(                                                   \
        LOAD_ROUNDER_MMX("%4")                                          \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"                    \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"                    \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$7")                                             \
        /* Remove bias */                                               \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"                    \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"                    \
        TRANSFER_DO_PACK(OP)                                            \
        "add       $24, %1         \n\t"                                \
        "add       %3, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(stride), "m"(rnd)                                         \
        : "memory"                                                      \
    );                                                                  \
}

/**
 * Macro to build the 8-bit, any direction, version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME)             \
static void                                                             \
OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,         \
                        x86_reg stride, int rnd, x86_reg offset)        \
{                                                                       \
    int h = 8;                                                          \
    src -= offset;                                                      \
    rnd = 32-rnd;                                                       \
    __asm__ volatile (                                                  \
        LOAD_ROUNDER_MMX("%6")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5       \n\t"                \
        "movq      "MANGLE(ff_pw_18)", %%mm6       \n\t"                \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd   1", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$6")                                             \
        TRANSFER_DO_PACK(OP)                                            \
        "add       %5, %1          \n\t"                                \
        "add       %5, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)             \
        : "memory"                                                      \
    );                                                                  \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)

typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

/**
 * Interpolate fractional pel values by applying proper vertical then
 * horizontal filter.
 *
 * @param  dst     Destination buffer for interpolated pels.
 * @param  src     Source buffer.
 * @param  stride  Stride for both src and dst buffers.
 * @param  hmode   Horizontal filter (expressed in quarter pixels shift).
 * @param  vmode   Vertical filter.
 * @param  rnd     Rounding bias.
 */
#define VC1_MSPEL_MC(OP)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
         { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0         \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int              r;\
            DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
}

VC1_MSPEL_MC(put_)
VC1_MSPEL_MC(avg_)
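
/* Worked example (illustration only): hmode=2, vmode=1 selects a vertical
 * 1/4 shift followed by a horizontal 1/2 shift. shift becomes
 * (shift_value[2]+shift_value[1])>>1 = (1+5)>>1 = 3, so
 * vc1_put_ver_16b_shift1_mmx fills tmp with r = (1<<2)+rnd-1, then
 * put_/avg_vc1_hor_16b_shift2_mmx filters tmp+1 into dst with 64-rnd. */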

/** Macro to ease declaration of the bicubic filter interpolation functions */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}\
static void avg_vc1_mspel_mc ## a ## b ## _mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)
491
492 static void vc1_inv_trans_4x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
493 {
494     int dc = block[0];
495     dc = (17 * dc +  4) >> 3;
496     dc = (17 * dc + 64) >> 7;
497     __asm__ volatile(
498         "movd          %0, %%mm0 \n\t"
499         "pshufw $0, %%mm0, %%mm0 \n\t"
500         "pxor       %%mm1, %%mm1 \n\t"
501         "psubw      %%mm0, %%mm1 \n\t"
502         "packuswb   %%mm0, %%mm0 \n\t"
503         "packuswb   %%mm1, %%mm1 \n\t"
504         ::"r"(dc)
505     );
506     __asm__ volatile(
507         "movd          %0, %%mm2 \n\t"
508         "movd          %1, %%mm3 \n\t"
509         "movd          %2, %%mm4 \n\t"
510         "movd          %3, %%mm5 \n\t"
511         "paddusb    %%mm0, %%mm2 \n\t"
512         "paddusb    %%mm0, %%mm3 \n\t"
513         "paddusb    %%mm0, %%mm4 \n\t"
514         "paddusb    %%mm0, %%mm5 \n\t"
515         "psubusb    %%mm1, %%mm2 \n\t"
516         "psubusb    %%mm1, %%mm3 \n\t"
517         "psubusb    %%mm1, %%mm4 \n\t"
518         "psubusb    %%mm1, %%mm5 \n\t"
519         "movd       %%mm2, %0    \n\t"
520         "movd       %%mm3, %1    \n\t"
521         "movd       %%mm4, %2    \n\t"
522         "movd       %%mm5, %3    \n\t"
523         :"+m"(*(uint32_t*)(dest+0*linesize)),
524          "+m"(*(uint32_t*)(dest+1*linesize)),
525          "+m"(*(uint32_t*)(dest+2*linesize)),
526          "+m"(*(uint32_t*)(dest+3*linesize))
527     );
528 }

static void vc1_inv_trans_4x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_8x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_8x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
}

#define LOOP_FILTER(EXT) \
void ff_vc1_v_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_v_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
\
static void vc1_v_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_v_loop_filter8_ ## EXT(src,   stride, pq); \
    ff_vc1_v_loop_filter8_ ## EXT(src+8, stride, pq); \
} \
\
static void vc1_h_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_h_loop_filter8_ ## EXT(src,          stride, pq); \
    ff_vc1_h_loop_filter8_ ## EXT(src+8*stride, stride, pq); \
}

#if HAVE_YASM
LOOP_FILTER(mmx)
LOOP_FILTER(mmx2)
LOOP_FILTER(sse2)
LOOP_FILTER(ssse3)

void ff_vc1_h_loop_filter8_sse4(uint8_t *src, int stride, int pq);

static void vc1_h_loop_filter16_sse4(uint8_t *src, int stride, int pq)
{
    ff_vc1_h_loop_filter8_sse4(src,          stride, pq);
    ff_vc1_h_loop_filter8_sse4(src+8*stride, stride, pq);
}
#endif

void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
    mm_flags = mm_support();

    dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;

    if (mm_flags & FF_MM_MMX2) {
        dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmx2;

        dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmx2;
        dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmx2;
        dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmx2;
        dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmx2;
    }

#define ASSIGN_LF(EXT) \
        dsp->vc1_v_loop_filter4  = ff_vc1_v_loop_filter4_ ## EXT; \
        dsp->vc1_h_loop_filter4  = ff_vc1_h_loop_filter4_ ## EXT; \
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_ ## EXT; \
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_ ## EXT; \
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_ ## EXT; \
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT

#if HAVE_YASM
    if (mm_flags & FF_MM_MMX) {
        ASSIGN_LF(mmx);
    }
    if (mm_flags & FF_MM_MMX2) {
        ASSIGN_LF(mmx2);
    }
    if (mm_flags & FF_MM_SSE2) {
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_sse2;
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse2;
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_sse2;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse2;
    }
    if (mm_flags & FF_MM_SSSE3) {
        ASSIGN_LF(ssse3);
    }
    if (mm_flags & FF_MM_SSE4) {
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse4;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
    }
#endif
}