/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>
#include <stdint.h>

#include "libavutil/avutil.h"
#include "libavutil/bswap.h"
#include "libavutil/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"

#include "config.h"
#include "swscale_internal.h"

#define RGB2YUV_SHIFT 15
#define BY ((int)(0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int)(0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ((int)(0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ((int)(0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int)(0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int)(0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ((int)(0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ((int)(0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int)(0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
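
/* The constants above are the BT.601 luma/chroma weights (0.299/0.587/0.114
 * for Y and the matching Cb/Cr terms) pre-scaled to limited range (219/255
 * for Y, 224/255 for chroma) and converted to 15-bit fixed point, e.g.
 * BY = round(0.114 * 219 / 255 * 32768) = 3208.  A conversion is then an
 * integer dot product followed by a right shift by RGB2YUV_SHIFT. */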

#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

#define r ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE) ? b_r : r_b)
#define b ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE) ? r_b : b_r)

static av_always_inline void rgb48ToY_c_template(uint16_t *dst,
                                                 const uint16_t *src, int width,
                                                 enum AVPixelFormat origin)
{
    int i;
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src[i * 3 + 0]);
        unsigned int g   = input_pixel(&src[i * 3 + 1]);
        unsigned int b_r = input_pixel(&src[i * 3 + 2]);

        dst[i] = (RY * r + GY * g + BY * b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

static av_always_inline void rgb48ToUV_c_template(uint16_t *dstU,
                                                  uint16_t *dstV,
                                                  const uint16_t *src1,
                                                  const uint16_t *src2,
                                                  int width,
                                                  enum AVPixelFormat origin)
{
    int i;
    for (i = 0; i < width; i++) {
        int r_b = input_pixel(&src1[i * 3 + 0]);
        int g   = input_pixel(&src1[i * 3 + 1]);
        int b_r = input_pixel(&src1[i * 3 + 2]);

        dstU[i] = (RU * r + GU * g + BU * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}
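
/* Note on the rounding constants: with 16-bit samples the bias terms fold the
 * limited-range offsets into the sum.  (0x2000 << (RGB2YUV_SHIFT - 1)) >>
 * RGB2YUV_SHIFT is 4096 (= 16 << 8, the Y black level) and (0x10000 <<
 * (RGB2YUV_SHIFT - 1)) >> RGB2YUV_SHIFT is 32768 (= 128 << 8, the chroma
 * midpoint); the extra +1 in 0x2001/0x10001 adds the usual 0.5 for rounding. */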

static av_always_inline void rgb48ToUV_half_c_template(uint16_t *dstU,
                                                       uint16_t *dstV,
                                                       const uint16_t *src1,
                                                       const uint16_t *src2,
                                                       int width,
                                                       enum AVPixelFormat origin)
{
    int i;
    for (i = 0; i < width; i++) {
        int r_b = (input_pixel(&src1[6 * i + 0]) +
                   input_pixel(&src1[6 * i + 3]) + 1) >> 1;
        int g   = (input_pixel(&src1[6 * i + 1]) +
                   input_pixel(&src1[6 * i + 4]) + 1) >> 1;
        int b_r = (input_pixel(&src1[6 * i + 2]) +
                   input_pixel(&src1[6 * i + 5]) + 1) >> 1;

        dstU[i] = (RU * r + GU * g + BU * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}
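
/* The *_half variants are used when the destination chroma is horizontally
 * subsampled: each output U/V sample is computed from the rounded average of
 * two neighbouring input pixels (hence the stride of six uint16_t per
 * iteration above). */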

#undef r
#undef b
#undef input_pixel

#define rgb48funcs(pattern, BE_LE, origin) \
static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, \
                                            const uint8_t *_src, \
                                            int width, \
                                            uint32_t *unused) \
{ \
    const uint16_t *src = (const uint16_t *)_src; \
    uint16_t *dst = (uint16_t *)_dst; \
    rgb48ToY_c_template(dst, src, width, origin); \
} \
static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, \
                                             uint8_t *_dstV, \
                                             const uint8_t *_src1, \
                                             const uint8_t *_src2, \
                                             int width, \
                                             uint32_t *unused) \
{ \
    const uint16_t *src1 = (const uint16_t *)_src1, \
                   *src2 = (const uint16_t *)_src2; \
    uint16_t *dstU = (uint16_t *)_dstU, \
             *dstV = (uint16_t *)_dstV; \
    rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin); \
} \
static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, \
                                                  uint8_t *_dstV, \
                                                  const uint8_t *_src1, \
                                                  const uint8_t *_src2, \
                                                  int width, \
                                                  uint32_t *unused) \
{ \
    const uint16_t *src1 = (const uint16_t *)_src1, \
                   *src2 = (const uint16_t *)_src2; \
    uint16_t *dstU = (uint16_t *)_dstU, \
             *dstV = (uint16_t *)_dstV; \
    rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin); \
}

rgb48funcs(rgb, LE, AV_PIX_FMT_RGB48LE)
rgb48funcs(rgb, BE, AV_PIX_FMT_RGB48BE)
rgb48funcs(bgr, LE, AV_PIX_FMT_BGR48LE)
rgb48funcs(bgr, BE, AV_PIX_FMT_BGR48BE)
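
/* Each invocation above instantiates three readers via token pasting, e.g.
 * rgb48funcs(rgb, LE, AV_PIX_FMT_RGB48LE) defines rgb48LEToY_c,
 * rgb48LEToUV_c and rgb48LEToUV_half_c, all forwarding to the templates. */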

#define input_pixel(i) ((origin == AV_PIX_FMT_RGBA || \
                         origin == AV_PIX_FMT_BGRA || \
                         origin == AV_PIX_FMT_ARGB || \
                         origin == AV_PIX_FMT_ABGR) \
                        ? AV_RN32A(&src[(i) * 4]) \
                        : (isBE(origin) ? AV_RB16(&src[(i) * 2]) \
                                        : AV_RL16(&src[(i) * 2])))
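
/* For the 32-bit packed formats a pixel is a single aligned 32-bit load; for
 * the 16-bit packed formats it is a 16-bit load honouring the format's
 * endianness.  The mask/shift parameters below then pick the individual
 * components out of that raw value. */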

static av_always_inline void rgb16_32ToY_c_template(uint8_t *dst,
                                                    const uint8_t *src,
                                                    int width,
                                                    enum AVPixelFormat origin,
                                                    int shr, int shg,
                                                    int shb, int shp,
                                                    int maskr, int maskg,
                                                    int maskb, int rsh,
                                                    int gsh, int bsh, int S)
{
    const int ry = RY << rsh, gy = GY << gsh, by = BY << bsh;
    const unsigned rnd = 33u << (S - 1);
    int i;

    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b  = (px & maskb) >> shb;
        int g  = (px & maskg) >> shg;
        int r  = (px & maskr) >> shr;

        dst[i] = (ry * r + gy * g + by * b + rnd) >> S;
    }
}

static av_always_inline void rgb16_32ToUV_c_template(uint8_t *dstU,
                                                     uint8_t *dstV,
                                                     const uint8_t *src,
                                                     int width,
                                                     enum AVPixelFormat origin,
                                                     int shr, int shg,
                                                     int shb, int shp,
                                                     int maskr, int maskg,
                                                     int maskb, int rsh,
                                                     int gsh, int bsh, int S)
{
    const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh,
              rv = RV << rsh, gv = GV << gsh, bv = BV << bsh;
    const unsigned rnd = 257u << (S - 1);
    int i;

    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b  = (px & maskb) >> shb;
        int g  = (px & maskg) >> shg;
        int r  = (px & maskr) >> shr;

        dstU[i] = (ru * r + gu * g + bu * b + rnd) >> S;
        dstV[i] = (rv * r + gv * g + bv * b + rnd) >> S;
    }
}

static av_always_inline void rgb16_32ToUV_half_c_template(uint8_t *dstU,
                                                          uint8_t *dstV,
                                                          const uint8_t *src,
                                                          int width,
                                                          enum AVPixelFormat origin,
                                                          int shr, int shg,
                                                          int shb, int shp,
                                                          int maskr, int maskg,
                                                          int maskb, int rsh,
                                                          int gsh, int bsh, int S)
{
    const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh,
              rv = RV << rsh, gv = GV << gsh, bv = BV << bsh,
              maskgx = ~(maskr | maskb);
    const unsigned rnd = 257u << S;
    int i;

    maskr |= maskr << 1;
    maskb |= maskb << 1;
    maskg |= maskg << 1;
    for (i = 0; i < width; i++) {
        int px0 = input_pixel(2 * i + 0) >> shp;
        int px1 = input_pixel(2 * i + 1) >> shp;
        int b, r, g = (px0 & maskgx) + (px1 & maskgx);
        int rb = px0 + px1 - g;

        b = (rb & maskb) >> shb;
        if (shp ||
            origin == AV_PIX_FMT_BGR565LE || origin == AV_PIX_FMT_BGR565BE ||
            origin == AV_PIX_FMT_RGB565LE || origin == AV_PIX_FMT_RGB565BE) {
            g >>= shg;
        } else {
            g = (g & maskg) >> shg;
        }
        r = (rb & maskr) >> shr;

        dstU[i] = (ru * r + gu * g + bu * b + rnd) >> (S + 1);
        dstV[i] = (rv * r + gv * g + bv * b + rnd) >> (S + 1);
    }
}

#define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr, \
                         maskg, maskb, rsh, gsh, bsh, S) \
static void name ## ToY_c(uint8_t *dst, const uint8_t *src, \
                          int width, uint32_t *unused) \
{ \
    rgb16_32ToY_c_template(dst, src, width, fmt, shr, shg, shb, shp, \
                           maskr, maskg, maskb, rsh, gsh, bsh, S); \
} \
static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \
                           const uint8_t *src, const uint8_t *dummy, \
                           int width, uint32_t *unused) \
{ \
    rgb16_32ToUV_c_template(dstU, dstV, src, width, fmt, \
                            shr, shg, shb, shp, \
                            maskr, maskg, maskb, rsh, gsh, bsh, S); \
} \
static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \
                                const uint8_t *src, \
                                const uint8_t *dummy, \
                                int width, uint32_t *unused) \
{ \
    rgb16_32ToUV_half_c_template(dstU, dstV, src, width, fmt, \
                                 shr, shg, shb, shp, \
                                 maskr, maskg, maskb, \
                                 rsh, gsh, bsh, S); \
}
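
/* The per-format parameters below describe where each component sits in the
 * packed pixel.  For AV_PIX_FMT_RGB565LE, for example, red occupies bits
 * 11-15 (maskr 0xF800), green bits 5-10 (maskg 0x07E0) and blue bits 0-4
 * (maskb 0x001F); instead of shifting the components down, the coefficients
 * are pre-shifted (rsh/gsh/bsh) and the larger final shift S compensates, so
 * the per-pixel work stays at three ANDs and a dot product. */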

rgb16_32_wrapper(AV_PIX_FMT_BGR32,    bgr32,  16, 0,  0, 0, 0xFF0000, 0xFF00,   0x00FF,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR32_1,  bgr321, 16, 0,  0, 8, 0xFF0000, 0xFF00,   0x00FF,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32,    rgb32,   0, 0, 16, 0,   0x00FF, 0xFF00, 0xFF0000,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32_1,  rgb321,  0, 0, 16, 8,   0x00FF, 0xFF00, 0xFF0000,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR565LE, bgr16le, 0, 0,  0, 0,   0x001F, 0x07E0,   0xF800, 11, 5,  0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555LE, bgr15le, 0, 0,  0, 0,   0x001F, 0x03E0,   0x7C00, 10, 5,  0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444LE, bgr12le, 0, 0,  0, 0,   0x000F, 0x00F0,   0x0F00,  8, 4,  0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565LE, rgb16le, 0, 0,  0, 0,   0xF800, 0x07E0,   0x001F,  0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555LE, rgb15le, 0, 0,  0, 0,   0x7C00, 0x03E0,   0x001F,  0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444LE, rgb12le, 0, 0,  0, 0,   0x0F00, 0x00F0,   0x000F,  0, 4,  8, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_BGR565BE, bgr16be, 0, 0,  0, 0,   0x001F, 0x07E0,   0xF800, 11, 5,  0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555BE, bgr15be, 0, 0,  0, 0,   0x001F, 0x03E0,   0x7C00, 10, 5,  0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444BE, bgr12be, 0, 0,  0, 0,   0x000F, 0x00F0,   0x0F00,  8, 4,  0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565BE, rgb16be, 0, 0,  0, 0,   0xF800, 0x07E0,   0x001F,  0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555BE, rgb15be, 0, 0,  0, 0,   0x7C00, 0x03E0,   0x001F,  0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444BE, rgb12be, 0, 0,  0, 0,   0x0F00, 0x00F0,   0x000F,  0, 4,  8, RGB2YUV_SHIFT + 4)

#undef input_pixel

static void abgrToA_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[4 * i];
}

static void rgbaToA_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[4 * i + 3];
}

static void palToY_c(uint8_t *dst, const uint8_t *src, int width, uint32_t *pal)
{
    int i;
    for (i = 0; i < width; i++) {
        int d = src[i];

        dst[i] = pal[d] & 0xFF;
    }
}

static void palToUV_c(uint8_t *dstU, uint8_t *dstV,
                      const uint8_t *src1, const uint8_t *src2,
                      int width, uint32_t *pal)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int p = pal[src1[i]];

        dstU[i] = p >> 8;
        dstV[i] = p >> 16;
    }
}

static void monowhite2Y_c(uint8_t *dst, const uint8_t *src,
                          int width, uint32_t *unused)
{
    int i, j;
    width = (width + 7) >> 3;
    for (i = 0; i < width; i++) {
        int d = ~src[i];
        for (j = 0; j < 8; j++)
            dst[8 * i + j] = ((d >> (7 - j)) & 1) * 255;
    }
}

static void monoblack2Y_c(uint8_t *dst, const uint8_t *src,
                          int width, uint32_t *unused)
{
    int i, j;
    width = (width + 7) >> 3;
    for (i = 0; i < width; i++) {
        int d = src[i];
        for (j = 0; j < 8; j++)
            dst[8 * i + j] = ((d >> (7 - j)) & 1) * 255;
    }
}
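
/* Both 1 bpp readers expand each input bit into a full 0/255 luma byte,
 * eight output pixels per source byte.  AV_PIX_FMT_MONOWHITE stores white as
 * 0, which is why monowhite2Y_c inverts the source byte before extracting
 * the bits. */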

static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i];
}

static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 1];
        dstV[i] = src1[4 * i + 3];
    }
    assert(src1 == src2);
}

static void yvy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstV[i] = src1[4 * i + 1];
        dstU[i] = src1[4 * i + 3];
    }
    assert(src1 == src2);
}

static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, int width,
                       uint32_t *unused)
{
    int i;
    const uint16_t *src = (const uint16_t *)_src;
    uint16_t *dst = (uint16_t *)_dst;
    for (i = 0; i < width; i++)
        dst[i] = av_bswap16(src[i]);
}

static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *_src1,
                        const uint8_t *_src2, int width, uint32_t *unused)
{
    int i;
    const uint16_t *src1 = (const uint16_t *)_src1,
                   *src2 = (const uint16_t *)_src2;
    uint16_t *dstU = (uint16_t *)_dstU, *dstV = (uint16_t *)_dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = av_bswap16(src1[i]);
        dstV[i] = av_bswap16(src2[i]);
    }
}

static void read_ya16le_gray_c(uint8_t *dst, const uint8_t *src, int width,
                               uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 4));
}

static void read_ya16le_alpha_c(uint8_t *dst, const uint8_t *src, int width,
                                uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 4 + 2));
}

static void read_ya16be_gray_c(uint8_t *dst, const uint8_t *src, int width,
                               uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RB16(src + i * 4));
}

static void read_ya16be_alpha_c(uint8_t *dst, const uint8_t *src, int width,
                                uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RB16(src + i * 4 + 2));
}
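
/* YA16 is packed gray+alpha with 16 bits per component: bytes 0-1 of each
 * 4-byte pixel hold the gray value and bytes 2-3 the alpha, which is why the
 * gray readers fetch offset 0 and the alpha readers offset 2. */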

/* This is almost identical to the previous, and exists only because
 * yuy2To(Y|UV)_c(dst, src + 1, ...) would have 100% unaligned accesses. */
static void uyvyToY_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i + 1];
}

static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 0];
        dstV[i] = src1[4 * i + 2];
    }
    assert(src1 == src2);
}

static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2,
                                        const uint8_t *src, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dst1[i] = src[2 * i + 0];
        dst2[i] = src[2 * i + 1];
    }
}

static void nv12ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstU, dstV, src1, width);
}

static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstV, dstU, src1, width);
}
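
/* NV12 and NV21 share a single interleaved chroma plane; the only difference
 * is the component order (NV12 stores U first, NV21 stores V first), so the
 * two wrappers simply swap the destination pointers. */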

static void p010LEToY_c(uint8_t *dst, const uint8_t *src, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dst + i * 2, AV_RL16(src + i * 2) >> 6);
    }
}

static void p010BEToY_c(uint8_t *dst, const uint8_t *src, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dst + i * 2, AV_RB16(src + i * 2) >> 6);
    }
}

static void p010LEToUV_c(uint8_t *dstU, uint8_t *dstV,
                         const uint8_t *src1, const uint8_t *src2,
                         int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RL16(src1 + i * 4 + 0) >> 6);
        AV_WN16(dstV + i * 2, AV_RL16(src1 + i * 4 + 2) >> 6);
    }
}

static void p010BEToUV_c(uint8_t *dstU, uint8_t *dstV,
                         const uint8_t *src1, const uint8_t *src2,
                         int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RB16(src1 + i * 4 + 0) >> 6);
        AV_WN16(dstV + i * 2, AV_RB16(src1 + i * 4 + 2) >> 6);
    }
}
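
/* P010 keeps 10 significant bits in the high bits of each 16-bit word, so
 * shifting right by 6 yields plain 10-bit luma/chroma samples in native
 * endianness for the rest of the scaler. */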

#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

static void bgr24ToY_c(uint8_t *dst, const uint8_t *src,
                       int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int r = src[i * 3 + 2];

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    }
}

static void bgr24ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int r = src1[3 * i + 2];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
    assert(src1 == src2);
}

static void bgr24ToUV_half_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int r = src1[6 * i + 2] + src1[6 * i + 5];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
        dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
    }
    assert(src1 == src2);
}

static void rgb24ToY_c(uint8_t *dst, const uint8_t *src, int width,
                       uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int r = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int b = src[i * 3 + 2];

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    }
}

static void rgb24ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int b = src1[3 * i + 2];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

static void rgb24ToUV_half_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int b = src1[6 * i + 2] + src1[6 * i + 5];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
        dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
    }
}
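
/* The bgr24/rgb24 readers differ only in component order.  Their *_half
 * variants add two horizontally adjacent pixels without pre-averaging and
 * instead use a doubled bias (257 << RGB2YUV_SHIFT) plus one extra bit of
 * final shift, which gives the same rounding as averaging first. */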

static void planar_rgb_to_y(uint8_t *dst, const uint8_t *src[4], int width)
{
    int i;
    for (i = 0; i < width; i++) {
        int g = src[0][i];
        int b = src[1][i];
        int r = src[2][i];

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    }
}

static void planar_rgb_to_a(uint8_t *dst, const uint8_t *src[4], int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[3][i];
}

static void planar_rgb_to_uv(uint8_t *dstU, uint8_t *dstV, const uint8_t *src[4], int width)
{
    int i;
    for (i = 0; i < width; i++) {
        int g = src[0][i];
        int b = src[1][i];
        int r = src[2][i];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}
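
/* For the planar GBR(A) formats the plane order is G, B, R (and A in plane
 * 3), which is why the component reads above and in the 16-bit variants
 * below index the planes in that order. */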

#define rdpx(src) \
    is_be ? AV_RB16(src) : AV_RL16(src)
static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4],
                                               int width, int bpc, int is_be)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dst = (uint16_t *)_dst;
    for (i = 0; i < width; i++) {
        int g = rdpx(src[0] + i);
        int b = rdpx(src[1] + i);
        int r = rdpx(src[2] + i);

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT);
    }
}

static void planar_rgb9le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 9, 0);
}

static void planar_rgb9be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 9, 1);
}

static void planar_rgb10le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 10, 0);
}

static void planar_rgb10be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 10, 1);
}

static void planar_rgb16le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 16, 0);
}

static void planar_rgb16be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 16, 1);
}

static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
                                                const uint8_t *_src[4], int width,
                                                int bpc, int is_be)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dstU = (uint16_t *)_dstU;
    uint16_t *dstV = (uint16_t *)_dstV;
    for (i = 0; i < width; i++) {
        int g = rdpx(src[0] + i);
        int b = rdpx(src[1] + i);
        int r = rdpx(src[2] + i);

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT;
    }
}
#undef rdpx

static void planar_rgb9le_to_uv(uint8_t *dstU, uint8_t *dstV,
                                const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 9, 0);
}

static void planar_rgb9be_to_uv(uint8_t *dstU, uint8_t *dstV,
                                const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 9, 1);
}

static void planar_rgb10le_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 10, 0);
}

static void planar_rgb10be_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 10, 1);
}

static void planar_rgb16le_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 16, 0);
}

static void planar_rgb16be_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 16, 1);
}
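
/* ff_sws_init_input_funcs() below selects the per-format readers: lumToYV12,
 * chrToYV12 and alpToYV12 for packed inputs, readLumPlanar/readChrPlanar/
 * readAlpPlanar for planar RGB, with the *_half chroma readers chosen when
 * the destination chroma is horizontally subsampled (chrSrcHSubSample). */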

av_cold void ff_sws_init_input_funcs(SwsContext *c)
{
    enum AVPixelFormat srcFormat = c->srcFormat;

    c->chrToYV12 = NULL;
    switch (srcFormat) {
    case AV_PIX_FMT_YUYV422:
        c->chrToYV12 = yuy2ToUV_c;
        break;
    case AV_PIX_FMT_YVYU422:
        c->chrToYV12 = yvy2ToUV_c;
        break;
    case AV_PIX_FMT_UYVY422:
        c->chrToYV12 = uyvyToUV_c;
        break;
    case AV_PIX_FMT_NV12:
        c->chrToYV12 = nv12ToUV_c;
        break;
    case AV_PIX_FMT_NV21:
        c->chrToYV12 = nv21ToUV_c;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_PAL8:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
        c->chrToYV12 = palToUV_c;
        break;
    case AV_PIX_FMT_GBRP9LE:
        c->readChrPlanar = planar_rgb9le_to_uv;
        break;
    case AV_PIX_FMT_GBRP10LE:
        c->readChrPlanar = planar_rgb10le_to_uv;
        break;
    case AV_PIX_FMT_GBRAP16LE:
    case AV_PIX_FMT_GBRP16LE:
        c->readChrPlanar = planar_rgb16le_to_uv;
        break;
    case AV_PIX_FMT_GBRP9BE:
        c->readChrPlanar = planar_rgb9be_to_uv;
        break;
    case AV_PIX_FMT_GBRP10BE:
        c->readChrPlanar = planar_rgb10be_to_uv;
        break;
    case AV_PIX_FMT_GBRAP16BE:
    case AV_PIX_FMT_GBRP16BE:
        c->readChrPlanar = planar_rgb16be_to_uv;
        break;
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_GBRP:
        c->readChrPlanar = planar_rgb_to_uv;
        break;
#if HAVE_BIGENDIAN
    case AV_PIX_FMT_YUV420P9LE:
    case AV_PIX_FMT_YUV422P9LE:
    case AV_PIX_FMT_YUV444P9LE:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUV420P12LE:
    case AV_PIX_FMT_YUV422P12LE:
    case AV_PIX_FMT_YUV420P16LE:
    case AV_PIX_FMT_YUV422P16LE:
    case AV_PIX_FMT_YUV444P16LE:
    case AV_PIX_FMT_YUVA420P9LE:
    case AV_PIX_FMT_YUVA422P9LE:
    case AV_PIX_FMT_YUVA444P9LE:
    case AV_PIX_FMT_YUVA420P10LE:
    case AV_PIX_FMT_YUVA422P10LE:
    case AV_PIX_FMT_YUVA444P10LE:
    case AV_PIX_FMT_YUVA420P16LE:
    case AV_PIX_FMT_YUVA422P16LE:
    case AV_PIX_FMT_YUVA444P16LE:
        c->chrToYV12 = bswap16UV_c;
        break;
#else
    case AV_PIX_FMT_YUV420P9BE:
    case AV_PIX_FMT_YUV422P9BE:
    case AV_PIX_FMT_YUV444P9BE:
    case AV_PIX_FMT_YUV420P10BE:
    case AV_PIX_FMT_YUV422P10BE:
    case AV_PIX_FMT_YUV444P10BE:
    case AV_PIX_FMT_YUV420P12BE:
    case AV_PIX_FMT_YUV422P12BE:
    case AV_PIX_FMT_YUV420P16BE:
    case AV_PIX_FMT_YUV422P16BE:
    case AV_PIX_FMT_YUV444P16BE:
    case AV_PIX_FMT_YUVA420P9BE:
    case AV_PIX_FMT_YUVA422P9BE:
    case AV_PIX_FMT_YUVA444P9BE:
    case AV_PIX_FMT_YUVA420P10BE:
    case AV_PIX_FMT_YUVA422P10BE:
    case AV_PIX_FMT_YUVA444P10BE:
    case AV_PIX_FMT_YUVA420P16BE:
    case AV_PIX_FMT_YUVA422P16BE:
    case AV_PIX_FMT_YUVA444P16BE:
        c->chrToYV12 = bswap16UV_c;
        break;
#endif
    case AV_PIX_FMT_P010LE:
        c->chrToYV12 = p010LEToUV_c;
        break;
    case AV_PIX_FMT_P010BE:
        c->chrToYV12 = p010BEToUV_c;
        break;
    }
    if (c->chrSrcHSubSample) {
        switch (srcFormat) {
        case AV_PIX_FMT_RGB48BE:
            c->chrToYV12 = rgb48BEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            c->chrToYV12 = rgb48LEToUV_half_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            c->chrToYV12 = bgr48BEToUV_half_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            c->chrToYV12 = bgr48LEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB32:
            c->chrToYV12 = bgr32ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB32_1:
            c->chrToYV12 = bgr321ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR24:
            c->chrToYV12 = bgr24ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR565LE:
            c->chrToYV12 = bgr16leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR565BE:
            c->chrToYV12 = bgr16beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR555LE:
            c->chrToYV12 = bgr15leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR555BE:
            c->chrToYV12 = bgr15beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR444LE:
            c->chrToYV12 = bgr12leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR444BE:
            c->chrToYV12 = bgr12beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR32:
            c->chrToYV12 = rgb32ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR32_1:
            c->chrToYV12 = rgb321ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB24:
            c->chrToYV12 = rgb24ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB565LE:
            c->chrToYV12 = rgb16leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB565BE:
            c->chrToYV12 = rgb16beToUV_half_c;
            break;
        case AV_PIX_FMT_RGB555LE:
            c->chrToYV12 = rgb15leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB555BE:
            c->chrToYV12 = rgb15beToUV_half_c;
            break;
        case AV_PIX_FMT_RGB444LE:
            c->chrToYV12 = rgb12leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB444BE:
            c->chrToYV12 = rgb12beToUV_half_c;
            break;
        }
    } else {
        switch (srcFormat) {
        case AV_PIX_FMT_RGB48BE:
            c->chrToYV12 = rgb48BEToUV_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            c->chrToYV12 = rgb48LEToUV_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            c->chrToYV12 = bgr48BEToUV_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            c->chrToYV12 = bgr48LEToUV_c;
            break;
        case AV_PIX_FMT_RGB32:
            c->chrToYV12 = bgr32ToUV_c;
            break;
        case AV_PIX_FMT_RGB32_1:
            c->chrToYV12 = bgr321ToUV_c;
            break;
        case AV_PIX_FMT_BGR24:
            c->chrToYV12 = bgr24ToUV_c;
            break;
        case AV_PIX_FMT_BGR565LE:
            c->chrToYV12 = bgr16leToUV_c;
            break;
        case AV_PIX_FMT_BGR565BE:
            c->chrToYV12 = bgr16beToUV_c;
            break;
        case AV_PIX_FMT_BGR555LE:
            c->chrToYV12 = bgr15leToUV_c;
            break;
        case AV_PIX_FMT_BGR555BE:
            c->chrToYV12 = bgr15beToUV_c;
            break;
        case AV_PIX_FMT_BGR444LE:
            c->chrToYV12 = bgr12leToUV_c;
            break;
        case AV_PIX_FMT_BGR444BE:
            c->chrToYV12 = bgr12beToUV_c;
            break;
        case AV_PIX_FMT_BGR32:
            c->chrToYV12 = rgb32ToUV_c;
            break;
        case AV_PIX_FMT_BGR32_1:
            c->chrToYV12 = rgb321ToUV_c;
            break;
        case AV_PIX_FMT_RGB24:
            c->chrToYV12 = rgb24ToUV_c;
            break;
        case AV_PIX_FMT_RGB565LE:
            c->chrToYV12 = rgb16leToUV_c;
            break;
        case AV_PIX_FMT_RGB565BE:
            c->chrToYV12 = rgb16beToUV_c;
            break;
        case AV_PIX_FMT_RGB555LE:
            c->chrToYV12 = rgb15leToUV_c;
            break;
        case AV_PIX_FMT_RGB555BE:
            c->chrToYV12 = rgb15beToUV_c;
            break;
        case AV_PIX_FMT_RGB444LE:
            c->chrToYV12 = rgb12leToUV_c;
            break;
        case AV_PIX_FMT_RGB444BE:
            c->chrToYV12 = rgb12beToUV_c;
            break;
        }
    }

    c->lumToYV12 = NULL;
    c->alpToYV12 = NULL;
    switch (srcFormat) {
    case AV_PIX_FMT_GBRP9LE:
        c->readLumPlanar = planar_rgb9le_to_y;
        break;
    case AV_PIX_FMT_GBRP10LE:
        c->readLumPlanar = planar_rgb10le_to_y;
        break;
    case AV_PIX_FMT_GBRAP16LE:
    case AV_PIX_FMT_GBRP16LE:
        c->readLumPlanar = planar_rgb16le_to_y;
        break;
    case AV_PIX_FMT_GBRP9BE:
        c->readLumPlanar = planar_rgb9be_to_y;
        break;
    case AV_PIX_FMT_GBRP10BE:
        c->readLumPlanar = planar_rgb10be_to_y;
        break;
    case AV_PIX_FMT_GBRAP16BE:
    case AV_PIX_FMT_GBRP16BE:
        c->readLumPlanar = planar_rgb16be_to_y;
        break;
    case AV_PIX_FMT_GBRAP:
        c->readAlpPlanar = planar_rgb_to_a;
    case AV_PIX_FMT_GBRP:
        c->readLumPlanar = planar_rgb_to_y;
        break;
#if HAVE_BIGENDIAN
    case AV_PIX_FMT_YUV420P9LE:
    case AV_PIX_FMT_YUV422P9LE:
    case AV_PIX_FMT_YUV444P9LE:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUV420P12LE:
    case AV_PIX_FMT_YUV422P12LE:
    case AV_PIX_FMT_YUV420P16LE:
    case AV_PIX_FMT_YUV422P16LE:
    case AV_PIX_FMT_YUV444P16LE:
    case AV_PIX_FMT_GRAY16LE:
        c->lumToYV12 = bswap16Y_c;
        break;
    case AV_PIX_FMT_YUVA420P9LE:
    case AV_PIX_FMT_YUVA422P9LE:
    case AV_PIX_FMT_YUVA444P9LE:
    case AV_PIX_FMT_YUVA420P10LE:
    case AV_PIX_FMT_YUVA422P10LE:
    case AV_PIX_FMT_YUVA444P10LE:
    case AV_PIX_FMT_YUVA420P16LE:
    case AV_PIX_FMT_YUVA422P16LE:
    case AV_PIX_FMT_YUVA444P16LE:
        c->lumToYV12 = bswap16Y_c;
        c->alpToYV12 = bswap16Y_c;
        break;
#else
    case AV_PIX_FMT_YUV420P9BE:
    case AV_PIX_FMT_YUV422P9BE:
    case AV_PIX_FMT_YUV444P9BE:
    case AV_PIX_FMT_YUV420P10BE:
    case AV_PIX_FMT_YUV422P10BE:
    case AV_PIX_FMT_YUV444P10BE:
    case AV_PIX_FMT_YUV420P12BE:
    case AV_PIX_FMT_YUV422P12BE:
    case AV_PIX_FMT_YUV420P16BE:
    case AV_PIX_FMT_YUV422P16BE:
    case AV_PIX_FMT_YUV444P16BE:
    case AV_PIX_FMT_GRAY16BE:
        c->lumToYV12 = bswap16Y_c;
        break;
    case AV_PIX_FMT_YUVA420P9BE:
    case AV_PIX_FMT_YUVA422P9BE:
    case AV_PIX_FMT_YUVA444P9BE:
    case AV_PIX_FMT_YUVA420P10BE:
    case AV_PIX_FMT_YUVA422P10BE:
    case AV_PIX_FMT_YUVA444P10BE:
    case AV_PIX_FMT_YUVA420P16BE:
    case AV_PIX_FMT_YUVA422P16BE:
    case AV_PIX_FMT_YUVA444P16BE:
        c->lumToYV12 = bswap16Y_c;
        c->alpToYV12 = bswap16Y_c;
        break;
#endif
    case AV_PIX_FMT_YA16LE:
        c->lumToYV12 = read_ya16le_gray_c;
        c->alpToYV12 = read_ya16le_alpha_c;
        break;
    case AV_PIX_FMT_YA16BE:
        c->lumToYV12 = read_ya16be_gray_c;
        c->alpToYV12 = read_ya16be_alpha_c;
        break;
    case AV_PIX_FMT_YUYV422:
    case AV_PIX_FMT_YVYU422:
    case AV_PIX_FMT_YA8:
        c->lumToYV12 = yuy2ToY_c;
        break;
    case AV_PIX_FMT_UYVY422:
        c->lumToYV12 = uyvyToY_c;
        break;
    case AV_PIX_FMT_BGR24:
        c->lumToYV12 = bgr24ToY_c;
        break;
    case AV_PIX_FMT_BGR565LE:
        c->lumToYV12 = bgr16leToY_c;
        break;
    case AV_PIX_FMT_BGR565BE:
        c->lumToYV12 = bgr16beToY_c;
        break;
    case AV_PIX_FMT_BGR555LE:
        c->lumToYV12 = bgr15leToY_c;
        break;
    case AV_PIX_FMT_BGR555BE:
        c->lumToYV12 = bgr15beToY_c;
        break;
    case AV_PIX_FMT_BGR444LE:
        c->lumToYV12 = bgr12leToY_c;
        break;
    case AV_PIX_FMT_BGR444BE:
        c->lumToYV12 = bgr12beToY_c;
        break;
    case AV_PIX_FMT_RGB24:
        c->lumToYV12 = rgb24ToY_c;
        break;
    case AV_PIX_FMT_RGB565LE:
        c->lumToYV12 = rgb16leToY_c;
        break;
    case AV_PIX_FMT_RGB565BE:
        c->lumToYV12 = rgb16beToY_c;
        break;
    case AV_PIX_FMT_RGB555LE:
        c->lumToYV12 = rgb15leToY_c;
        break;
    case AV_PIX_FMT_RGB555BE:
        c->lumToYV12 = rgb15beToY_c;
        break;
    case AV_PIX_FMT_RGB444LE:
        c->lumToYV12 = rgb12leToY_c;
        break;
    case AV_PIX_FMT_RGB444BE:
        c->lumToYV12 = rgb12beToY_c;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_PAL8:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
        c->lumToYV12 = palToY_c;
        break;
    case AV_PIX_FMT_MONOBLACK:
        c->lumToYV12 = monoblack2Y_c;
        break;
    case AV_PIX_FMT_MONOWHITE:
        c->lumToYV12 = monowhite2Y_c;
        break;
    case AV_PIX_FMT_RGB32:
        c->lumToYV12 = bgr32ToY_c;
        break;
    case AV_PIX_FMT_RGB32_1:
        c->lumToYV12 = bgr321ToY_c;
        break;
    case AV_PIX_FMT_BGR32:
        c->lumToYV12 = rgb32ToY_c;
        break;
    case AV_PIX_FMT_BGR32_1:
        c->lumToYV12 = rgb321ToY_c;
        break;
    case AV_PIX_FMT_RGB48BE:
        c->lumToYV12 = rgb48BEToY_c;
        break;
    case AV_PIX_FMT_RGB48LE:
        c->lumToYV12 = rgb48LEToY_c;
        break;
    case AV_PIX_FMT_BGR48BE:
        c->lumToYV12 = bgr48BEToY_c;
        break;
    case AV_PIX_FMT_BGR48LE:
        c->lumToYV12 = bgr48LEToY_c;
        break;
    case AV_PIX_FMT_P010LE:
        c->lumToYV12 = p010LEToY_c;
        break;
    case AV_PIX_FMT_P010BE:
        c->lumToYV12 = p010BEToY_c;
        break;
    }

    switch (srcFormat) {
    case AV_PIX_FMT_BGRA:
    case AV_PIX_FMT_RGBA:
        c->alpToYV12 = rgbaToA_c;
        break;
    case AV_PIX_FMT_ABGR:
    case AV_PIX_FMT_ARGB:
        c->alpToYV12 = abgrToA_c;
        break;
    case AV_PIX_FMT_YA8:
        c->alpToYV12 = uyvyToY_c;