/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/mpegvideo.h"
int ff_pix_abs16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_pix_abs16_x2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_pix_abs16_y2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_pix_abs8_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_sse16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
av_cold void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx)
{
/*
- * This file is part of Libav.
+ * DSP utils
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
+#include "libavutil/internal.h"
#include "avcodec.h"
#include "copy_block.h"
#include "simple_idct.h"
uint32_t ff_square_tab[512] = { 0, };
static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
uint32_t *sq = ff_square_tab + 256;
s += sq[pix1[1] - pix2[1]];
s += sq[pix1[2] - pix2[2]];
s += sq[pix1[3] - pix2[3]];
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
uint32_t *sq = ff_square_tab + 256;
s += sq[pix1[5] - pix2[5]];
s += sq[pix1[6] - pix2[6]];
s += sq[pix1[7] - pix2[7]];
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
uint32_t *sq = ff_square_tab + 256;
s += sq[pix1[14] - pix2[14]];
s += sq[pix1[15] - pix2[15]];
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
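/* avg4() is the rounded average of four pixels, e.g. avg4(1, 2, 3, 4) == 3;
 * it provides the half-pel interpolation used by the *_xy2 functions below. */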
#define avg4(a, b, c, d) ((a + b + c + d + 2) >> 2)
static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
s += abs(pix1[13] - pix2[13]);
s += abs(pix1[14] - pix2[14]);
s += abs(pix1[15] - pix2[15]);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
s += abs(pix1[5] - pix2[5]);
s += abs(pix1[6] - pix2[6]);
s += abs(pix1[7] - pix2[7]);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
- static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
+ static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
+ ptrdiff_t stride, int h)
{
int score1 = 0, score2 = 0, x, y;
return score1 + FFABS(score2) * 8;
}
- static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
+ static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
+ ptrdiff_t stride, int h)
{
int score1 = 0, score2 = 0, x, y;
}
static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
return 0;
}
case FF_CMP_NSSE:
cmp[i] = c->nsse[i];
break;
+#if CONFIG_DWT
+ case FF_CMP_W53:
+ cmp[i] = c->w53[i];
+ break;
+ case FF_CMP_W97:
+ cmp[i] = c->w97[i];
+ break;
+#endif
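+/* w53/w97 compare through the 5/3 and 9/7 wavelet transforms; snow fills
+ * them in via ff_dsputil_init_dwt(). */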
default:
av_log(NULL, AV_LOG_ERROR,
"internal error in cmp function selection\n");
#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
int i, temp[64], sum = 0;
- assert(h == 8);
+ av_assert2(h == 8);
for (i = 0; i < 8; i++) {
// FIXME: try pointer walks
}
static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
- uint8_t *dummy, int stride, int h)
+ uint8_t *dummy, ptrdiff_t stride, int h)
{
int i, temp[64], sum = 0;
- assert(h == 8);
+ av_assert2(h == 8);
for (i = 0; i < 8; i++) {
// FIXME: try pointer walks
}
static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
LOCAL_ALIGNED_16(int16_t, temp, [64]);
- assert(h == 8);
+ av_assert2(h == 8);
s->pdsp.diff_pixels(temp, src1, src2, stride);
s->fdsp.fdct(temp);
}
static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
int16_t dct[8][8];
int i, sum = 0;
#endif
static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
LOCAL_ALIGNED_16(int16_t, temp, [64]);
int sum = 0, i;
- assert(h == 8);
+ av_assert2(h == 8);
s->pdsp.diff_pixels(temp, src1, src2, stride);
s->fdsp.fdct(temp);
}
static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
int16_t *const bak = temp + 64;
int sum = 0, i;
- assert(h == 8);
+ av_assert2(h == 8);
s->mb_intra = 0;
s->pdsp.diff_pixels(temp, src1, src2, stride);
}
static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
const uint8_t *scantable = s->intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
const int esc_length = s->ac_esc_length;
uint8_t *length, *last_length;
- assert(h == 8);
+ av_assert2(h == 8);
copy_block8(lsrc1, src1, 8, stride, 8);
copy_block8(lsrc2, src2, 8, stride, 8);
level = temp[i] + 64;
- assert(level - 64);
+ av_assert2(level - 64);
if ((level & (~127)) == 0) {
bits += last_length[UNI_AC_ENC_INDEX(run, level)];
}
static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
const uint8_t *scantable = s->intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
const int esc_length = s->ac_esc_length;
uint8_t *length, *last_length;
- assert(h == 8);
+ av_assert2(h == 8);
s->pdsp.diff_pixels(temp, src1, src2, stride);
level = temp[i] + 64;
- assert(level - 64);
+ av_assert2(level - 64);
if ((level & (~127)) == 0)
bits += last_length[UNI_AC_ENC_INDEX(run, level)];
#define VSAD_INTRA(size) \
static int vsad_intra ## size ## _c(MpegEncContext *c, \
uint8_t *s, uint8_t *dummy, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
{ \
int score = 0, x, y; \
\
VSAD_INTRA(8)
VSAD_INTRA(16)
-static int vsad16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
- ptrdiff_t stride, int h)
-{
- int score = 0, x, y;
-
- for (y = 1; y < h; y++) {
- for (x = 0; x < 16; x++)
- score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
- s1 += stride;
- s2 += stride;
- }
-
- return score;
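+/* VSAD(size) generates vsad8_c/vsad16_c: the sum of absolute differences
+ * between the vertical gradients of s1 and s2, i.e. how differently the two
+ * blocks change from one row to the next. */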
+#define VSAD(size) \
+static int vsad ## size ## _c(MpegEncContext *c, \
+ uint8_t *s1, uint8_t *s2, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
+{ \
+ int score = 0, x, y; \
+ \
+ for (y = 1; y < h; y++) { \
+ for (x = 0; x < size; x++) \
+ score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
+ s1 += stride; \
+ s2 += stride; \
+ } \
+ \
+ return score; \
}
+VSAD(8)
+VSAD(16)
#define SQ(a) ((a) * (a))
#define VSSE_INTRA(size) \
static int vsse_intra ## size ## _c(MpegEncContext *c, \
uint8_t *s, uint8_t *dummy, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
{ \
int score = 0, x, y; \
\
VSSE_INTRA(8)
VSSE_INTRA(16)
-static int vsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
- ptrdiff_t stride, int h)
-{
- int score = 0, x, y;
-
- for (y = 1; y < h; y++) {
- for (x = 0; x < 16; x++)
- score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
- s1 += stride;
- s2 += stride;
- }
-
- return score;
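+/* VSSE(size) generates vsse8_c/vsse16_c: like VSAD, but summing squared
+ * vertical-gradient differences. */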
+#define VSSE(size) \
+static int vsse ## size ## _c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
+{ \
+ int score = 0, x, y; \
+ \
+ for (y = 1; y < h; y++) { \
+ for (x = 0; x < size; x++) \
+ score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
+ s1 += stride; \
+ s2 += stride; \
+ } \
+ \
+ return score; \
}
+VSSE(8)
+VSSE(16)
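/* WRAPPER8_16_SQ builds a 16x16 comparison function name16 from an 8x8 one
 * by summing name8 over the 8x8 quadrants of the block (the top pair, and
 * the bottom pair when h == 16). */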
#define WRAPPER8_16_SQ(name8, name16) \
static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
{ \
int score = 0; \
\
ff_square_tab[i] = (i - 256) * (i - 256);
}
+int ff_check_alignment(void)
+{
+ static int did_fail = 0;
+ LOCAL_ALIGNED_16(int, aligned, [4]);
+
+ if ((intptr_t)aligned & 15) {
+ if (!did_fail) {
+#if HAVE_MMX || HAVE_ALTIVEC
+ av_log(NULL, AV_LOG_ERROR,
+ "Compiler did not align stack variables. Libavcodec has been miscompiled\n"
+ "and may be very slow or crash. This is not a bug in libavcodec,\n"
+ "but in the compiler. You may try recompiling using gcc >= 4.2.\n"
+ "Do not report crashes to FFmpeg developers.\n");
+#endif
+ did_fail=1;
+ }
+ return -1;
+ }
+ return 0;
+}
+
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
{
+ ff_check_alignment();
+
c->sum_abs_dctelem = sum_abs_dctelem_c;
/* TODO [0] 16 [1] 8 */
SET_CMP_FUNC(rd)
SET_CMP_FUNC(bit)
c->vsad[0] = vsad16_c;
+ c->vsad[1] = vsad8_c;
c->vsad[4] = vsad_intra16_c;
c->vsad[5] = vsad_intra8_c;
c->vsse[0] = vsse16_c;
+ c->vsse[1] = vsse8_c;
c->vsse[4] = vsse_intra16_c;
c->vsse[5] = vsse_intra8_c;
c->nsse[0] = nsse16_c;
c->nsse[1] = nsse8_c;
+#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
+ ff_dsputil_init_dwt(c);
+#endif
+ if (ARCH_ALPHA)
+ ff_me_cmp_init_alpha(c, avctx);
if (ARCH_ARM)
ff_me_cmp_init_arm(c, avctx);
if (ARCH_PPC)
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
extern uint32_t ff_square_tab[512];
+
+/* minimum alignment rules ;)
+ * If you notice errors in the alignment requirements, need more alignment
+ * for some ASM code on some CPU, or need to use a function with less
+ * aligned data, then send a mail to the ffmpeg-devel mailing list, ...
+ *
+ * !warning These alignments might not match reality (an attribute((align))
+ * may be missing somewhere).
+ * I (Michael) did not check them; these are just the alignments which I
+ * think could be reached easily ...
+ *
+ * !future video codecs might need functions with less strict alignment
+ */
+
struct MpegEncContext;
/* Motion estimation:
* h is limited to { width / 2, width, 2 * width },
* width < 8 are neither used nor implemented. */
typedef int (*me_cmp_func)(struct MpegEncContext *c,
uint8_t *blk1 /* align width (8 or 16) */,
- uint8_t *blk2 /* align 1 */, int line_size, int h);
+ uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
+ int h);
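+
+/* Illustrative sketch only (not a function in this header): a me_cmp_func
+ * implementing a plain 8-wide SAD would look like
+ *
+ *     static int sad8_example(struct MpegEncContext *c, uint8_t *blk1,
+ *                             uint8_t *blk2, ptrdiff_t stride, int h)
+ *     {
+ *         int x, y, sum = 0;
+ *         for (y = 0; y < h; y++) {
+ *             for (x = 0; x < 8; x++)
+ *                 sum += FFABS(blk1[x] - blk2[x]);
+ *             blk1 += stride;
+ *             blk2 += stride;
+ *         }
+ *         return sum;
+ *     }
+ */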
typedef struct MECmpContext {
int (*sum_abs_dctelem)(int16_t *block /* align 16 */);
me_cmp_func vsad[6];
me_cmp_func vsse[6];
me_cmp_func nsse[6];
+ me_cmp_func w53[6];
+ me_cmp_func w97[6];
me_cmp_func dct_max[6];
me_cmp_func dct264_sad[6];
void ff_me_cmp_init_static(void);
+int ff_check_alignment(void);
+
void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx);
+void ff_me_cmp_init_alpha(MECmpContext *c, AVCodecContext *avctx);
void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx);
void ff_me_cmp_init_ppc(MECmpContext *c, AVCodecContext *avctx);
void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx);
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type);
+void ff_dsputil_init_dwt(MECmpContext *c);
+
#endif /* AVCODEC_ME_CMP_H */
*
* new motion estimation (X1/EPZS) by Michael Niedermayer <michaelni@gmx.at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
uint8_t * const * const src= c->src[src_index];
int d;
//FIXME check chroma 4mv, (no crashes ...)
- assert(x >= c->xmin && hx <= c->xmax<<(qpel+1) && y >= c->ymin && hy <= c->ymax<<(qpel+1));
+ av_assert2(x >= c->xmin && hx <= c->xmax<<(qpel+1) && y >= c->ymin && hy <= c->ymax<<(qpel+1));
if(x >= c->xmin && hx <= c->xmax<<(qpel+1) && y >= c->ymin && hy <= c->ymax<<(qpel+1)){
const int time_pp= s->pp_time;
const int time_pb= s->pb_time;
c->qpel_avg[1][bxy](c->temp + 8*stride, ref[8] + (bx>>2) + (by>>2)*stride + 8*stride, stride);
c->qpel_avg[1][bxy](c->temp + 8 + 8*stride, ref[8] + (bx>>2) + (by>>2)*stride + 8 + 8*stride, stride);
}else{
- assert((fx>>1) + 16*s->mb_x >= -16);
- assert((fy>>1) + 16*s->mb_y >= -16);
- assert((fx>>1) + 16*s->mb_x <= s->width);
- assert((fy>>1) + 16*s->mb_y <= s->height);
- assert((bx>>1) + 16*s->mb_x >= -16);
- assert((by>>1) + 16*s->mb_y >= -16);
- assert((bx>>1) + 16*s->mb_x <= s->width);
- assert((by>>1) + 16*s->mb_y <= s->height);
+ av_assert2((fx>>1) + 16*s->mb_x >= -16);
+ av_assert2((fy>>1) + 16*s->mb_y >= -16);
+ av_assert2((fx>>1) + 16*s->mb_x <= s->width);
+ av_assert2((fy>>1) + 16*s->mb_y <= s->height);
+ av_assert2((bx>>1) + 16*s->mb_x >= -16);
+ av_assert2((by>>1) + 16*s->mb_y >= -16);
+ av_assert2((bx>>1) + 16*s->mb_x <= s->width);
+ av_assert2((by>>1) + 16*s->mb_y <= s->height);
c->hpel_put[0][fxy](c->temp, ref[0] + (fx>>1) + (fy>>1)*stride, stride, 16);
c->hpel_avg[0][bxy](c->temp, ref[8] + (bx>>1) + (by>>1)*stride, stride, 16);
#include "motion_est_template.c"
static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
return 0;
}
int cache_size= FFMIN(ME_MAP_SIZE>>ME_MAP_SHIFT, 1<<ME_MAP_SHIFT);
int dia_size= FFMAX(FFABS(s->avctx->dia_size)&255, FFABS(s->avctx->pre_dia_size)&255);
- if(FFMIN(s->avctx->dia_size, s->avctx->pre_dia_size) < -ME_MAP_SIZE){
+ if(FFMIN(s->avctx->dia_size, s->avctx->pre_dia_size) < -FFMIN(ME_MAP_SIZE, MAX_SAB_SIZE)){
av_log(s->avctx, AV_LOG_ERROR, "ME_MAP size is too small for SAB diamond\n");
return -1;
}
- if (s->me_method != ME_ZERO &&
- s->me_method != ME_EPZS &&
- s->me_method != ME_X1) {
+ //a special case for snow is needed because snow uses its own iterative ME code
+ if(s->me_method!=ME_ZERO && s->me_method!=ME_EPZS && s->me_method!=ME_X1 && s->avctx->codec_id != AV_CODEC_ID_SNOW){
av_log(s->avctx, AV_LOG_ERROR, "me_method is only allowed to be set to zero and epzs; for hex,umh,full and others see dia_size\n");
return -1;
}
/* 8x8 fullpel search would need a 4x4 chroma compare, which we do
* not have yet, and even if we had, the motion estimation code
* does not expect it. */
- if ((c->avctx->me_cmp & FF_CMP_CHROMA) /* && !s->mecc.me_cmp[2] */)
- s->mecc.me_cmp[2] = zero_cmp;
- if ((c->avctx->me_sub_cmp & FF_CMP_CHROMA) && !s->mecc.me_sub_cmp[2])
- s->mecc.me_sub_cmp[2] = zero_cmp;
- c->hpel_put[2][0]= c->hpel_put[2][1]=
- c->hpel_put[2][2]= c->hpel_put[2][3]= zero_hpel;
+ if (s->codec_id != AV_CODEC_ID_SNOW) {
+ if ((c->avctx->me_cmp & FF_CMP_CHROMA) /* && !s->mecc.me_cmp[2] */)
+ s->mecc.me_cmp[2] = zero_cmp;
+ if ((c->avctx->me_sub_cmp & FF_CMP_CHROMA) && !s->mecc.me_sub_cmp[2])
+ s->mecc.me_sub_cmp[2] = zero_cmp;
+ c->hpel_put[2][0]= c->hpel_put[2][1]=
+ c->hpel_put[2][2]= c->hpel_put[2][3]= zero_hpel;
+ }
if(s->codec_id == AV_CODEC_ID_H261){
c->sub_motion_search= no_sub_motion_search;
int mx, my, dminh;
uint8_t *pix, *ptr;
int stride= c->stride;
- const int flags= c->sub_flags;
LOAD_COMMON
- assert(flags == 0);
+ av_assert2(c->sub_flags == 0);
if(c->skip){
*mx_ptr = 0;
{
MotionEstContext * const c= &s->me;
int range= c->avctx->me_range >> (1 + !!(c->flags&FLAG_QPEL));
+ int max_range = MAX_MV >> (1 + !!(c->flags&FLAG_QPEL));
/*
if(c->avctx->me_range) c->range= c->avctx->me_range >> 1;
else c->range= 16;
if (s->unrestricted_mv) {
c->xmin = - x - 16;
c->ymin = - y - 16;
- c->xmax = - x + s->mb_width *16;
- c->ymax = - y + s->mb_height*16;
+ c->xmax = - x + s->width;
+ c->ymax = - y + s->height;
} else if (s->out_format == FMT_H261){
// Search range of H261 is different from other codec standards
c->xmin = (x > 15) ? - 15 : 0;
c->xmax = - x + s->mb_width *16 - 16;
c->ymax = - y + s->mb_height*16 - 16;
}
+ if(!range || range > max_range)
+ range = max_range;
if(range){
c->xmin = FFMAX(c->xmin,-range);
c->xmax = FFMIN(c->xmax, range);
const int h=8;
int block;
int P[10][2];
- int dmin_sum=0, mx4_sum=0, my4_sum=0;
+ int dmin_sum=0, mx4_sum=0, my4_sum=0, i;
int same=1;
const int stride= c->stride;
uint8_t *mv_penalty= c->current_mv_penalty;
+ int safety_clipping= s->unrestricted_mv && (s->width&15) && (s->height&15);
init_mv4_ref(c);
const int mot_stride = s->b8_stride;
const int mot_xy = s->block_index[block];
+ if(safety_clipping){
+ c->xmax = - 16*s->mb_x + s->width - 8*(block &1);
+ c->ymax = - 16*s->mb_y + s->height - 8*(block>>1);
+ }
+
P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0];
P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1];
}
P_MV1[0]= mx;
P_MV1[1]= my;
+ if(safety_clipping)
+ for(i=1; i<10; i++){
+ if (s->first_slice_line && block<2 && i>1 && i<9)
+ continue;
+ if (i>4 && i<9)
+ continue;
+ if(P[i][0] > (c->xmax<<shift)) P[i][0]= (c->xmax<<shift);
+ if(P[i][1] > (c->ymax<<shift)) P[i][1]= (c->ymax<<shift);
+ }
dmin4 = epzs_motion_search4(s, &mx4, &my4, P, block, block, s->p_mv_table, (1<<16)>>shift);
int16_t (*mv_table)[2]= mv_tables[block][field_select];
if(user_field_select){
- assert(field_select==0 || field_select==1);
- assert(field_select_tables[block][xy]==0 || field_select_tables[block][xy]==1);
+ av_assert1(field_select==0 || field_select==1);
+ av_assert1(field_select_tables[block][xy]==0 || field_select_tables[block][xy]==1);
if(field_select_tables[block][xy] != field_select)
continue;
}
return lambda>>FF_LAMBDA_SHIFT;
case FF_CMP_DCT:
return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
+ case FF_CMP_W53:
+ return (4*lambda)>>(FF_LAMBDA_SHIFT);
+ case FF_CMP_W97:
+ return (2*lambda)>>(FF_LAMBDA_SHIFT);
case FF_CMP_SATD:
case FF_CMP_DCT264:
return (2*lambda)>>FF_LAMBDA_SHIFT;
init_ref(c, s->new_picture.f->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
- assert(s->quarter_sample==0 || s->quarter_sample==1);
- assert(s->linesize == c->stride);
- assert(s->uvlinesize == c->uvstride);
+ av_assert0(s->quarter_sample==0 || s->quarter_sample==1);
+ av_assert0(s->linesize == c->stride);
+ av_assert0(s->uvlinesize == c->uvstride);
c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
c->sub_penalty_factor= get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
const int xy= mb_x + mb_y*s->mb_stride;
init_ref(c, s->new_picture.f->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
- assert(s->quarter_sample==0 || s->quarter_sample==1);
+ av_assert0(s->quarter_sample==0 || s->quarter_sample==1);
c->pre_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_pre_cmp);
c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV;
if(s->mv_type == MV_TYPE_16X16) break;
}
- assert(xmax <= 15 && ymax <= 15 && xmin >= -16 && ymin >= -16);
+ av_assert2(xmax <= 15 && ymax <= 15 && xmin >= -16 && ymin >= -16);
if(xmax < 0 || xmin >0 || ymax < 0 || ymin > 0){
s->b_direct_mv_table[mot_xy][0]= 0;
MotionEstContext * const c= &s->me;
const int f_code= s->f_code;
int y, range;
- assert(s->pict_type==AV_PICTURE_TYPE_P);
+ av_assert0(s->pict_type==AV_PICTURE_TYPE_P);
range = (((s->out_format == FMT_MPEG1 || s->msmpeg4_version) ? 8 : 16) << f_code);
- assert(range <= 16 || !s->msmpeg4_version);
- assert(range <=256 || !(s->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL));
+ av_assert0(range <= 16 || !s->msmpeg4_version);
+ av_assert0(range <=256 || !(s->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL));
if(c->avctx->me_range && range > c->avctx->me_range) range= c->avctx->me_range;
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/me_cmp.h"
#if HAVE_ALTIVEC
+
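+/* Big-endian AltiVec has no unaligned vector load, so GET_PERM()/LOAD_PIX()
+ * emulate one with two aligned vec_ld()s and a vec_perm(); little-endian VSX
+ * can load unaligned directly with vec_vsx_ld(), so no permute is needed. */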
+#if HAVE_BIGENDIAN
+#define GET_PERM(per1, per2, pix) {\
+ per1 = vec_lvsl(0, pix);\
+ per2 = vec_add(per1, vec_splat_u8(1));\
+}
+#define LOAD_PIX(v, iv, pix, per1, per2) {\
+ vector unsigned char pix2l = vec_ld(0, pix);\
+ vector unsigned char pix2r = vec_ld(16, pix);\
+ v = vec_perm(pix2l, pix2r, per1);\
+ iv = vec_perm(pix2l, pix2r, per2);\
+}
+#else
+#define GET_PERM(per1, per2, pix) {}
+#define LOAD_PIX(v, iv, pix, per1, per2) {\
+ v = vec_vsx_ld(0, pix);\
+ iv = vec_vsx_ld(1, pix);\
+}
+#endif
static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
- int i, s = 0;
+ int i;
+ int __attribute__((aligned(16))) s = 0;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
- vector unsigned char perm1 = vec_lvsl(0, pix2);
- vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
+ vector unsigned char perm1, perm2, pix2v, pix2iv;
+ GET_PERM(perm1, perm2, pix2);
for (i = 0; i < h; i++) {
/* Read unaligned pixels into our vectors. The vectors are as follows:
* pix1v: pix1[0] - pix1[15]
* pix2v: pix2[0] - pix2[15] pix2iv: pix2[1] - pix2[16] */
vector unsigned char pix1v = vec_ld(0, pix1);
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(16, pix2);
- vector unsigned char pix2v = vec_perm(pix2l, pix2r, perm1);
- vector unsigned char pix2iv = vec_perm(pix2l, pix2r, perm2);
+ LOAD_PIX(pix2v, pix2iv, pix2, perm1, perm2);
/* Calculate the average vector. */
vector unsigned char avgv = vec_avg(pix2v, pix2iv);
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
}
static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
- int i, s = 0;
+ int i;
+ int __attribute__((aligned(16))) s = 0;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
- vector unsigned char perm = vec_lvsl(0, pix2);
vector unsigned char pix1v, pix3v, avgv, t5;
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
- uint8_t *pix3 = pix2 + line_size;
+
+ uint8_t *pix3 = pix2 + stride;
- /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+ /* Because pix3 = pix2 + stride, the pix3 of one
* iteration becomes pix2 in the next iteration. We can use this
* fact to avoid a potentially expensive unaligned read, each
* time around the loop.
* Read unaligned pixels into our vectors. The vectors are as follows:
* pix2v: pix2[0] - pix2[15]
* Split the pixel vectors into shorts. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(15, pix2);
- vector unsigned char pix2v = vec_perm(pix2l, pix2r, perm);
+ vector unsigned char pix2v = VEC_LD(0, pix2);
for (i = 0; i < h; i++) {
/* Read unaligned pixels into our vectors. The vectors are as follows:
* pix1v: pix1[0] - pix1[15]
* pix3v: pix3[0] - pix3[15] */
pix1v = vec_ld(0, pix1);
-
- pix2l = vec_ld(0, pix3);
- pix2r = vec_ld(15, pix3);
- pix3v = vec_perm(pix2l, pix2r, perm);
+ pix3v = VEC_LD(0, pix3);
/* Calculate the average vector. */
avgv = vec_avg(pix2v, pix3v);
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
+ pix1 += stride;
pix2v = pix3v;
- pix3 += line_size;
+ pix3 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
}
static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
- int i, s = 0;
+ int i;
+ int __attribute__((aligned(16))) s = 0;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
const vector unsigned short two =
(const vector unsigned short) vec_splat_u16(2);
vector unsigned char avgv, t5;
- vector unsigned char perm1 = vec_lvsl(0, pix2);
- vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
vector unsigned char pix1v, pix3v, pix3iv;
vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
vector unsigned short avghv, avglv;
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
+ vector unsigned char perm1, perm2, pix2v, pix2iv;
+ GET_PERM(perm1, perm2, pix2);
- /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+ /* Because pix3 = pix2 + stride, the pix3 of one
* iteration becomes pix2 in the next iteration. We can use this
* fact to avoid a potentially expensive unaligned read, as well
* as some splitting, and vector addition each time around the loop.
* Read unaligned pixels into our vectors. The vectors are as follows:
* pix2v: pix2[0] - pix2[15] pix2iv: pix2[1] - pix2[16]
* Split the pixel vectors into shorts. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(16, pix2);
- vector unsigned char pix2v = vec_perm(pix2l, pix2r, perm1);
- vector unsigned char pix2iv = vec_perm(pix2l, pix2r, perm2);
-
+ LOAD_PIX(pix2v, pix2iv, pix2, perm1, perm2);
vector unsigned short pix2hv =
- (vector unsigned short) vec_mergeh(zero, pix2v);
+ (vector unsigned short) VEC_MERGEH(zero, pix2v);
vector unsigned short pix2lv =
- (vector unsigned short) vec_mergel(zero, pix2v);
+ (vector unsigned short) VEC_MERGEL(zero, pix2v);
vector unsigned short pix2ihv =
- (vector unsigned short) vec_mergeh(zero, pix2iv);
+ (vector unsigned short) VEC_MERGEH(zero, pix2iv);
vector unsigned short pix2ilv =
- (vector unsigned short) vec_mergel(zero, pix2iv);
+ (vector unsigned short) VEC_MERGEL(zero, pix2iv);
+
vector unsigned short t1 = vec_add(pix2hv, pix2ihv);
vector unsigned short t2 = vec_add(pix2lv, pix2ilv);
vector unsigned short t3, t4;
* pix1v: pix1[0] - pix1[15]
* pix3v: pix3[0] - pix3[15] pix3iv: pix3[1] - pix3[16] */
pix1v = vec_ld(0, pix1);
-
- pix2l = vec_ld(0, pix3);
- pix2r = vec_ld(16, pix3);
- pix3v = vec_perm(pix2l, pix2r, perm1);
- pix3iv = vec_perm(pix2l, pix2r, perm2);
+ LOAD_PIX(pix3v, pix3iv, pix3, perm1, perm2);
/* Note that AltiVec does have vec_avg, but this works on vector pairs
* and rounds up. We could do avg(avg(a, b), avg(c, d)), but the
* vectors of shorts and do the averaging by hand. */
/* Split the pixel vectors into shorts. */
- pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
- pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
- pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
- pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);
+ pix3hv = (vector unsigned short) VEC_MERGEH(zero, pix3v);
+ pix3lv = (vector unsigned short) VEC_MERGEL(zero, pix3v);
+ pix3ihv = (vector unsigned short) VEC_MERGEH(zero, pix3iv);
+ pix3ilv = (vector unsigned short) VEC_MERGEL(zero, pix3iv);
/* Do the averaging on them. */
t3 = vec_add(pix3hv, pix3ihv);
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix3 += stride;
/* Transfer the calculated values for pix3 into pix2. */
t1 = t3;
t2 = t4;
}
static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
- vector unsigned char perm = vec_lvsl(0, pix2);
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
for (i = 0; i < h; i++) {
/* Read potentially unaligned pixels into t1 and t2. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(15, pix2);
- vector unsigned char t1 = vec_ld(0, pix1);
- vector unsigned char t2 = vec_perm(pix2l, pix2r, perm);
+ vector unsigned char t1 = vec_ld(0, pix1);
+ vector unsigned char t2 = VEC_LD(0, pix2);
/* Calculate a sum of abs differences vector. */
vector unsigned char t3 = vec_max(t1, t2);
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
}
static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
const vector unsigned char permclear =
(vector unsigned char)
{ 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
- vector unsigned char perm1 = vec_lvsl(0, pix1);
- vector unsigned char perm2 = vec_lvsl(0, pix2);
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
/* Read potentially unaligned pixels into t1 and t2.
* Since we're reading 16 pixels, and actually only want 8,
* mask out the last 8 pixels. The 0s don't change the sum. */
- vector unsigned char pix1l = vec_ld(0, pix1);
- vector unsigned char pix1r = vec_ld(7, pix1);
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(7, pix2);
- vector unsigned char t1 = vec_and(vec_perm(pix1l, pix1r, perm1),
- permclear);
- vector unsigned char t2 = vec_and(vec_perm(pix2l, pix2r, perm2),
- permclear);
+ vector unsigned char pix1l = VEC_LD(0, pix1);
+ vector unsigned char pix2l = VEC_LD(0, pix2);
+ vector unsigned char t1 = vec_and(pix1l, permclear);
+ vector unsigned char t2 = vec_and(pix2l, permclear);
/* Calculate a sum of abs differences vector. */
vector unsigned char t3 = vec_max(t1, t2);
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
/* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
* It's the sad8_altivec code above w/ squaring added. */
static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
const vector unsigned char permclear =
(vector unsigned char)
{ 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
- vector unsigned char perm1 = vec_lvsl(0, pix1);
- vector unsigned char perm2 = vec_lvsl(0, pix2);
vector unsigned int sum = (vector unsigned int) vec_splat_u32(0);
vector signed int sumsqr;
/* Read potentially unaligned pixels into t1 and t2.
* Since we're reading 16 pixels, and actually only want 8,
* mask out the last 8 pixels. The 0s don't change the sum. */
- vector unsigned char pix1l = vec_ld(0, pix1);
- vector unsigned char pix1r = vec_ld(7, pix1);
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(7, pix2);
- vector unsigned char t1 = vec_and(vec_perm(pix1l, pix1r, perm1),
- permclear);
- vector unsigned char t2 = vec_and(vec_perm(pix2l, pix2r, perm2),
- permclear);
+ vector unsigned char t1 = vec_and(VEC_LD(0, pix1), permclear);
+ vector unsigned char t2 = vec_and(VEC_LD(0, pix2), permclear);
/* Since we want to use unsigned chars, we can take advantage
* of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */
/* Square the values and add them to our sum. */
sum = vec_msum(t5, t5, sum);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
/* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
* It's the sad16_altivec code above w/ squaring added. */
static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
- vector unsigned char perm = vec_lvsl(0, pix2);
vector unsigned int sum = (vector unsigned int) vec_splat_u32(0);
vector signed int sumsqr;
for (i = 0; i < h; i++) {
/* Read potentially unaligned pixels into t1 and t2. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(15, pix2);
vector unsigned char t1 = vec_ld(0, pix1);
- vector unsigned char t2 = vec_perm(pix2l, pix2r, perm);
+ vector unsigned char t2 = VEC_LD(0, pix2);
/* Since we want to use unsigned chars, we can take advantage
* of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */
/* Square the values and add them to our sum. */
sum = vec_msum(t5, t5, sum);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
sumsqr = vec_splat(sumsqr, 3);
- vec_ste(sumsqr, 0, &s);
+ vec_ste(sumsqr, 0, &s);
return s;
}
static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
- int sum;
+ int __attribute__((aligned(16))) sum;
register const vector unsigned char vzero =
(const vector unsigned char) vec_splat_u8(0);
register vector signed short temp0, temp1, temp2, temp3, temp4,
{ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
+
#define ONEITERBUTTERFLY(i, res) \
{ \
- register vector unsigned char src1 = vec_ld(stride * i, src); \
- register vector unsigned char src2 = vec_ld(stride * i + 15, src); \
- register vector unsigned char srcO = \
- vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
- register vector unsigned char dst1 = vec_ld(stride * i, dst); \
- register vector unsigned char dst2 = vec_ld(stride * i + 15, dst); \
- register vector unsigned char dstO = \
- vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
+ register vector unsigned char srcO = unaligned_load(stride * i, src); \
+ register vector unsigned char dstO = unaligned_load(stride * i, dst); \
\
/* Promote the unsigned chars to signed shorts. */ \
/* We're in the 8x8 function, we only care for the first 8. */ \
register vector signed short srcV = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) srcO); \
register vector signed short dstV = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) dstO); \
\
/* subtractions inside the first butterfly */ \
register vector signed short op3 = vec_perm(but2, but2, perm3); \
res = vec_mladd(but2, vprod3, op3); \
}
+
ONEITERBUTTERFLY(0, temp0);
ONEITERBUTTERFLY(1, temp1);
ONEITERBUTTERFLY(2, temp2);
vsum = vec_sum4s(vec_abs(line7C), vsum);
vsum = vec_sums(vsum, (vector signed int) vzero);
vsum = vec_splat(vsum, 3);
+
vec_ste(vsum, 0, &sum);
}
return sum;
* but xlc goes to around 660 on the regular C code...
*/
static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
- int sum;
+ int __attribute__((aligned(16))) sum;
register vector signed short
temp0 __asm__ ("v0"),
temp1 __asm__ ("v1"),
#define ONEITERBUTTERFLY(i, res1, res2) \
{ \
- register vector unsigned char src1 __asm__ ("v22") = \
- vec_ld(stride * i, src); \
- register vector unsigned char src2 __asm__ ("v23") = \
- vec_ld(stride * i + 16, src); \
register vector unsigned char srcO __asm__ ("v22") = \
- vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
- register vector unsigned char dst1 __asm__ ("v24") = \
- vec_ld(stride * i, dst); \
- register vector unsigned char dst2 __asm__ ("v25") = \
- vec_ld(stride * i + 16, dst); \
+ unaligned_load(stride * i, src); \
register vector unsigned char dstO __asm__ ("v23") = \
- vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
+ unaligned_load(stride * i, dst); \
\
/* Promote the unsigned chars to signed shorts. */ \
register vector signed short srcV __asm__ ("v24") = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) srcO); \
register vector signed short dstV __asm__ ("v25") = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) dstO); \
register vector signed short srcW __asm__ ("v26") = \
- (vector signed short) vec_mergel((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEL((vector signed char) vzero, \
(vector signed char) srcO); \
register vector signed short dstW __asm__ ("v27") = \
- (vector signed short) vec_mergel((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEL((vector signed char) vzero, \
(vector signed char) dstO); \
\
/* subtractions inside the first butterfly */ \
res1 = vec_mladd(but2, vprod3, op3); \
res2 = vec_mladd(but2S, vprod3, op3S); \
}
+
ONEITERBUTTERFLY(0, temp0, temp0S);
ONEITERBUTTERFLY(1, temp1, temp1S);
ONEITERBUTTERFLY(2, temp2, temp2S);
vsum = vec_sum4s(vec_abs(line7CS), vsum);
vsum = vec_sums(vsum, (vector signed int) vzero);
vsum = vec_splat(vsum, 3);
+
vec_ste(vsum, 0, &sum);
}
return sum;
}
static int hadamard8_diff16_altivec(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
--- /dev/null
+/*
+ * Copyright (C) 2004-2010 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2008 David Conrad
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "me_cmp.h"
+#include "snow_dwt.h"
+
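+/* slice_buffer hands out IDWTELEM line buffers from a small preallocated
+ * stack so the inverse DWT can run slice by slice without keeping a whole
+ * plane of coefficients in memory. Typical lifecycle:
+ * ff_slice_buffer_init(), then ff_slice_buffer_load_line() /
+ * ff_slice_buffer_release() per line, and finally
+ * ff_slice_buffer_destroy(). */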
+int ff_slice_buffer_init(slice_buffer *buf, int line_count,
+ int max_allocated_lines, int line_width,
+ IDWTELEM *base_buffer)
+{
+ int i;
+
+ buf->base_buffer = base_buffer;
+ buf->line_count = line_count;
+ buf->line_width = line_width;
+ buf->data_count = max_allocated_lines;
+ buf->line = av_mallocz_array(line_count, sizeof(IDWTELEM *));
+ if (!buf->line)
+ return AVERROR(ENOMEM);
+ buf->data_stack = av_malloc_array(max_allocated_lines, sizeof(IDWTELEM *));
+ if (!buf->data_stack) {
+ av_freep(&buf->line);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < max_allocated_lines; i++) {
+ buf->data_stack[i] = av_malloc_array(line_width, sizeof(IDWTELEM));
+ if (!buf->data_stack[i]) {
+ for (i--; i >= 0; i--)
+ av_freep(&buf->data_stack[i]);
+ av_freep(&buf->data_stack);
+ av_freep(&buf->line);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ buf->data_stack_top = max_allocated_lines - 1;
+ return 0;
+}
+
+IDWTELEM *ff_slice_buffer_load_line(slice_buffer *buf, int line)
+{
+ IDWTELEM *buffer;
+
+ av_assert0(buf->data_stack_top >= 0);
+// av_assert1(!buf->line[line]);
+ if (buf->line[line])
+ return buf->line[line];
+
+ buffer = buf->data_stack[buf->data_stack_top];
+ buf->data_stack_top--;
+ buf->line[line] = buffer;
+
+ return buffer;
+}
+
+void ff_slice_buffer_release(slice_buffer *buf, int line)
+{
+ IDWTELEM *buffer;
+
+ av_assert1(line >= 0 && line < buf->line_count);
+ av_assert1(buf->line[line]);
+
+ buffer = buf->line[line];
+ buf->data_stack_top++;
+ buf->data_stack[buf->data_stack_top] = buffer;
+ buf->line[line] = NULL;
+}
+
+void ff_slice_buffer_flush(slice_buffer *buf)
+{
+ int i;
+ for (i = 0; i < buf->line_count; i++)
+ if (buf->line[i])
+ ff_slice_buffer_release(buf, i);
+}
+
+void ff_slice_buffer_destroy(slice_buffer *buf)
+{
+ int i;
+ ff_slice_buffer_flush(buf);
+
+ for (i = buf->data_count - 1; i >= 0; i--)
+ av_freep(&buf->data_stack[i]);
+ av_freep(&buf->data_stack);
+ av_freep(&buf->line);
+}
+
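+/* Reflect the index v back into [0, m] for symmetric boundary extension,
+ * e.g. mirror(-2, 7) == 2 and mirror(9, 7) == 5. */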
+static inline int mirror(int v, int m)
+{
+ while ((unsigned)v > (unsigned)m) {
+ v = -v;
+ if (v < 0)
+ v += 2 * m;
+ }
+ return v;
+}
+
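+/* One lifting step of the 1-D wavelet: each output becomes
+ * dst[i] = src[i] +/- ((mul * (ref[i] + ref[i + 1]) + add) >> shift),
+ * with the single border sample doubled where the signal is mirrored. */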
+static av_always_inline void lift(DWTELEM *dst, DWTELEM *src, DWTELEM *ref,
+ int dst_step, int src_step, int ref_step,
+ int width, int mul, int add, int shift,
+ int highpass, int inverse)
+{
+ const int mirror_left = !highpass;
+ const int mirror_right = (width & 1) ^ highpass;
+ const int w = (width >> 1) - 1 + (highpass & width);
+ int i;
+
+#define LIFT(src, ref, inv) ((src) + ((inv) ? -(ref) : +(ref)))
+ if (mirror_left) {
+ dst[0] = LIFT(src[0], ((mul * 2 * ref[0] + add) >> shift), inverse);
+ dst += dst_step;
+ src += src_step;
+ }
+
+ for (i = 0; i < w; i++)
+ dst[i * dst_step] = LIFT(src[i * src_step],
+ ((mul * (ref[i * ref_step] +
+ ref[(i + 1) * ref_step]) +
+ add) >> shift),
+ inverse);
+
+ if (mirror_right)
+ dst[w * dst_step] = LIFT(src[w * src_step],
+ ((mul * 2 * ref[w * ref_step] + add) >> shift),
+ inverse);
+}
+
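+/* liftS() is the lift() variant for the 9/7 W_B update step: the forward
+ * direction uses an exact division (see LIFTS) instead of a shift, so the
+ * integer transform remains exactly invertible. */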
+static av_always_inline void liftS(DWTELEM *dst, DWTELEM *src, DWTELEM *ref,
+ int dst_step, int src_step, int ref_step,
+ int width, int mul, int add, int shift,
+ int highpass, int inverse)
+{
+ const int mirror_left = !highpass;
+ const int mirror_right = (width & 1) ^ highpass;
+ const int w = (width >> 1) - 1 + (highpass & width);
+ int i;
+
+ av_assert1(shift == 4);
+#define LIFTS(src, ref, inv) \
+ ((inv) ? (src) + (((ref) + 4 * (src)) >> shift) \
+ : -((-16 * (src) + (ref) + add / \
+ 4 + 1 + (5 << 25)) / (5 * 4) - (1 << 23)))
+ if (mirror_left) {
+ dst[0] = LIFTS(src[0], mul * 2 * ref[0] + add, inverse);
+ dst += dst_step;
+ src += src_step;
+ }
+
+ for (i = 0; i < w; i++)
+ dst[i * dst_step] = LIFTS(src[i * src_step],
+ mul * (ref[i * ref_step] +
+ ref[(i + 1) * ref_step]) + add,
+ inverse);
+
+ if (mirror_right)
+ dst[w * dst_step] = LIFTS(src[w * src_step],
+ mul * 2 * ref[w * ref_step] + add,
+ inverse);
+}
+
+static void horizontal_decompose53i(DWTELEM *b, DWTELEM *temp, int width)
+{
+ const int width2 = width >> 1;
+ int x;
+ const int w2 = (width + 1) >> 1;
+
+ for (x = 0; x < width2; x++) {
+ temp[x] = b[2 * x];
+ temp[x + w2] = b[2 * x + 1];
+ }
+ if (width & 1)
+ temp[x] = b[2 * x];
+ lift(b + w2, temp + w2, temp, 1, 1, 1, width, -1, 0, 1, 1, 0);
+ lift(b, temp, b + w2, 1, 1, 1, width, 1, 2, 2, 0, 0);
+}
+
+static void vertical_decompose53iH0(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] -= (b0[i] + b2[i]) >> 1;
+}
+
+static void vertical_decompose53iL0(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] += (b0[i] + b2[i] + 2) >> 2;
+}
+
+static void spatial_decompose53i(DWTELEM *buffer, DWTELEM *temp,
+ int width, int height, int stride)
+{
+ int y;
+ DWTELEM *b0 = buffer + mirror(-2 - 1, height - 1) * stride;
+ DWTELEM *b1 = buffer + mirror(-2, height - 1) * stride;
+
+ for (y = -2; y < height; y += 2) {
+ DWTELEM *b2 = buffer + mirror(y + 1, height - 1) * stride;
+ DWTELEM *b3 = buffer + mirror(y + 2, height - 1) * stride;
+
+ if (y + 1 < (unsigned)height)
+ horizontal_decompose53i(b2, temp, width);
+ if (y + 2 < (unsigned)height)
+ horizontal_decompose53i(b3, temp, width);
+
+ if (y + 1 < (unsigned)height)
+ vertical_decompose53iH0(b1, b2, b3, width);
+ if (y + 0 < (unsigned)height)
+ vertical_decompose53iL0(b0, b1, b2, width);
+
+ b0 = b2;
+ b1 = b3;
+ }
+}
+
+static void horizontal_decompose97i(DWTELEM *b, DWTELEM *temp, int width)
+{
+ const int w2 = (width + 1) >> 1;
+
+ lift(temp + w2, b + 1, b, 1, 2, 2, width, W_AM, W_AO, W_AS, 1, 1);
+ liftS(temp, b, temp + w2, 1, 2, 1, width, W_BM, W_BO, W_BS, 0, 0);
+ lift(b + w2, temp + w2, temp, 1, 1, 1, width, W_CM, W_CO, W_CS, 1, 0);
+ lift(b, temp, b + w2, 1, 1, 1, width, W_DM, W_DO, W_DS, 0, 0);
+}
+
+static void vertical_decompose97iH0(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] -= (W_AM * (b0[i] + b2[i]) + W_AO) >> W_AS;
+}
+
+static void vertical_decompose97iH1(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] += (W_CM * (b0[i] + b2[i]) + W_CO) >> W_CS;
+}
+
+static void vertical_decompose97iL0(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] = (16 * 4 * b1[i] - 4 * (b0[i] + b2[i]) + W_BO * 5 + (5 << 27)) /
+ (5 * 16) - (1 << 23);
+}
+
+static void vertical_decompose97iL1(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] += (W_DM * (b0[i] + b2[i]) + W_DO) >> W_DS;
+}
+
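+/* One level of the vertical+horizontal 9/7 analysis, using a sliding window
+ * of six row pointers (b0..b5) so each row is touched only once per level. */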
+static void spatial_decompose97i(DWTELEM *buffer, DWTELEM *temp,
+ int width, int height, int stride)
+{
+ int y;
+ DWTELEM *b0 = buffer + mirror(-4 - 1, height - 1) * stride;
+ DWTELEM *b1 = buffer + mirror(-4, height - 1) * stride;
+ DWTELEM *b2 = buffer + mirror(-4 + 1, height - 1) * stride;
+ DWTELEM *b3 = buffer + mirror(-4 + 2, height - 1) * stride;
+
+ for (y = -4; y < height; y += 2) {
+ DWTELEM *b4 = buffer + mirror(y + 3, height - 1) * stride;
+ DWTELEM *b5 = buffer + mirror(y + 4, height - 1) * stride;
+
+ if (y + 3 < (unsigned)height)
+ horizontal_decompose97i(b4, temp, width);
+ if (y + 4 < (unsigned)height)
+ horizontal_decompose97i(b5, temp, width);
+
+ if (y + 3 < (unsigned)height)
+ vertical_decompose97iH0(b3, b4, b5, width);
+ if (y + 2 < (unsigned)height)
+ vertical_decompose97iL0(b2, b3, b4, width);
+ if (y + 1 < (unsigned)height)
+ vertical_decompose97iH1(b1, b2, b3, width);
+ if (y + 0 < (unsigned)height)
+ vertical_decompose97iL1(b0, b1, b2, width);
+
+ b0 = b2;
+ b1 = b3;
+ b2 = b4;
+ b3 = b5;
+ }
+}
+
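+/* Forward spatial DWT. Usage sketch (caller assumed, not from this file):
+ * a 4-level 9/7 transform of one plane would be
+ *     ff_spatial_dwt(buffer, temp, width, height, stride, DWT_97, 4);
+ * level n transforms the (width >> n) x (height >> n) low band in place. */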
+void ff_spatial_dwt(DWTELEM *buffer, DWTELEM *temp, int width, int height,
+ int stride, int type, int decomposition_count)
+{
+ int level;
+
+ for (level = 0; level < decomposition_count; level++) {
+ switch (type) {
+ case DWT_97:
+ spatial_decompose97i(buffer, temp,
+ width >> level, height >> level,
+ stride << level);
+ break;
+ case DWT_53:
+ spatial_decompose53i(buffer, temp,
+ width >> level, height >> level,
+ stride << level);
+ break;
+ }
+ }
+}
+
+static void horizontal_compose53i(IDWTELEM *b, IDWTELEM *temp, int width)
+{
+ const int width2 = width >> 1;
+ const int w2 = (width + 1) >> 1;
+ int x;
+
+ for (x = 0; x < width2; x++) {
+ temp[2 * x] = b[x];
+ temp[2 * x + 1] = b[x + w2];
+ }
+ if (width & 1)
+ temp[2 * x] = b[x];
+
+ b[0] = temp[0] - ((temp[1] + 1) >> 1);
+ for (x = 2; x < width - 1; x += 2) {
+ b[x] = temp[x] - ((temp[x - 1] + temp[x + 1] + 2) >> 2);
+ b[x - 1] = temp[x - 1] + ((b[x - 2] + b[x] + 1) >> 1);
+ }
+ if (width & 1) {
+ b[x] = temp[x] - ((temp[x - 1] + 1) >> 1);
+ b[x - 1] = temp[x - 1] + ((b[x - 2] + b[x] + 1) >> 1);
+ } else
+ b[x - 1] = temp[x - 1] + b[x - 2];
+}
+
+static void vertical_compose53iH0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] += (b0[i] + b2[i]) >> 1;
+}
+
+static void vertical_compose53iL0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] -= (b0[i] + b2[i] + 2) >> 2;
+}
+
+static void spatial_compose53i_buffered_init(DWTCompose *cs, slice_buffer *sb,
+ int height, int stride_line)
+{
+ cs->b0 = slice_buffer_get_line(sb,
+ mirror(-1 - 1, height - 1) * stride_line);
+ cs->b1 = slice_buffer_get_line(sb, mirror(-1, height - 1) * stride_line);
+ cs->y = -1;
+}
+
+static void spatial_compose53i_init(DWTCompose *cs, IDWTELEM *buffer,
+ int height, int stride)
+{
+ cs->b0 = buffer + mirror(-1 - 1, height - 1) * stride;
+ cs->b1 = buffer + mirror(-1, height - 1) * stride;
+ cs->y = -1;
+}
+
+static void spatial_compose53i_dy_buffered(DWTCompose *cs, slice_buffer *sb,
+ IDWTELEM *temp,
+ int width, int height,
+ int stride_line)
+{
+ int y = cs->y;
+
+ IDWTELEM *b0 = cs->b0;
+ IDWTELEM *b1 = cs->b1;
+ IDWTELEM *b2 = slice_buffer_get_line(sb,
+ mirror(y + 1, height - 1) *
+ stride_line);
+ IDWTELEM *b3 = slice_buffer_get_line(sb,
+ mirror(y + 2, height - 1) *
+ stride_line);
+
+ if (y + 1 < (unsigned)height && y < (unsigned)height) {
+ int x;
+
+ for (x = 0; x < width; x++) {
+ b2[x] -= (b1[x] + b3[x] + 2) >> 2;
+ b1[x] += (b0[x] + b2[x]) >> 1;
+ }
+ } else {
+ if (y + 1 < (unsigned)height)
+ vertical_compose53iL0(b1, b2, b3, width);
+ if (y + 0 < (unsigned)height)
+ vertical_compose53iH0(b0, b1, b2, width);
+ }
+
+ if (y - 1 < (unsigned)height)
+ horizontal_compose53i(b0, temp, width);
+ if (y + 0 < (unsigned)height)
+ horizontal_compose53i(b1, temp, width);
+
+ cs->b0 = b2;
+ cs->b1 = b3;
+ cs->y += 2;
+}
+
+static void spatial_compose53i_dy(DWTCompose *cs, IDWTELEM *buffer,
+ IDWTELEM *temp, int width, int height,
+ int stride)
+{
+ int y = cs->y;
+ IDWTELEM *b0 = cs->b0;
+ IDWTELEM *b1 = cs->b1;
+ IDWTELEM *b2 = buffer + mirror(y + 1, height - 1) * stride;
+ IDWTELEM *b3 = buffer + mirror(y + 2, height - 1) * stride;
+
+ if (y + 1 < (unsigned)height)
+ vertical_compose53iL0(b1, b2, b3, width);
+ if (y + 0 < (unsigned)height)
+ vertical_compose53iH0(b0, b1, b2, width);
+
+ if (y - 1 < (unsigned)height)
+ horizontal_compose53i(b0, temp, width);
+ if (y + 0 < (unsigned)height)
+ horizontal_compose53i(b1, temp, width);
+
+ cs->b0 = b2;
+ cs->b1 = b3;
+ cs->y += 2;
+}
+
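+/* Inverse horizontal 9/7 lifting: the first loop merges the two band
+ * halves into temp while undoing lifting steps D and C, the second loop
+ * undoes steps B and A back into b. */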
+void ff_snow_horizontal_compose97i(IDWTELEM *b, IDWTELEM *temp, int width)
+{
+ const int w2 = (width + 1) >> 1;
+ int x;
+
+ temp[0] = b[0] - ((3 * b[w2] + 2) >> 2);
+ for (x = 1; x < (width >> 1); x++) {
+ temp[2 * x] = b[x] - ((3 * (b[x + w2 - 1] + b[x + w2]) + 4) >> 3);
+ temp[2 * x - 1] = b[x + w2 - 1] - temp[2 * x - 2] - temp[2 * x];
+ }
+ if (width & 1) {
+ temp[2 * x] = b[x] - ((3 * b[x + w2 - 1] + 2) >> 2);
+ temp[2 * x - 1] = b[x + w2 - 1] - temp[2 * x - 2] - temp[2 * x];
+ } else
+ temp[2 * x - 1] = b[x + w2 - 1] - 2 * temp[2 * x - 2];
+
+ b[0] = temp[0] + ((2 * temp[0] + temp[1] + 4) >> 3);
+ for (x = 2; x < width - 1; x += 2) {
+ b[x] = temp[x] + ((4 * temp[x] + temp[x - 1] + temp[x + 1] + 8) >> 4);
+ b[x - 1] = temp[x - 1] + ((3 * (b[x - 2] + b[x])) >> 1);
+ }
+ if (width & 1) {
+ b[x] = temp[x] + ((2 * temp[x] + temp[x - 1] + 4) >> 3);
+ b[x - 1] = temp[x - 1] + ((3 * (b[x - 2] + b[x])) >> 1);
+ } else
+ b[x - 1] = temp[x - 1] + 3 * b[x - 2];
+}
+
+static void vertical_compose97iH0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] += (W_AM * (b0[i] + b2[i]) + W_AO) >> W_AS;
+}
+
+static void vertical_compose97iH1(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] -= (W_CM * (b0[i] + b2[i]) + W_CO) >> W_CS;
+}
+
+static void vertical_compose97iL0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] += (W_BM * (b0[i] + b2[i]) + 4 * b1[i] + W_BO) >> W_BS;
+}
+
+static void vertical_compose97iL1(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++)
+ b1[i] -= (W_DM * (b0[i] + b2[i]) + W_DO) >> W_DS;
+}
+
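+/* Fused vertical 9/7 inverse: applies all four lifting steps (D on b4,
+ * C on b3, B on b2, A on b1) in a single pass over the rows. */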
+void ff_snow_vertical_compose97i(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
+ IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5,
+ int width)
+{
+ int i;
+
+ for (i = 0; i < width; i++) {
+ b4[i] -= (W_DM * (b3[i] + b5[i]) + W_DO) >> W_DS;
+ b3[i] -= (W_CM * (b2[i] + b4[i]) + W_CO) >> W_CS;
+ b2[i] += (W_BM * (b1[i] + b3[i]) + 4 * b2[i] + W_BO) >> W_BS;
+ b1[i] += (W_AM * (b0[i] + b2[i]) + W_AO) >> W_AS;
+ }
+}
+
+static void spatial_compose97i_buffered_init(DWTCompose *cs, slice_buffer *sb,
+ int height, int stride_line)
+{
+ cs->b0 = slice_buffer_get_line(sb, mirror(-3 - 1, height - 1) * stride_line);
+ cs->b1 = slice_buffer_get_line(sb, mirror(-3, height - 1) * stride_line);
+ cs->b2 = slice_buffer_get_line(sb, mirror(-3 + 1, height - 1) * stride_line);
+ cs->b3 = slice_buffer_get_line(sb, mirror(-3 + 2, height - 1) * stride_line);
+ cs->y = -3;
+}
+
+static void spatial_compose97i_init(DWTCompose *cs, IDWTELEM *buffer, int height,
+ int stride)
+{
+ cs->b0 = buffer + mirror(-3 - 1, height - 1) * stride;
+ cs->b1 = buffer + mirror(-3, height - 1) * stride;
+ cs->b2 = buffer + mirror(-3 + 1, height - 1) * stride;
+ cs->b3 = buffer + mirror(-3 + 2, height - 1) * stride;
+ cs->y = -3;
+}
+
+static void spatial_compose97i_dy_buffered(SnowDWTContext *dsp, DWTCompose *cs,
+ slice_buffer * sb, IDWTELEM *temp,
+ int width, int height,
+ int stride_line)
+{
+ int y = cs->y;
+
+ IDWTELEM *b0 = cs->b0;
+ IDWTELEM *b1 = cs->b1;
+ IDWTELEM *b2 = cs->b2;
+ IDWTELEM *b3 = cs->b3;
+ IDWTELEM *b4 = slice_buffer_get_line(sb,
+ mirror(y + 3, height - 1) *
+ stride_line);
+ IDWTELEM *b5 = slice_buffer_get_line(sb,
+ mirror(y + 4, height - 1) *
+ stride_line);
+
+ if (y > 0 && y + 4 < height) {
+ dsp->vertical_compose97i(b0, b1, b2, b3, b4, b5, width);
+ } else {
+ if (y + 3 < (unsigned)height)
+ vertical_compose97iL1(b3, b4, b5, width);
+ if (y + 2 < (unsigned)height)
+ vertical_compose97iH1(b2, b3, b4, width);
+ if (y + 1 < (unsigned)height)
+ vertical_compose97iL0(b1, b2, b3, width);
+ if (y + 0 < (unsigned)height)
+ vertical_compose97iH0(b0, b1, b2, width);
+ }
+
+ if (y - 1 < (unsigned)height)
+ dsp->horizontal_compose97i(b0, temp, width);
+ if (y + 0 < (unsigned)height)
+ dsp->horizontal_compose97i(b1, temp, width);
+
+ cs->b0 = b2;
+ cs->b1 = b3;
+ cs->b2 = b4;
+ cs->b3 = b5;
+ cs->y += 2;
+}
+
+static void spatial_compose97i_dy(DWTCompose *cs, IDWTELEM *buffer,
+ IDWTELEM *temp, int width, int height,
+ int stride)
+{
+ int y = cs->y;
+ IDWTELEM *b0 = cs->b0;
+ IDWTELEM *b1 = cs->b1;
+ IDWTELEM *b2 = cs->b2;
+ IDWTELEM *b3 = cs->b3;
+ IDWTELEM *b4 = buffer + mirror(y + 3, height - 1) * stride;
+ IDWTELEM *b5 = buffer + mirror(y + 4, height - 1) * stride;
+
+ if (y + 3 < (unsigned)height)
+ vertical_compose97iL1(b3, b4, b5, width);
+ if (y + 2 < (unsigned)height)
+ vertical_compose97iH1(b2, b3, b4, width);
+ if (y + 1 < (unsigned)height)
+ vertical_compose97iL0(b1, b2, b3, width);
+ if (y + 0 < (unsigned)height)
+ vertical_compose97iH0(b0, b1, b2, width);
+
+ if (y - 1 < (unsigned)height)
+ ff_snow_horizontal_compose97i(b0, temp, width);
+ if (y + 0 < (unsigned)height)
+ ff_snow_horizontal_compose97i(b1, temp, width);
+
+ cs->b0 = b2;
+ cs->b1 = b3;
+ cs->b2 = b4;
+ cs->b3 = b5;
+ cs->y += 2;
+}
+
+void ff_spatial_idwt_buffered_init(DWTCompose *cs, slice_buffer *sb, int width,
+ int height, int stride_line, int type,
+ int decomposition_count)
+{
+ int level;
+ for (level = decomposition_count - 1; level >= 0; level--) {
+ switch (type) {
+ case DWT_97:
+ spatial_compose97i_buffered_init(cs + level, sb, height >> level,
+ stride_line << level);
+ break;
+ case DWT_53:
+ spatial_compose53i_buffered_init(cs + level, sb, height >> level,
+ stride_line << level);
+ break;
+ }
+ }
+}
+
+void ff_spatial_idwt_buffered_slice(SnowDWTContext *dsp, DWTCompose *cs,
+ slice_buffer *slice_buf, IDWTELEM *temp,
+ int width, int height, int stride_line,
+ int type, int decomposition_count, int y)
+{
+ const int support = type == 1 ? 3 : 5;
+ int level;
+ if (type == 2)
+ return;
+
+ for (level = decomposition_count - 1; level >= 0; level--)
+ while (cs[level].y <= FFMIN((y >> level) + support, height >> level)) {
+ switch (type) {
+ case DWT_97:
+ spatial_compose97i_dy_buffered(dsp, cs + level, slice_buf, temp,
+ width >> level,
+ height >> level,
+ stride_line << level);
+ break;
+ case DWT_53:
+ spatial_compose53i_dy_buffered(cs + level, slice_buf, temp,
+ width >> level,
+ height >> level,
+ stride_line << level);
+ break;
+ }
+ }
+}
+
+static void ff_spatial_idwt_init(DWTCompose *cs, IDWTELEM *buffer, int width,
+ int height, int stride, int type,
+ int decomposition_count)
+{
+ int level;
+ for (level = decomposition_count - 1; level >= 0; level--) {
+ switch (type) {
+ case DWT_97:
+ spatial_compose97i_init(cs + level, buffer, height >> level,
+ stride << level);
+ break;
+ case DWT_53:
+ spatial_compose53i_init(cs + level, buffer, height >> level,
+ stride << level);
+ break;
+ }
+ }
+}
+
+static void ff_spatial_idwt_slice(DWTCompose *cs, IDWTELEM *buffer,
+ IDWTELEM *temp, int width, int height,
+ int stride, int type,
+ int decomposition_count, int y)
+{
+ const int support = type == 1 ? 3 : 5;
+ int level;
+ if (type == 2)
+ return;
+
+ for (level = decomposition_count - 1; level >= 0; level--)
+ while (cs[level].y <= FFMIN((y >> level) + support, height >> level)) {
+ switch (type) {
+ case DWT_97:
+ spatial_compose97i_dy(cs + level, buffer, temp, width >> level,
+ height >> level, stride << level);
+ break;
+ case DWT_53:
+ spatial_compose53i_dy(cs + level, buffer, temp, width >> level,
+ height >> level, stride << level);
+ break;
+ }
+ }
+}
+
+void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height,
+ int stride, int type, int decomposition_count)
+{
+ DWTCompose cs[MAX_DECOMPOSITIONS];
+ int y;
+ ff_spatial_idwt_init(cs, buffer, width, height, stride, type,
+ decomposition_count);
+ for (y = 0; y < height; y += 4)
+ ff_spatial_idwt_slice(cs, buffer, temp, width, height, stride, type,
+ decomposition_count, y);
+}
+
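+/* Wavelet-domain difference metric: transform the pix1 - pix2 residual
+ * with a 5/3 (type 1) or 9/7 (type 0) spatial DWT and sum the absolute
+ * subband coefficients, weighted per level and orientation. */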
- static int w53_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
++static inline int w_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size,
+ int w, int h, int type)
+{
+ int s, i, j;
+ const int dec_count = w == 8 ? 3 : 4;
+ int tmp[32 * 32], tmp2[32];
+ int level, ori;
+ static const int scale[2][2][4][4] = {
+ {
+ { // 9/7 8x8 dec=3
+ { 268, 239, 239, 213 },
+ { 0, 224, 224, 152 },
+ { 0, 135, 135, 110 },
+ },
+ { // 9/7 16x16 or 32x32 dec=4
+ { 344, 310, 310, 280 },
+ { 0, 320, 320, 228 },
+ { 0, 175, 175, 136 },
+ { 0, 129, 129, 102 },
+ }
+ },
+ {
+ { // 5/3 8x8 dec=3
+ { 275, 245, 245, 218 },
+ { 0, 230, 230, 156 },
+ { 0, 138, 138, 113 },
+ },
+ { // 5/3 16x16 or 32x32 dec=4
+ { 352, 317, 317, 286 },
+ { 0, 328, 328, 233 },
+ { 0, 180, 180, 140 },
+ { 0, 132, 132, 105 },
+ }
+ }
+ };
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j += 4) {
+ tmp[32 * i + j + 0] = (pix1[j + 0] - pix2[j + 0]) << 4;
+ tmp[32 * i + j + 1] = (pix1[j + 1] - pix2[j + 1]) << 4;
+ tmp[32 * i + j + 2] = (pix1[j + 2] - pix2[j + 2]) << 4;
+ tmp[32 * i + j + 3] = (pix1[j + 3] - pix2[j + 3]) << 4;
+ }
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+
+ ff_spatial_dwt(tmp, tmp2, w, h, 32, type, dec_count);
+
+ s = 0;
+ av_assert1(w == h);
+ for (level = 0; level < dec_count; level++)
+ for (ori = level ? 1 : 0; ori < 4; ori++) {
+ int size = w >> (dec_count - level);
+ int sx = (ori & 1) ? size : 0;
+ int stride = 32 << (dec_count - level);
+ int sy = (ori & 2) ? stride >> 1 : 0;
+
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++) {
+ int v = tmp[sx + sy + i * stride + j] *
+ scale[type][dec_count - 3][level][ori];
+ s += FFABS(v);
+ }
+ }
+ av_assert1(s >= 0);
+ return s >> 9;
+}
+
- static int w97_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
++static int w53_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+{
+ return w_c(v, pix1, pix2, line_size, 8, h, 1);
+}
+
- static int w53_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
++static int w97_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+{
+ return w_c(v, pix1, pix2, line_size, 8, h, 0);
+}
+
- static int w97_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
++static int w53_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+{
+ return w_c(v, pix1, pix2, line_size, 16, h, 1);
+}
+
- int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
++static int w97_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+{
+ return w_c(v, pix1, pix2, line_size, 16, h, 0);
+}
+
- int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
++int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+{
+ return w_c(v, pix1, pix2, line_size, 32, h, 1);
+}
+
++int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+{
+ return w_c(v, pix1, pix2, line_size, 32, h, 0);
+}
+
+void ff_dsputil_init_dwt(MECmpContext *c)
+{
+ c->w53[0] = w53_16_c;
+ c->w53[1] = w53_8_c;
+ c->w97[0] = w97_16_c;
+ c->w97[1] = w97_8_c;
+}
+
+void ff_dwt_init(SnowDWTContext *c)
+{
+ c->vertical_compose97i = ff_snow_vertical_compose97i;
+ c->horizontal_compose97i = ff_snow_horizontal_compose97i;
+ c->inner_add_yblock = ff_snow_inner_add_yblock;
+
+ if (HAVE_MMX)
+ ff_dwt_init_x86(c);
+}
+
+
--- /dev/null
- int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
- int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
+/*
+ * Copyright (C) 2004-2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_SNOW_DWT_H
+#define AVCODEC_SNOW_DWT_H
+
+#include <stdint.h>
+
+typedef int DWTELEM;
+typedef short IDWTELEM;
+
+#define MAX_DECOMPOSITIONS 8
+
+typedef struct DWTCompose {
+ IDWTELEM *b0;
+ IDWTELEM *b1;
+ IDWTELEM *b2;
+ IDWTELEM *b3;
+ int y;
+} DWTCompose;
+
+/** Slice buffer: keeps only the lines that are currently needed in
+ * memory, minimizing the amount used and improving cache performance. */
+typedef struct slice_buffer_s {
+ IDWTELEM **line; ///< For use by idwt and predict_slices.
+ IDWTELEM **data_stack; ///< Used for internal purposes.
+ int data_stack_top;
+ int line_count;
+ int line_width;
+ int data_count;
+ IDWTELEM *base_buffer; ///< Buffer that this structure is caching.
+} slice_buffer;
+
+struct SnowDWTContext;
+
+typedef struct SnowDWTContext {
+ void (*vertical_compose97i)(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
+ IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5,
+ int width);
+ void (*horizontal_compose97i)(IDWTELEM *b, IDWTELEM *temp, int width);
+ void (*inner_add_yblock)(const uint8_t *obmc, const int obmc_stride,
+ uint8_t **block, int b_w, int b_h, int src_x,
+ int src_y, int src_stride, slice_buffer *sb,
+ int add, uint8_t *dst8);
+} SnowDWTContext;
+
+
+#define DWT_97 0
+#define DWT_53 1
+
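+/* Integer lifting coefficients for the 9/7 wavelet: for each lifting
+ * step X in A..D, W_XM is the multiplier, W_XO the rounding offset and
+ * W_XS the right shift, as in (W_XM * (b0[i] + b2[i]) + W_XO) >> W_XS. */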
+#define liftS lift
+#define W_AM 3
+#define W_AO 0
+#define W_AS 1
+
+#undef liftS
+#define W_BM 1
+#define W_BO 8
+#define W_BS 4
+
+#define W_CM 1
+#define W_CO 0
+#define W_CS 0
+
+#define W_DM 3
+#define W_DO 4
+#define W_DS 3
+
+#define slice_buffer_get_line(slice_buf, line_num) \
+ ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] \
+ : ff_slice_buffer_load_line((slice_buf), \
+ (line_num)))
+
+int ff_slice_buffer_init(slice_buffer *buf, int line_count,
+ int max_allocated_lines, int line_width,
+ IDWTELEM *base_buffer);
+void ff_slice_buffer_release(slice_buffer *buf, int line);
+void ff_slice_buffer_flush(slice_buffer *buf);
+void ff_slice_buffer_destroy(slice_buffer *buf);
+IDWTELEM *ff_slice_buffer_load_line(slice_buffer *buf, int line);
+
+void ff_snow_vertical_compose97i(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
+ IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5,
+ int width);
+void ff_snow_horizontal_compose97i(IDWTELEM *b, IDWTELEM *temp, int width);
+void ff_snow_inner_add_yblock(const uint8_t *obmc, const int obmc_stride,
+ uint8_t **block, int b_w, int b_h, int src_x,
+ int src_y, int src_stride, slice_buffer *sb,
+ int add, uint8_t *dst8);
+
++int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h);
++int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h);
+
+void ff_spatial_dwt(int *buffer, int *temp, int width, int height, int stride,
+ int type, int decomposition_count);
+
+void ff_spatial_idwt_buffered_init(DWTCompose *cs, slice_buffer *sb, int width,
+ int height, int stride_line, int type,
+ int decomposition_count);
+void ff_spatial_idwt_buffered_slice(SnowDWTContext *dsp, DWTCompose *cs,
+ slice_buffer *slice_buf, IDWTELEM *temp,
+ int width, int height, int stride_line,
+ int type, int decomposition_count, int y);
+void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height,
+ int stride, int type, int decomposition_count);
+
+void ff_dwt_init(SnowDWTContext *c);
+void ff_dwt_init_x86(SnowDWTContext *c);
+
+#endif /* AVCODEC_SNOW_DWT_H */
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************
%include "libavutil/x86/x86util.asm"
+SECTION_RODATA
+
+cextern pb_1
+cextern pb_80
+
SECTION .text
%macro DIFF_PIXELS_1 4
%elif cpuflag(mmx)
ALIGN 16
; int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1,
--; uint8_t *src2, int stride, int h)
++; uint8_t *src2, ptrdiff_t stride, int h)
; r0 = void *s = unused, int h = unused (always 8)
; note how r1, r2 and r3 are not clobbered in this function, so 16x16
; can simply call this 2x2x (and that's why we access rsp+gprsize
%define ABS_SUM_8x8 ABS_SUM_8x8_64
HADAMARD8_DIFF 9
-INIT_XMM sse2
-; int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-; int line_size, int h);
-cglobal sse16, 5, 5, 8
- shr r4d, 1
+; int ff_sse*_*(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- ; int line_size, int h)
++; ptrdiff_t line_size, int h)
+
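+; %1 = block width (8 or 16). When %1 == mmsize the loop consumes two
+; rows per iteration; otherwise (16-wide blocks on mmx) it consumes one
+; row per iteration in two 8-byte halves.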
+%macro SUM_SQUARED_ERRORS 1
+cglobal sse%1, 5,5,8, v, pix1, pix2, lsize, h
+%if %1 == mmsize
+ shr hd, 1
+%endif
pxor m0, m0 ; mm0 = 0
pxor m7, m7 ; mm7 holds the sum
.next2lines: ; FIXME why are these unaligned movs? pix1[] is aligned
- movu m1, [r1 ] ; mm1 = pix1[0][0-15]
- movu m2, [r2 ] ; mm2 = pix2[0][0-15]
- movu m3, [r1+r3] ; mm3 = pix1[1][0-15]
- movu m4, [r2+r3] ; mm4 = pix2[1][0-15]
+ movu m1, [pix1q] ; m1 = pix1[0][0-15], [0-7] for mmx
+ movu m2, [pix2q] ; m2 = pix2[0][0-15], [0-7] for mmx
+%if %1 == mmsize
+ movu m3, [pix1q+lsizeq] ; m3 = pix1[1][0-15], [0-7] for mmx
+ movu m4, [pix2q+lsizeq] ; m4 = pix2[1][0-15], [0-7] for mmx
+%else ; %1 / 2 == mmsize; mmx only
+ mova m3, [pix1q+8] ; m3 = pix1[0][8-15]
+ mova m4, [pix2q+8] ; m4 = pix2[0][8-15]
+%endif
; todo: mm1-mm2, mm3-mm4
; algo: subtract mm1 from mm2 with saturation and vice versa
pmaddwd m1, m1
pmaddwd m3, m3
- lea r1, [r1+r3*2] ; pix1 += 2*line_size
- lea r2, [r2+r3*2] ; pix2 += 2*line_size
-
paddd m1, m2
paddd m3, m4
paddd m7, m1
paddd m7, m3
- dec r4
+%if %1 == mmsize
+ lea pix1q, [pix1q + 2*lsizeq]
+ lea pix2q, [pix2q + 2*lsizeq]
+%else
+ add pix1q, lsizeq
+ add pix2q, lsizeq
+%endif
+ dec hd
jnz .next2lines
- mova m1, m7
- psrldq m7, 8 ; shift hi qword to lo
- paddd m7, m1
- mova m1, m7
- psrldq m7, 4 ; shift hi dword to lo
- paddd m7, m1
+ HADDD m7, m1
movd eax, m7 ; return value
RET
- ; int ff_hf_noise*_mmx(uint8_t *pix1, int lsize, int h)
+%endmacro
+
+INIT_MMX mmx
+SUM_SQUARED_ERRORS 8
+
+INIT_MMX mmx
+SUM_SQUARED_ERRORS 16
+
+INIT_XMM sse2
+SUM_SQUARED_ERRORS 16
+
+;-----------------------------------------------
+;int ff_sum_abs_dctelem(int16_t *block)
+;-----------------------------------------------
+; %1 = number of xmm registers used
+; %2 = number of inline loops
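+; Sums |block[i]| over all 64 coefficients; the unsigned-saturating adds
+; and the final mask clamp the result to 16 bits.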
+
+%macro SUM_ABS_DCTELEM 2
+cglobal sum_abs_dctelem, 1, 1, %1, block
+ pxor m0, m0
+ pxor m1, m1
+%assign %%i 0
+%rep %2
+ mova m2, [blockq+mmsize*(0+%%i)]
+ mova m3, [blockq+mmsize*(1+%%i)]
+ mova m4, [blockq+mmsize*(2+%%i)]
+ mova m5, [blockq+mmsize*(3+%%i)]
+ ABS1_SUM m2, m6, m0
+ ABS1_SUM m3, m6, m1
+ ABS1_SUM m4, m6, m0
+ ABS1_SUM m5, m6, m1
+%assign %%i %%i+4
+%endrep
+ paddusw m0, m1
+ HSUM m0, m1, eax
+ and eax, 0xFFFF
+ RET
+%endmacro
+
+INIT_MMX mmx
+SUM_ABS_DCTELEM 0, 4
+INIT_MMX mmxext
+SUM_ABS_DCTELEM 0, 4
+INIT_XMM sse2
+SUM_ABS_DCTELEM 7, 2
+INIT_XMM ssse3
+SUM_ABS_DCTELEM 6, 2
+
+;------------------------------------------------------------------------------
- movsxdifnidn lsizeq, lsized
++; int ff_hf_noise*_mmx(uint8_t *pix1, ptrdiff_t lsize, int h)
+;------------------------------------------------------------------------------
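+; Computes horizontal first differences within each line, then sums the
+; absolute change of those differences between consecutive lines, i.e. a
+; high-frequency noise measure of the block.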
+; %1 = 8/16. %2-5=m#
+%macro HF_NOISE_PART1 5
+ mova m%2, [pix1q]
+%if %1 == 8
+ mova m%3, m%2
+ psllq m%2, 8
+ psrlq m%3, 8
+ psrlq m%2, 8
+%else
+ mova m%3, [pix1q+1]
+%endif
+ mova m%4, m%2
+ mova m%5, m%3
+ punpcklbw m%2, m7
+ punpcklbw m%3, m7
+ punpckhbw m%4, m7
+ punpckhbw m%5, m7
+ psubw m%2, m%3
+ psubw m%4, m%5
+%endmacro
+
+; %1-4 = m#
+%macro HF_NOISE_PART2 4
+ psubw m%1, m%3
+ psubw m%2, m%4
+ pxor m3, m3
+ pxor m1, m1
+ pcmpgtw m3, m%1
+ pcmpgtw m1, m%2
+ pxor m%1, m3
+ pxor m%2, m1
+ psubw m%1, m3
+ psubw m%2, m1
+ paddw m%2, m%1
+ paddw m6, m%2
+%endmacro
+
+; %1 = 8/16
+%macro HF_NOISE 1
+cglobal hf_noise%1, 3,3,0, pix1, lsize, h
- ;int ff_sad_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
+ sub hd, 2
+ pxor m7, m7
+ pxor m6, m6
+ HF_NOISE_PART1 %1, 0, 1, 2, 3
+ add pix1q, lsizeq
+ HF_NOISE_PART1 %1, 4, 1, 5, 3
+ HF_NOISE_PART2 0, 2, 4, 5
+ add pix1q, lsizeq
+.loop:
+ HF_NOISE_PART1 %1, 0, 1, 2, 3
+ HF_NOISE_PART2 4, 5, 0, 2
+ add pix1q, lsizeq
+ HF_NOISE_PART1 %1, 4, 1, 5, 3
+ HF_NOISE_PART2 0, 2, 4, 5
+ add pix1q, lsizeq
+ sub hd, 2
+ jne .loop
+
+ mova m0, m6
+ punpcklwd m0, m7
+ punpckhwd m6, m7
+ paddd m6, m0
+ mova m0, m6
+ psrlq m6, 32
+ paddd m0, m6
+    movd eax, m0    ; eax = result of hf_noise8/hf_noise16;
+ REP_RET ; return eax;
+%endmacro
+
+INIT_MMX mmx
+HF_NOISE 8
+HF_NOISE 16
+
+;---------------------------------------------------------------------------------------
- ;int ff_sad_x2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
++;int ff_sad_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
+;---------------------------------------------------------------------------------------
+;%1 = 8/16
+%macro SAD 1
+cglobal sad%1, 5, 5, 3, v, pix1, pix2, stride, h
+ movu m2, [pix2q]
+ movu m1, [pix2q+strideq]
+ psadbw m2, [pix1q]
+ psadbw m1, [pix1q+strideq]
+ paddw m2, m1
+%if %1 != mmsize
+ movu m0, [pix2q+8]
+ movu m1, [pix2q+strideq+8]
+ psadbw m0, [pix1q+8]
+ psadbw m1, [pix1q+strideq+8]
+ paddw m2, m0
+ paddw m2, m1
+%endif
+ sub hd, 2
+
+align 16
+.loop:
+ lea pix1q, [pix1q+strideq*2]
+ lea pix2q, [pix2q+strideq*2]
+ movu m0, [pix2q]
+ movu m1, [pix2q+strideq]
+ psadbw m0, [pix1q]
+ psadbw m1, [pix1q+strideq]
+ paddw m2, m0
+ paddw m2, m1
+%if %1 != mmsize
+ movu m0, [pix2q+8]
+ movu m1, [pix2q+strideq+8]
+ psadbw m0, [pix1q+8]
+ psadbw m1, [pix1q+strideq+8]
+ paddw m2, m0
+ paddw m2, m1
+%endif
+ sub hd, 2
+ jg .loop
+%if mmsize == 16
+ movhlps m0, m2
+ paddw m2, m0
+%endif
+ movd eax, m2
+ RET
+%endmacro
+
+INIT_MMX mmxext
+SAD 8
+SAD 16
+INIT_XMM sse2
+SAD 16
+
+;------------------------------------------------------------------------------------------
- ;int ff_sad_y2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
++;int ff_sad_x2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
+;------------------------------------------------------------------------------------------
+;%1 = 8/16
+%macro SAD_X2 1
+cglobal sad%1_x2, 5, 5, 5, v, pix1, pix2, stride, h
+ movu m0, [pix2q]
+ movu m2, [pix2q+strideq]
+%if mmsize == 16
+ movu m3, [pix2q+1]
+ movu m4, [pix2q+strideq+1]
+ pavgb m0, m3
+ pavgb m2, m4
+%else
+ pavgb m0, [pix2q+1]
+ pavgb m2, [pix2q+strideq+1]
+%endif
+ psadbw m0, [pix1q]
+ psadbw m2, [pix1q+strideq]
+ paddw m0, m2
+%if %1 != mmsize
+ movu m1, [pix2q+8]
+ movu m2, [pix2q+strideq+8]
+ pavgb m1, [pix2q+9]
+ pavgb m2, [pix2q+strideq+9]
+ psadbw m1, [pix1q+8]
+ psadbw m2, [pix1q+strideq+8]
+ paddw m0, m1
+ paddw m0, m2
+%endif
+ sub hd, 2
+
+align 16
+.loop:
+ lea pix1q, [pix1q+2*strideq]
+ lea pix2q, [pix2q+2*strideq]
+ movu m1, [pix2q]
+ movu m2, [pix2q+strideq]
+%if mmsize == 16
+ movu m3, [pix2q+1]
+ movu m4, [pix2q+strideq+1]
+ pavgb m1, m3
+ pavgb m2, m4
+%else
+ pavgb m1, [pix2q+1]
+ pavgb m2, [pix2q+strideq+1]
+%endif
+ psadbw m1, [pix1q]
+ psadbw m2, [pix1q+strideq]
+ paddw m0, m1
+ paddw m0, m2
+%if %1 != mmsize
+ movu m1, [pix2q+8]
+ movu m2, [pix2q+strideq+8]
+ pavgb m1, [pix2q+9]
+ pavgb m2, [pix2q+strideq+9]
+ psadbw m1, [pix1q+8]
+ psadbw m2, [pix1q+strideq+8]
+ paddw m0, m1
+ paddw m0, m2
+%endif
+ sub hd, 2
+ jg .loop
+%if mmsize == 16
+ movhlps m1, m0
+ paddw m0, m1
+%endif
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_MMX mmxext
+SAD_X2 8
+SAD_X2 16
+INIT_XMM sse2
+SAD_X2 16
+
+;------------------------------------------------------------------------------------------
- ;int ff_sad_approx_xy2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
++;int ff_sad_y2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
+;------------------------------------------------------------------------------------------
+;%1 = 8/16
+%macro SAD_Y2 1
+cglobal sad%1_y2, 5, 5, 4, v, pix1, pix2, stride, h
+ movu m1, [pix2q]
+ movu m0, [pix2q+strideq]
+ movu m3, [pix2q+2*strideq]
+ pavgb m1, m0
+ pavgb m0, m3
+ psadbw m1, [pix1q]
+ psadbw m0, [pix1q+strideq]
+ paddw m0, m1
+ mova m1, m3
+%if %1 != mmsize
+ movu m4, [pix2q+8]
+ movu m5, [pix2q+strideq+8]
+ movu m6, [pix2q+2*strideq+8]
+ pavgb m4, m5
+ pavgb m5, m6
+ psadbw m4, [pix1q+8]
+ psadbw m5, [pix1q+strideq+8]
+ paddw m0, m4
+ paddw m0, m5
+ mova m4, m6
+%endif
+ add pix2q, strideq
+ sub hd, 2
+
+align 16
+.loop:
+ lea pix1q, [pix1q+2*strideq]
+ lea pix2q, [pix2q+2*strideq]
+ movu m2, [pix2q]
+ movu m3, [pix2q+strideq]
+ pavgb m1, m2
+ pavgb m2, m3
+ psadbw m1, [pix1q]
+ psadbw m2, [pix1q+strideq]
+ paddw m0, m1
+ paddw m0, m2
+ mova m1, m3
+%if %1 != mmsize
+ movu m5, [pix2q+8]
+ movu m6, [pix2q+strideq+8]
+ pavgb m4, m5
+ pavgb m5, m6
+ psadbw m4, [pix1q+8]
+ psadbw m5, [pix1q+strideq+8]
+ paddw m0, m4
+ paddw m0, m5
+ mova m4, m6
+%endif
+ sub hd, 2
+ jg .loop
+%if mmsize == 16
+ movhlps m1, m0
+ paddw m0, m1
+%endif
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_MMX mmxext
+SAD_Y2 8
+SAD_Y2 16
+INIT_XMM sse2
+SAD_Y2 16
+
+;-------------------------------------------------------------------------------------------
- ; int line_size, int h);
++;int ff_sad_approx_xy2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
+;-------------------------------------------------------------------------------------------
+;%1 = 8/16
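+; The pb_1 subtraction before the second pavgb stage compensates for the
+; upward rounding bias of cascaded averages; this is why the result is
+; only an approximation of the exact xy2 half-pel SAD.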
+%macro SAD_APPROX_XY2 1
+cglobal sad%1_approx_xy2, 5, 5, 7, v, pix1, pix2, stride, h
+ mova m4, [pb_1]
+ movu m1, [pix2q]
+ movu m0, [pix2q+strideq]
+ movu m3, [pix2q+2*strideq]
+%if mmsize == 16
+ movu m5, [pix2q+1]
+ movu m6, [pix2q+strideq+1]
+ movu m2, [pix2q+2*strideq+1]
+ pavgb m1, m5
+ pavgb m0, m6
+ pavgb m3, m2
+%else
+ pavgb m1, [pix2q+1]
+ pavgb m0, [pix2q+strideq+1]
+ pavgb m3, [pix2q+2*strideq+1]
+%endif
+ psubusb m0, m4
+ pavgb m1, m0
+ pavgb m0, m3
+ psadbw m1, [pix1q]
+ psadbw m0, [pix1q+strideq]
+ paddw m0, m1
+ mova m1, m3
+%if %1 != mmsize
+ movu m5, [pix2q+8]
+ movu m6, [pix2q+strideq+8]
+ movu m7, [pix2q+2*strideq+8]
+ pavgb m5, [pix2q+1+8]
+ pavgb m6, [pix2q+strideq+1+8]
+ pavgb m7, [pix2q+2*strideq+1+8]
+ psubusb m6, m4
+ pavgb m5, m6
+ pavgb m6, m7
+ psadbw m5, [pix1q+8]
+ psadbw m6, [pix1q+strideq+8]
+ paddw m0, m5
+ paddw m0, m6
+ mova m5, m7
+%endif
+ add pix2q, strideq
+ sub hd, 2
+
+align 16
+.loop:
+ lea pix1q, [pix1q+2*strideq]
+ lea pix2q, [pix2q+2*strideq]
+ movu m2, [pix2q]
+ movu m3, [pix2q+strideq]
+%if mmsize == 16
+ movu m5, [pix2q+1]
+ movu m6, [pix2q+strideq+1]
+ pavgb m2, m5
+ pavgb m3, m6
+%else
+ pavgb m2, [pix2q+1]
+ pavgb m3, [pix2q+strideq+1]
+%endif
+ psubusb m2, m4
+ pavgb m1, m2
+ pavgb m2, m3
+ psadbw m1, [pix1q]
+ psadbw m2, [pix1q+strideq]
+ paddw m0, m1
+ paddw m0, m2
+ mova m1, m3
+%if %1 != mmsize
+ movu m6, [pix2q+8]
+ movu m7, [pix2q+strideq+8]
+ pavgb m6, [pix2q+8+1]
+ pavgb m7, [pix2q+strideq+8+1]
+ psubusb m6, m4
+ pavgb m5, m6
+ pavgb m6, m7
+ psadbw m5, [pix1q+8]
+ psadbw m6, [pix1q+strideq+8]
+ paddw m0, m5
+ paddw m0, m6
+ mova m5, m7
+%endif
+ sub hd, 2
+ jg .loop
+%if mmsize == 16
+ movhlps m1, m0
+ paddw m0, m1
+%endif
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_MMX mmxext
+SAD_APPROX_XY2 8
+SAD_APPROX_XY2 16
+INIT_XMM sse2
+SAD_APPROX_XY2 16
+
+;--------------------------------------------------------------------
+;int ff_vsad_intra(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- ; int line_size, int h);
++; ptrdiff_t line_size, int h);
+;--------------------------------------------------------------------
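+; Sums the SAD between each pair of vertically adjacent lines of a single
+; plane, i.e. a vertical-activity measure (pix2 is unused).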
+; %1 = 8/16
+%macro VSAD_INTRA 1
+cglobal vsad_intra%1, 5, 5, 3, v, pix1, pix2, lsize, h
+ mova m0, [pix1q]
+%if %1 == mmsize
+ mova m2, [pix1q+lsizeq]
+ psadbw m0, m2
+%else
+ mova m2, [pix1q+lsizeq]
+ mova m3, [pix1q+8]
+ mova m4, [pix1q+lsizeq+8]
+ psadbw m0, m2
+ psadbw m3, m4
+ paddw m0, m3
+%endif
+ sub hd, 2
+
+.loop:
+ lea pix1q, [pix1q + 2*lsizeq]
+%if %1 == mmsize
+ mova m1, [pix1q]
+ psadbw m2, m1
+ paddw m0, m2
+ mova m2, [pix1q+lsizeq]
+ psadbw m1, m2
+ paddw m0, m1
+%else
+ mova m1, [pix1q]
+ mova m3, [pix1q+8]
+ psadbw m2, m1
+ psadbw m4, m3
+ paddw m0, m2
+ paddw m0, m4
+ mova m2, [pix1q+lsizeq]
+ mova m4, [pix1q+lsizeq+8]
+ psadbw m1, m2
+ psadbw m3, m4
+ paddw m0, m1
+ paddw m0, m3
+%endif
+ sub hd, 2
+ jg .loop
+
+%if mmsize == 16
+ pshufd m1, m0, 0xe
+ paddd m0, m1
+%endif
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_MMX mmxext
+VSAD_INTRA 8
+VSAD_INTRA 16
+INIT_XMM sse2
+VSAD_INTRA 16
+
+;---------------------------------------------------------------------
+;int ff_vsad_approx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
++; ptrdiff_t line_size, int h);
+;---------------------------------------------------------------------
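+; The same vertical-activity measure, taken on the pix1 - pix2 residual:
+; psubb wraps and the pxor with pb_80 re-biases the signed difference
+; bytes so psadbw applies; the possible wrap-around for large differences
+; is what makes the result approximate.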
+; %1 = 8/16
+%macro VSAD_APPROX 1
+cglobal vsad%1_approx, 5, 5, 5, v, pix1, pix2, lsize, h
+ mova m1, [pb_80]
+ mova m0, [pix1q]
+%if %1 == mmsize ; vsad8_mmxext, vsad16_sse2
+ mova m4, [pix1q+lsizeq]
+%if mmsize == 16
+ movu m3, [pix2q]
+ movu m2, [pix2q+lsizeq]
+ psubb m0, m3
+ psubb m4, m2
+%else
+ psubb m0, [pix2q]
+ psubb m4, [pix2q+lsizeq]
+%endif
+ pxor m0, m1
+ pxor m4, m1
+ psadbw m0, m4
+%else ; vsad16_mmxext
+ mova m3, [pix1q+8]
+ psubb m0, [pix2q]
+ psubb m3, [pix2q+8]
+ pxor m0, m1
+ pxor m3, m1
+ mova m4, [pix1q+lsizeq]
+ mova m5, [pix1q+lsizeq+8]
+ psubb m4, [pix2q+lsizeq]
+ psubb m5, [pix2q+lsizeq+8]
+ pxor m4, m1
+ pxor m5, m1
+ psadbw m0, m4
+ psadbw m3, m5
+ paddw m0, m3
+%endif
+ sub hd, 2
+
+.loop:
+ lea pix1q, [pix1q + 2*lsizeq]
+ lea pix2q, [pix2q + 2*lsizeq]
+ mova m2, [pix1q]
+%if %1 == mmsize ; vsad8_mmxext, vsad16_sse2
+%if mmsize == 16
+ movu m3, [pix2q]
+ psubb m2, m3
+%else
+ psubb m2, [pix2q]
+%endif
+ pxor m2, m1
+ psadbw m4, m2
+ paddw m0, m4
+ mova m4, [pix1q+lsizeq]
+ movu m3, [pix2q+lsizeq]
+ psubb m4, m3
+ pxor m4, m1
+ psadbw m2, m4
+ paddw m0, m2
+%else ; vsad16_mmxext
+ mova m3, [pix1q+8]
+ psubb m2, [pix2q]
+ psubb m3, [pix2q+8]
+ pxor m2, m1
+ pxor m3, m1
+ psadbw m4, m2
+ psadbw m5, m3
+ paddw m0, m4
+ paddw m0, m5
+ mova m4, [pix1q+lsizeq]
+ mova m5, [pix1q+lsizeq+8]
+ psubb m4, [pix2q+lsizeq]
+ psubb m5, [pix2q+lsizeq+8]
+ pxor m4, m1
+ pxor m5, m1
+ psadbw m2, m4
+ psadbw m3, m5
+ paddw m0, m2
+ paddw m0, m3
+%endif
+ sub hd, 2
+ jg .loop
+
+%if mmsize == 16
+ pshufd m1, m0, 0xe
+ paddd m0, m1
+%endif
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_MMX mmxext
+VSAD_APPROX 8
+VSAD_APPROX 16
+INIT_XMM sse2
+VSAD_APPROX 16
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/me_cmp.h"
#include "libavcodec/mpegvideo.h"
-#if HAVE_INLINE_ASM
-
-static int sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- ptrdiff_t stride, int h)
-{
- int tmp;
-
- __asm__ volatile (
- "movl %4, %%ecx \n"
- "shr $1, %%ecx \n"
- "pxor %%mm0, %%mm0 \n" /* mm0 = 0 */
- "pxor %%mm7, %%mm7 \n" /* mm7 holds the sum */
- "1: \n"
- "movq (%0), %%mm1 \n" /* mm1 = pix1[0][0 - 7] */
- "movq (%1), %%mm2 \n" /* mm2 = pix2[0][0 - 7] */
- "movq (%0, %3), %%mm3 \n" /* mm3 = pix1[1][0 - 7] */
- "movq (%1, %3), %%mm4 \n" /* mm4 = pix2[1][0 - 7] */
-
- /* todo: mm1-mm2, mm3-mm4 */
- /* algo: subtract mm1 from mm2 with saturation and vice versa */
- /* OR the results to get absolute difference */
- "movq %%mm1, %%mm5 \n"
- "movq %%mm3, %%mm6 \n"
- "psubusb %%mm2, %%mm1 \n"
- "psubusb %%mm4, %%mm3 \n"
- "psubusb %%mm5, %%mm2 \n"
- "psubusb %%mm6, %%mm4 \n"
-
- "por %%mm1, %%mm2 \n"
- "por %%mm3, %%mm4 \n"
-
- /* now convert to 16-bit vectors so we can square them */
- "movq %%mm2, %%mm1 \n"
- "movq %%mm4, %%mm3 \n"
-
- "punpckhbw %%mm0, %%mm2 \n"
- "punpckhbw %%mm0, %%mm4 \n"
- "punpcklbw %%mm0, %%mm1 \n" /* mm1 now spread over (mm1, mm2) */
- "punpcklbw %%mm0, %%mm3 \n" /* mm4 now spread over (mm3, mm4) */
-
- "pmaddwd %%mm2, %%mm2 \n"
- "pmaddwd %%mm4, %%mm4 \n"
- "pmaddwd %%mm1, %%mm1 \n"
- "pmaddwd %%mm3, %%mm3 \n"
-
- "lea (%0, %3, 2), %0 \n" /* pix1 += 2 * stride */
- "lea (%1, %3, 2), %1 \n" /* pix2 += 2 * stride */
-
- "paddd %%mm2, %%mm1 \n"
- "paddd %%mm4, %%mm3 \n"
- "paddd %%mm1, %%mm7 \n"
- "paddd %%mm3, %%mm7 \n"
-
- "decl %%ecx \n"
- "jnz 1b \n"
-
- "movq %%mm7, %%mm1 \n"
- "psrlq $32, %%mm7 \n" /* shift hi dword to lo */
- "paddd %%mm7, %%mm1 \n"
- "movd %%mm1, %2 \n"
- : "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" (stride), "m" (h)
- : "%ecx");
-
- return tmp;
-}
-
-static int sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- ptrdiff_t stride, int h)
-{
- int tmp;
-
- __asm__ volatile (
- "movl %4, %%ecx\n"
- "pxor %%mm0, %%mm0\n" /* mm0 = 0 */
- "pxor %%mm7, %%mm7\n" /* mm7 holds the sum */
- "1:\n"
- "movq (%0), %%mm1\n" /* mm1 = pix1[0 - 7] */
- "movq (%1), %%mm2\n" /* mm2 = pix2[0 - 7] */
- "movq 8(%0), %%mm3\n" /* mm3 = pix1[8 - 15] */
- "movq 8(%1), %%mm4\n" /* mm4 = pix2[8 - 15] */
-
- /* todo: mm1-mm2, mm3-mm4 */
- /* algo: subtract mm1 from mm2 with saturation and vice versa */
- /* OR the results to get absolute difference */
- "movq %%mm1, %%mm5\n"
- "movq %%mm3, %%mm6\n"
- "psubusb %%mm2, %%mm1\n"
- "psubusb %%mm4, %%mm3\n"
- "psubusb %%mm5, %%mm2\n"
- "psubusb %%mm6, %%mm4\n"
-
- "por %%mm1, %%mm2\n"
- "por %%mm3, %%mm4\n"
-
- /* now convert to 16-bit vectors so we can square them */
- "movq %%mm2, %%mm1\n"
- "movq %%mm4, %%mm3\n"
-
- "punpckhbw %%mm0, %%mm2\n"
- "punpckhbw %%mm0, %%mm4\n"
- "punpcklbw %%mm0, %%mm1\n" /* mm1 now spread over (mm1, mm2) */
- "punpcklbw %%mm0, %%mm3\n" /* mm4 now spread over (mm3, mm4) */
-
- "pmaddwd %%mm2, %%mm2\n"
- "pmaddwd %%mm4, %%mm4\n"
- "pmaddwd %%mm1, %%mm1\n"
- "pmaddwd %%mm3, %%mm3\n"
-
- "add %3, %0\n"
- "add %3, %1\n"
-
- "paddd %%mm2, %%mm1\n"
- "paddd %%mm4, %%mm3\n"
- "paddd %%mm1, %%mm7\n"
- "paddd %%mm3, %%mm7\n"
-
- "decl %%ecx\n"
- "jnz 1b\n"
-
- "movq %%mm7, %%mm1\n"
- "psrlq $32, %%mm7\n" /* shift hi dword to lo */
- "paddd %%mm7, %%mm1\n"
- "movd %%mm1, %2\n"
- : "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" (stride), "m" (h)
- : "%ecx");
-
- return tmp;
-}
-
-static int hf_noise8_mmx(uint8_t *pix1, ptrdiff_t stride, int h)
-{
- int tmp;
-
- __asm__ volatile (
- "movl %3, %%ecx\n"
- "pxor %%mm7, %%mm7\n"
- "pxor %%mm6, %%mm6\n"
-
- "movq (%0), %%mm0\n"
- "movq %%mm0, %%mm1\n"
- "psllq $8, %%mm0\n"
- "psrlq $8, %%mm1\n"
- "psrlq $8, %%mm0\n"
- "movq %%mm0, %%mm2\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7, %%mm0\n"
- "punpcklbw %%mm7, %%mm1\n"
- "punpckhbw %%mm7, %%mm2\n"
- "punpckhbw %%mm7, %%mm3\n"
- "psubw %%mm1, %%mm0\n"
- "psubw %%mm3, %%mm2\n"
-
- "add %2, %0\n"
-
- "movq (%0), %%mm4\n"
- "movq %%mm4, %%mm1\n"
- "psllq $8, %%mm4\n"
- "psrlq $8, %%mm1\n"
- "psrlq $8, %%mm4\n"
- "movq %%mm4, %%mm5\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7, %%mm4\n"
- "punpcklbw %%mm7, %%mm1\n"
- "punpckhbw %%mm7, %%mm5\n"
- "punpckhbw %%mm7, %%mm3\n"
- "psubw %%mm1, %%mm4\n"
- "psubw %%mm3, %%mm5\n"
- "psubw %%mm4, %%mm0\n"
- "psubw %%mm5, %%mm2\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm0, %%mm3\n\t"
- "pcmpgtw %%mm2, %%mm1\n\t"
- "pxor %%mm3, %%mm0\n"
- "pxor %%mm1, %%mm2\n"
- "psubw %%mm3, %%mm0\n"
- "psubw %%mm1, %%mm2\n"
- "paddw %%mm0, %%mm2\n"
- "paddw %%mm2, %%mm6\n"
-
- "add %2, %0\n"
- "1:\n"
-
- "movq (%0), %%mm0\n"
- "movq %%mm0, %%mm1\n"
- "psllq $8, %%mm0\n"
- "psrlq $8, %%mm1\n"
- "psrlq $8, %%mm0\n"
- "movq %%mm0, %%mm2\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7, %%mm0\n"
- "punpcklbw %%mm7, %%mm1\n"
- "punpckhbw %%mm7, %%mm2\n"
- "punpckhbw %%mm7, %%mm3\n"
- "psubw %%mm1, %%mm0\n"
- "psubw %%mm3, %%mm2\n"
- "psubw %%mm0, %%mm4\n"
- "psubw %%mm2, %%mm5\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm4, %%mm3\n\t"
- "pcmpgtw %%mm5, %%mm1\n\t"
- "pxor %%mm3, %%mm4\n"
- "pxor %%mm1, %%mm5\n"
- "psubw %%mm3, %%mm4\n"
- "psubw %%mm1, %%mm5\n"
- "paddw %%mm4, %%mm5\n"
- "paddw %%mm5, %%mm6\n"
-
- "add %2, %0\n"
-
- "movq (%0), %%mm4\n"
- "movq %%mm4, %%mm1\n"
- "psllq $8, %%mm4\n"
- "psrlq $8, %%mm1\n"
- "psrlq $8, %%mm4\n"
- "movq %%mm4, %%mm5\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7, %%mm4\n"
- "punpcklbw %%mm7, %%mm1\n"
- "punpckhbw %%mm7, %%mm5\n"
- "punpckhbw %%mm7, %%mm3\n"
- "psubw %%mm1, %%mm4\n"
- "psubw %%mm3, %%mm5\n"
- "psubw %%mm4, %%mm0\n"
- "psubw %%mm5, %%mm2\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm0, %%mm3\n\t"
- "pcmpgtw %%mm2, %%mm1\n\t"
- "pxor %%mm3, %%mm0\n"
- "pxor %%mm1, %%mm2\n"
- "psubw %%mm3, %%mm0\n"
- "psubw %%mm1, %%mm2\n"
- "paddw %%mm0, %%mm2\n"
- "paddw %%mm2, %%mm6\n"
-
- "add %2, %0\n"
- "subl $2, %%ecx\n"
- " jnz 1b\n"
-
- "movq %%mm6, %%mm0\n"
- "punpcklwd %%mm7, %%mm0\n"
- "punpckhwd %%mm7, %%mm6\n"
- "paddd %%mm0, %%mm6\n"
-
- "movq %%mm6, %%mm0\n"
- "psrlq $32, %%mm6\n"
- "paddd %%mm6, %%mm0\n"
- "movd %%mm0, %1\n"
- : "+r" (pix1), "=r" (tmp)
- : "r" (stride), "g" (h - 2)
- : "%ecx");
-
- return tmp;
-}
-
-static int hf_noise16_mmx(uint8_t *pix1, ptrdiff_t stride, int h)
-{
- int tmp;
- uint8_t *pix = pix1;
-
- __asm__ volatile (
- "movl %3, %%ecx\n"
- "pxor %%mm7, %%mm7\n"
- "pxor %%mm6, %%mm6\n"
-
- "movq (%0), %%mm0\n"
- "movq 1(%0), %%mm1\n"
- "movq %%mm0, %%mm2\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7, %%mm0\n"
- "punpcklbw %%mm7, %%mm1\n"
- "punpckhbw %%mm7, %%mm2\n"
- "punpckhbw %%mm7, %%mm3\n"
- "psubw %%mm1, %%mm0\n"
- "psubw %%mm3, %%mm2\n"
-
- "add %2, %0\n"
-
- "movq (%0), %%mm4\n"
- "movq 1(%0), %%mm1\n"
- "movq %%mm4, %%mm5\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7, %%mm4\n"
- "punpcklbw %%mm7, %%mm1\n"
- "punpckhbw %%mm7, %%mm5\n"
- "punpckhbw %%mm7, %%mm3\n"
- "psubw %%mm1, %%mm4\n"
- "psubw %%mm3, %%mm5\n"
- "psubw %%mm4, %%mm0\n"
- "psubw %%mm5, %%mm2\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm0, %%mm3\n\t"
- "pcmpgtw %%mm2, %%mm1\n\t"
- "pxor %%mm3, %%mm0\n"
- "pxor %%mm1, %%mm2\n"
- "psubw %%mm3, %%mm0\n"
- "psubw %%mm1, %%mm2\n"
- "paddw %%mm0, %%mm2\n"
- "paddw %%mm2, %%mm6\n"
-
- "add %2, %0\n"
- "1:\n"
-
- "movq (%0), %%mm0\n"
- "movq 1(%0), %%mm1\n"
- "movq %%mm0, %%mm2\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7, %%mm0\n"
- "punpcklbw %%mm7, %%mm1\n"
- "punpckhbw %%mm7, %%mm2\n"
- "punpckhbw %%mm7, %%mm3\n"
- "psubw %%mm1, %%mm0\n"
- "psubw %%mm3, %%mm2\n"
- "psubw %%mm0, %%mm4\n"
- "psubw %%mm2, %%mm5\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm4, %%mm3\n\t"
- "pcmpgtw %%mm5, %%mm1\n\t"
- "pxor %%mm3, %%mm4\n"
- "pxor %%mm1, %%mm5\n"
- "psubw %%mm3, %%mm4\n"
- "psubw %%mm1, %%mm5\n"
- "paddw %%mm4, %%mm5\n"
- "paddw %%mm5, %%mm6\n"
-
- "add %2, %0\n"
-
- "movq (%0), %%mm4\n"
- "movq 1(%0), %%mm1\n"
- "movq %%mm4, %%mm5\n"
- "movq %%mm1, %%mm3\n"
- "punpcklbw %%mm7, %%mm4\n"
- "punpcklbw %%mm7, %%mm1\n"
- "punpckhbw %%mm7, %%mm5\n"
- "punpckhbw %%mm7, %%mm3\n"
- "psubw %%mm1, %%mm4\n"
- "psubw %%mm3, %%mm5\n"
- "psubw %%mm4, %%mm0\n"
- "psubw %%mm5, %%mm2\n"
- "pxor %%mm3, %%mm3\n"
- "pxor %%mm1, %%mm1\n"
- "pcmpgtw %%mm0, %%mm3\n\t"
- "pcmpgtw %%mm2, %%mm1\n\t"
- "pxor %%mm3, %%mm0\n"
- "pxor %%mm1, %%mm2\n"
- "psubw %%mm3, %%mm0\n"
- "psubw %%mm1, %%mm2\n"
- "paddw %%mm0, %%mm2\n"
- "paddw %%mm2, %%mm6\n"
-
- "add %2, %0\n"
- "subl $2, %%ecx\n"
- " jnz 1b\n"
-
- "movq %%mm6, %%mm0\n"
- "punpcklwd %%mm7, %%mm0\n"
- "punpckhwd %%mm7, %%mm6\n"
- "paddd %%mm0, %%mm6\n"
+int ff_sum_abs_dctelem_mmx(int16_t *block);
+int ff_sum_abs_dctelem_mmxext(int16_t *block);
+int ff_sum_abs_dctelem_sse2(int16_t *block);
+int ff_sum_abs_dctelem_ssse3(int16_t *block);
+int ff_sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
++ ptrdiff_t stride, int h);
+int ff_sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
++ ptrdiff_t stride, int h);
+int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
- int ff_hf_noise8_mmx(uint8_t *pix1, int lsize, int h);
- int ff_hf_noise16_mmx(uint8_t *pix1, int lsize, int h);
++ ptrdiff_t stride, int h);
++int ff_hf_noise8_mmx(uint8_t *pix1, ptrdiff_t stride, int h);
++int ff_hf_noise16_mmx(uint8_t *pix1, ptrdiff_t stride, int h);
+int ff_sad8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad8_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad16_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad16_x2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad8_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad16_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad16_y2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad8_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad16_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_sad16_approx_xy2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int stride, int h);
++ ptrdiff_t stride, int h);
+int ff_vsad_intra8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
++ ptrdiff_t stride, int h);
+int ff_vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
++ ptrdiff_t stride, int h);
+int ff_vsad_intra16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
++ ptrdiff_t stride, int h);
+int ff_vsad8_approx_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
++ ptrdiff_t stride, int h);
+int ff_vsad16_approx_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
++ ptrdiff_t stride, int h);
+int ff_vsad16_approx_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
++ ptrdiff_t stride, int h);
- #define hadamard_func(cpu) \
- int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
- uint8_t *src2, int stride, int h); \
- int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
- uint8_t *src2, int stride, int h);
- "movq %%mm6, %%mm0\n"
- "psrlq $32, %%mm6\n"
- "paddd %%mm6, %%mm0\n"
- "movd %%mm0, %1\n"
- : "+r" (pix1), "=r" (tmp)
- : "r" (stride), "g" (h - 2)
- : "%ecx");
++#define hadamard_func(cpu) \
++ int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
++ uint8_t *src2, ptrdiff_t stride, int h); \
++ int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
++ uint8_t *src2, ptrdiff_t stride, int h);
- return tmp + hf_noise8_mmx(pix + 8, stride, h);
-}
+hadamard_func(mmx)
+hadamard_func(mmxext)
+hadamard_func(sse2)
+hadamard_func(ssse3)
+#if HAVE_YASM
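+/* NSSE: plain SSE plus nsse_weight times the absolute difference of the
+ * high-frequency noise measures of the two blocks. */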
static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int score1, score2;
if (c)
- score1 = c->mecc.sse[0](c, pix1, pix2, line_size, h);
+ score1 = c->mecc.sse[0](c, pix1, pix2, stride, h);
else
- score1 = ff_sse16_mmx(c, pix1, pix2, line_size, h);
- score2 = ff_hf_noise16_mmx(pix1, line_size, h) + ff_hf_noise8_mmx(pix1+8, line_size, h)
- - ff_hf_noise16_mmx(pix2, line_size, h) - ff_hf_noise8_mmx(pix2+8, line_size, h);
- score1 = sse16_mmx(c, pix1, pix2, stride, h);
- score2 = hf_noise16_mmx(pix1, stride, h) -
- hf_noise16_mmx(pix2, stride, h);
++ score1 = ff_sse16_mmx(c, pix1, pix2, stride, h);
++ score2 = ff_hf_noise16_mmx(pix1, stride, h) + ff_hf_noise8_mmx(pix1+8, stride, h)
++ - ff_hf_noise16_mmx(pix2, stride, h) - ff_hf_noise8_mmx(pix2+8, stride, h);
if (c)
return score1 + FFABS(score2) * c->avctx->nsse_weight;
 return score1 + FFABS(score2) * 8;
 }
static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
- int score1 = ff_sse8_mmx(c, pix1, pix2, line_size, h);
- int score2 = ff_hf_noise8_mmx(pix1, line_size, h) -
- ff_hf_noise8_mmx(pix2, line_size, h);
- int score1 = sse8_mmx(c, pix1, pix2, stride, h);
- int score2 = hf_noise8_mmx(pix1, stride, h) -
- hf_noise8_mmx(pix2, stride, h);
++ int score1 = ff_sse8_mmx(c, pix1, pix2, stride, h);
++ int score2 = ff_hf_noise8_mmx(pix1, stride, h) -
++ ff_hf_noise8_mmx(pix2, stride, h);
if (c)
return score1 + FFABS(score2) * c->avctx->nsse_weight;
return score1 + FFABS(score2) * 8;
}
+#endif /* HAVE_YASM */
+
+#if HAVE_INLINE_ASM
+
static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
- assert((((int) pix) & 7) == 0);
- assert((stride & 7) == 0);
+ av_assert2((((int) pix) & 7) == 0);
- av_assert2((line_size & 7) == 0);
++ av_assert2((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n" \
"paddw %%mm6, %%mm0\n"
"movd %%mm0, %1\n"
: "+r" (pix), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp & 0xFFFF;
}
#undef SUM
-static int vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
- ptrdiff_t stride, int h)
-{
- int tmp;
-
- assert((((int) pix) & 7) == 0);
- assert((stride & 7) == 0);
-
-#define SUM(in0, in1, out0, out1) \
- "movq (%0), " #out0 "\n" \
- "movq 8(%0), " #out1 "\n" \
- "add %2, %0\n" \
- "psadbw " #out0 ", " #in0 "\n" \
- "psadbw " #out1 ", " #in1 "\n" \
- "paddw " #in1 ", " #in0 "\n" \
- "paddw " #in0 ", %%mm6\n"
-
- __asm__ volatile (
- "movl %3, %%ecx\n"
- "pxor %%mm6, %%mm6\n"
- "pxor %%mm7, %%mm7\n"
- "movq (%0), %%mm0\n"
- "movq 8(%0), %%mm1\n"
- "add %2, %0\n"
- "jmp 2f\n"
- "1:\n"
-
- SUM(%%mm4, %%mm5, %%mm0, %%mm1)
- "2:\n"
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
-
- "subl $2, %%ecx\n"
- "jnz 1b\n"
-
- "movd %%mm6, %1\n"
- : "+r" (pix), "=r" (tmp)
- : "r" (stride), "m" (h)
- : "%ecx");
-
- return tmp;
-}
-#undef SUM
-
static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
- assert((((int) pix1) & 7) == 0);
- assert((((int) pix2) & 7) == 0);
- assert((stride & 7) == 0);
+ av_assert2((((int) pix1) & 7) == 0);
+ av_assert2((((int) pix2) & 7) == 0);
- av_assert2((line_size & 7) == 0);
++ av_assert2((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n" \
"paddw %%mm6, %%mm0\n"
"movd %%mm0, %2\n"
: "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp & 0x7FFF;
}
#undef SUM
-static int vsad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- ptrdiff_t stride, int h)
-{
- int tmp;
-
- assert((((int) pix1) & 7) == 0);
- assert((((int) pix2) & 7) == 0);
- assert((stride & 7) == 0);
-
-#define SUM(in0, in1, out0, out1) \
- "movq (%0), " #out0 "\n" \
- "movq (%1), %%mm2\n" \
- "movq 8(%0), " #out1 "\n" \
- "movq 8(%1), %%mm3\n" \
- "add %3, %0\n" \
- "add %3, %1\n" \
- "psubb %%mm2, " #out0 "\n" \
- "psubb %%mm3, " #out1 "\n" \
- "pxor %%mm7, " #out0 "\n" \
- "pxor %%mm7, " #out1 "\n" \
- "psadbw " #out0 ", " #in0 "\n" \
- "psadbw " #out1 ", " #in1 "\n" \
- "paddw " #in1 ", " #in0 "\n" \
- "paddw " #in0 ", %%mm6\n "
-
- __asm__ volatile (
- "movl %4, %%ecx\n"
- "pxor %%mm6, %%mm6\n"
- "pcmpeqw %%mm7, %%mm7\n"
- "psllw $15, %%mm7\n"
- "packsswb %%mm7, %%mm7\n"
- "movq (%0), %%mm0\n"
- "movq (%1), %%mm2\n"
- "movq 8(%0), %%mm1\n"
- "movq 8(%1), %%mm3\n"
- "add %3, %0\n"
- "add %3, %1\n"
- "psubb %%mm2, %%mm0\n"
- "psubb %%mm3, %%mm1\n"
- "pxor %%mm7, %%mm0\n"
- "pxor %%mm7, %%mm1\n"
- "jmp 2f\n"
- "1:\n"
-
- SUM(%%mm4, %%mm5, %%mm0, %%mm1)
- "2:\n"
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
-
- "subl $2, %%ecx\n"
- "jnz 1b\n"
-
- "movd %%mm6, %2\n"
- : "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" (stride), "m" (h)
- : "%ecx");
-
- return tmp;
-}
-#undef SUM
-
-#define MMABS_MMX(a,z) \
- "pxor " #z ", " #z " \n\t" \
- "pcmpgtw " #a ", " #z " \n\t" \
- "pxor " #z ", " #a " \n\t" \
- "psubw " #z ", " #a " \n\t"
-
-#define MMABS_MMXEXT(a, z) \
- "pxor " #z ", " #z " \n\t" \
- "psubw " #a ", " #z " \n\t" \
- "pmaxsw " #z ", " #a " \n\t"
-
-#define MMABS_SSSE3(a,z) \
- "pabsw " #a ", " #a " \n\t"
-
-#define MMABS_SUM(a,z, sum) \
- MMABS(a,z) \
- "paddusw " #a ", " #sum " \n\t"
-
-/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get
- * up to about 100k on extreme inputs. But that's very unlikely to occur in
- * natural video, and it's even more unlikely to not have any alternative
- * mvs/modes with lower cost. */
-#define HSUM_MMX(a, t, dst) \
- "movq " #a ", " #t " \n\t" \
- "psrlq $32, " #a " \n\t" \
- "paddusw " #t ", " #a " \n\t" \
- "movq " #a ", " #t " \n\t" \
- "psrlq $16, " #a " \n\t" \
- "paddusw " #t ", " #a " \n\t" \
- "movd " #a ", " #dst " \n\t" \
-
-#define HSUM_MMXEXT(a, t, dst) \
- "pshufw $0x0E, " #a ", " #t " \n\t" \
- "paddusw " #t ", " #a " \n\t" \
- "pshufw $0x01, " #a ", " #t " \n\t" \
- "paddusw " #t ", " #a " \n\t" \
- "movd " #a ", " #dst " \n\t" \
-
-#define HSUM_SSE2(a, t, dst) \
- "movhlps " #a ", " #t " \n\t" \
- "paddusw " #t ", " #a " \n\t" \
- "pshuflw $0x0E, " #a ", " #t " \n\t" \
- "paddusw " #t ", " #a " \n\t" \
- "pshuflw $0x01, " #a ", " #t " \n\t" \
- "paddusw " #t ", " #a " \n\t" \
- "movd " #a ", " #dst " \n\t" \
-
-#define DCT_SAD4(m, mm, o) \
- "mov"#m" "#o" + 0(%1), " #mm "2 \n\t" \
- "mov"#m" "#o" + 16(%1), " #mm "3 \n\t" \
- "mov"#m" "#o" + 32(%1), " #mm "4 \n\t" \
- "mov"#m" "#o" + 48(%1), " #mm "5 \n\t" \
- MMABS_SUM(mm ## 2, mm ## 6, mm ## 0) \
- MMABS_SUM(mm ## 3, mm ## 7, mm ## 1) \
- MMABS_SUM(mm ## 4, mm ## 6, mm ## 0) \
- MMABS_SUM(mm ## 5, mm ## 7, mm ## 1) \
-
-#define DCT_SAD_MMX \
- "pxor %%mm0, %%mm0 \n\t" \
- "pxor %%mm1, %%mm1 \n\t" \
- DCT_SAD4(q, %%mm, 0) \
- DCT_SAD4(q, %%mm, 8) \
- DCT_SAD4(q, %%mm, 64) \
- DCT_SAD4(q, %%mm, 72) \
- "paddusw %%mm1, %%mm0 \n\t" \
- HSUM(%%mm0, %%mm1, %0)
-
-#define DCT_SAD_SSE2 \
- "pxor %%xmm0, %%xmm0 \n\t" \
- "pxor %%xmm1, %%xmm1 \n\t" \
- DCT_SAD4(dqa, %%xmm, 0) \
- DCT_SAD4(dqa, %%xmm, 64) \
- "paddusw %%xmm1, %%xmm0 \n\t" \
- HSUM(%%xmm0, %%xmm1, %0)
-
-#define DCT_SAD_FUNC(cpu) \
-static int sum_abs_dctelem_ ## cpu(int16_t *block) \
-{ \
- int sum; \
- __asm__ volatile ( \
- DCT_SAD \
- :"=r"(sum) \
- :"r"(block)); \
- return sum & 0xFFFF; \
-}
-
-#define DCT_SAD DCT_SAD_MMX
-#define HSUM(a, t, dst) HSUM_MMX(a, t, dst)
-#define MMABS(a, z) MMABS_MMX(a, z)
-DCT_SAD_FUNC(mmx)
-#undef MMABS
-#undef HSUM
-
-#define HSUM(a, t, dst) HSUM_MMXEXT(a, t, dst)
-#define MMABS(a, z) MMABS_MMXEXT(a, z)
-DCT_SAD_FUNC(mmxext)
-#undef HSUM
-#undef DCT_SAD
-
-#define DCT_SAD DCT_SAD_SSE2
-#define HSUM(a, t, dst) HSUM_SSE2(a, t, dst)
-DCT_SAD_FUNC(sse2)
-#undef MMABS
-
-#if HAVE_SSSE3_INLINE
-#define MMABS(a, z) MMABS_SSSE3(a, z)
-DCT_SAD_FUNC(ssse3)
-#undef MMABS
-#endif
-#undef HSUM
-#undef DCT_SAD
-
-
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
0x0000000000000000ULL,
0x0001000100010001ULL,
0x0002000200020002ULL,
};
- static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
-DECLARE_ASM_CONST(8, uint64_t, bone) = 0x0101010101010101LL;
-
+ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
- x86_reg len = -(x86_reg)stride * h;
- x86_reg len = -(stride * h);
++ x86_reg len = -stride * h;
__asm__ volatile (
".p2align 4 \n\t"
"1: \n\t"
"add %3, %%"REG_a" \n\t"
" js 1b \n\t"
: "+a" (len)
- : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg) stride));
+ : "r" (blk1 - len), "r" (blk2 - len), "r" (stride));
}
-static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
- ptrdiff_t stride, int h)
-{
- __asm__ volatile (
- ".p2align 4 \n\t"
- "1: \n\t"
- "movq (%1), %%mm0 \n\t"
- "movq (%1, %3), %%mm1 \n\t"
- "psadbw (%2), %%mm0 \n\t"
- "psadbw (%2, %3), %%mm1 \n\t"
- "paddw %%mm0, %%mm6 \n\t"
- "paddw %%mm1, %%mm6 \n\t"
- "lea (%1,%3,2), %1 \n\t"
- "lea (%2,%3,2), %2 \n\t"
- "sub $2, %0 \n\t"
- " jg 1b \n\t"
- : "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" (stride));
-}
-
-static int sad16_sse2(MpegEncContext *v, uint8_t *blk2, uint8_t *blk1,
- ptrdiff_t stride, int h)
-{
- int ret;
- __asm__ volatile (
- "pxor %%xmm2, %%xmm2 \n\t"
- ".p2align 4 \n\t"
- "1: \n\t"
- "movdqu (%1), %%xmm0 \n\t"
- "movdqu (%1, %4), %%xmm1 \n\t"
- "psadbw (%2), %%xmm0 \n\t"
- "psadbw (%2, %4), %%xmm1 \n\t"
- "paddw %%xmm0, %%xmm2 \n\t"
- "paddw %%xmm1, %%xmm2 \n\t"
- "lea (%1,%4,2), %1 \n\t"
- "lea (%2,%4,2), %2 \n\t"
- "sub $2, %0 \n\t"
- " jg 1b \n\t"
- "movhlps %%xmm2, %%xmm0 \n\t"
- "paddw %%xmm0, %%xmm2 \n\t"
- "movd %%xmm2, %3 \n\t"
- : "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
- : "r" (stride));
- return ret;
-}
-
-static inline void sad8_x2a_mmxext(uint8_t *blk1, uint8_t *blk2,
- ptrdiff_t stride, int h)
-{
- __asm__ volatile (
- ".p2align 4 \n\t"
- "1: \n\t"
- "movq (%1), %%mm0 \n\t"
- "movq (%1, %3), %%mm1 \n\t"
- "pavgb 1(%1), %%mm0 \n\t"
- "pavgb 1(%1, %3), %%mm1 \n\t"
- "psadbw (%2), %%mm0 \n\t"
- "psadbw (%2, %3), %%mm1 \n\t"
- "paddw %%mm0, %%mm6 \n\t"
- "paddw %%mm1, %%mm6 \n\t"
- "lea (%1,%3,2), %1 \n\t"
- "lea (%2,%3,2), %2 \n\t"
- "sub $2, %0 \n\t"
- " jg 1b \n\t"
- : "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" (stride));
-}
-
-static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
- ptrdiff_t stride, int h)
-{
- __asm__ volatile (
- "movq (%1), %%mm0 \n\t"
- "add %3, %1 \n\t"
- ".p2align 4 \n\t"
- "1: \n\t"
- "movq (%1), %%mm1 \n\t"
- "movq (%1, %3), %%mm2 \n\t"
- "pavgb %%mm1, %%mm0 \n\t"
- "pavgb %%mm2, %%mm1 \n\t"
- "psadbw (%2), %%mm0 \n\t"
- "psadbw (%2, %3), %%mm1 \n\t"
- "paddw %%mm0, %%mm6 \n\t"
- "paddw %%mm1, %%mm6 \n\t"
- "movq %%mm2, %%mm0 \n\t"
- "lea (%1,%3,2), %1 \n\t"
- "lea (%2,%3,2), %2 \n\t"
- "sub $2, %0 \n\t"
- " jg 1b \n\t"
- : "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" (stride));
-}
-
-static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
- ptrdiff_t stride, int h)
-{
- __asm__ volatile (
- "movq "MANGLE(bone)", %%mm5 \n\t"
- "movq (%1), %%mm0 \n\t"
- "pavgb 1(%1), %%mm0 \n\t"
- "add %3, %1 \n\t"
- ".p2align 4 \n\t"
- "1: \n\t"
- "movq (%1), %%mm1 \n\t"
- "movq (%1,%3), %%mm2 \n\t"
- "pavgb 1(%1), %%mm1 \n\t"
- "pavgb 1(%1,%3), %%mm2 \n\t"
- "psubusb %%mm5, %%mm1 \n\t"
- "pavgb %%mm1, %%mm0 \n\t"
- "pavgb %%mm2, %%mm1 \n\t"
- "psadbw (%2), %%mm0 \n\t"
- "psadbw (%2,%3), %%mm1 \n\t"
- "paddw %%mm0, %%mm6 \n\t"
- "paddw %%mm1, %%mm6 \n\t"
- "movq %%mm2, %%mm0 \n\t"
- "lea (%1,%3,2), %1 \n\t"
- "lea (%2,%3,2), %2 \n\t"
- "sub $2, %0 \n\t"
- " jg 1b \n\t"
- : "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" (stride));
-}
-
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
- x86_reg len = -(x86_reg)stride * h;
- x86_reg len = -(stride * h);
++ x86_reg len = -stride * h;
__asm__ volatile (
".p2align 4 \n\t"
"1: \n\t"
" js 1b \n\t"
: "+a" (len)
: "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
- "r" ((x86_reg) stride));
+ "r" (stride));
}
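/* sad8_2_mmx accumulates |avg(blk1a, blk1b) - blk2| with round_tab
 * rounding; it is the shared kernel for both two-point half-pel cases. */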
- static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
- x86_reg len = -(x86_reg)stride * h;
- x86_reg len = -(stride * h);
++ x86_reg len = -stride * h;
__asm__ volatile (
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq 1(%1, %%"REG_a"), %%mm2 \n\t"
"punpckhbw %%mm7, %%mm5 \n\t"
"paddw %%mm4, %%mm2 \n\t"
"paddw %%mm5, %%mm3 \n\t"
- "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
+ "movq %5, %%mm5 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"paddw %%mm3, %%mm1 \n\t"
"paddw %%mm5, %%mm0 \n\t"
" js 1b \n\t"
: "+a" (len)
: "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
- "r" ((x86_reg) stride), "m" (round_tab[2]));
- "r" (stride));
++ "r" (stride), "m" (round_tab[2]));
}
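/* sad8_4_mmx is the four-point (diagonal half-pel) case: the four
 * neighbouring pixels are summed in 16-bit lanes, biased with
 * round_tab[2] and divided by 4 before the absolute differences are
 * accumulated. */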
static inline int sum_mmx(void)
return ret & 0xFFFF;
}
-static inline int sum_mmxext(void)
-{
- int ret;
- __asm__ volatile (
- "movd %%mm6, %0 \n\t"
- : "=r" (ret));
- return ret;
-}
-
- static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+ static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}
- static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+ static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}
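/* The x2a/y2a wrappers feed the two-point kernel its second source:
 * the same block shifted one pixel right resp. one row down. */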
#define PIX_SAD(suf) \
static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
- assert(h == 8); \
+ av_assert2(h == 8); \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
} \
\
static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
- assert(h == 8); \
+ av_assert2(h == 8); \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
} \
\
static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
- assert(h == 8); \
+ av_assert2(h == 8); \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
} \
\
static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
- assert(h == 8); \
+ av_assert2(h == 8); \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
} \
\
static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
} \
\
static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
} \
\
static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
} \
\
static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
} \
PIX_SAD(mmx)
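/* PIX_SAD instantiates the public entry points for one instruction-set
 * suffix: sad8/sad16 plus their x2, y2 and xy2 half-pel variants, each
 * clearing mm6/mm7, delegating to the helpers above and fetching the
 * result via sum_mmx(). */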
-PIX_SAD(mmxext)
#endif /* HAVE_INLINE_ASM */
-int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- ptrdiff_t stride, int h);
-
-#define hadamard_func(cpu) \
- int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
- uint8_t *src2, ptrdiff_t stride, int h); \
- int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
- uint8_t *src2, ptrdiff_t stride, int h);
-
-hadamard_func(mmx)
-hadamard_func(mmxext)
-hadamard_func(sse2)
-hadamard_func(ssse3)
-
av_cold void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx)
{
int cpu_flags = av_get_cpu_flags();
#if HAVE_INLINE_ASM
if (INLINE_MMX(cpu_flags)) {
- c->sum_abs_dctelem = sum_abs_dctelem_mmx;
-
c->pix_abs[0][0] = sad16_mmx;
c->pix_abs[0][1] = sad16_x2_mmx;
c->pix_abs[0][2] = sad16_y2_mmx;
c->sad[0] = sad16_mmx;
c->sad[1] = sad8_mmx;
- c->sse[0] = sse16_mmx;
- c->sse[1] = sse8_mmx;
c->vsad[4] = vsad_intra16_mmx;
- c->nsse[0] = nsse16_mmx;
- c->nsse[1] = nsse8_mmx;
-
if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
c->vsad[0] = vsad16_mmx;
}
}
- if (INLINE_MMXEXT(cpu_flags)) {
- c->sum_abs_dctelem = sum_abs_dctelem_mmxext;
-
- c->vsad[4] = vsad_intra16_mmxext;
-
- c->pix_abs[0][0] = sad16_mmxext;
- c->pix_abs[1][0] = sad8_mmxext;
-
- c->sad[0] = sad16_mmxext;
- c->sad[1] = sad8_mmxext;
-
- if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
- c->pix_abs[0][1] = sad16_x2_mmxext;
- c->pix_abs[0][2] = sad16_y2_mmxext;
- c->pix_abs[0][3] = sad16_xy2_mmxext;
- c->pix_abs[1][1] = sad8_x2_mmxext;
- c->pix_abs[1][2] = sad8_y2_mmxext;
- c->pix_abs[1][3] = sad8_xy2_mmxext;
-
- c->vsad[0] = vsad16_mmxext;
- }
- }
-
- if (INLINE_SSE2(cpu_flags)) {
- c->sum_abs_dctelem = sum_abs_dctelem_sse2;
- }
-
- if (INLINE_SSE2(cpu_flags) && !(cpu_flags & AV_CPU_FLAG_3DNOW)) {
- c->sad[0] = sad16_sse2;
- }
-
-#if HAVE_SSSE3_INLINE
- if (INLINE_SSSE3(cpu_flags)) {
- c->sum_abs_dctelem = sum_abs_dctelem_ssse3;
- }
-#endif
#endif /* HAVE_INLINE_ASM */
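/* From here on only the standalone assembly versions are wired up:
 * EXTERNAL_* cpu-flag checks and ff_-prefixed symbols, as opposed to
 * the INLINE_* guarded inline-asm fallbacks above. */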
if (EXTERNAL_MMX(cpu_flags)) {
c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
+ c->sum_abs_dctelem = ff_sum_abs_dctelem_mmx;
+ c->sse[0] = ff_sse16_mmx;
+ c->sse[1] = ff_sse8_mmx;
+#if HAVE_YASM
+ c->nsse[0] = nsse16_mmx;
+ c->nsse[1] = nsse8_mmx;
+#endif
}
if (EXTERNAL_MMXEXT(cpu_flags)) {
c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
+ c->sum_abs_dctelem = ff_sum_abs_dctelem_mmxext;
+
+ c->sad[0] = ff_sad16_mmxext;
+ c->sad[1] = ff_sad8_mmxext;
+
+ c->pix_abs[0][0] = ff_sad16_mmxext;
+ c->pix_abs[0][1] = ff_sad16_x2_mmxext;
+ c->pix_abs[0][2] = ff_sad16_y2_mmxext;
+ c->pix_abs[1][0] = ff_sad8_mmxext;
+ c->pix_abs[1][1] = ff_sad8_x2_mmxext;
+ c->pix_abs[1][2] = ff_sad8_y2_mmxext;
+
+ c->vsad[4] = ff_vsad_intra16_mmxext;
+ c->vsad[5] = ff_vsad_intra8_mmxext;
+
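+ /* The pavgb-based *_approx versions round up rather than matching the
+ * bit-exact C rounding, so they are only installed when the encoder
+ * does not request CODEC_FLAG_BITEXACT. */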
+ if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
+ c->pix_abs[0][3] = ff_sad16_approx_xy2_mmxext;
+ c->pix_abs[1][3] = ff_sad8_approx_xy2_mmxext;
+
+ c->vsad[0] = ff_vsad16_approx_mmxext;
+ c->vsad[1] = ff_vsad8_approx_mmxext;
+ }
}
if (EXTERNAL_SSE2(cpu_flags)) {
c->sse[0] = ff_sse16_sse2;
+ c->sum_abs_dctelem = ff_sum_abs_dctelem_sse2;
#if HAVE_ALIGNED_STACK
c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
+ if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW) && avctx->codec_id != AV_CODEC_ID_SNOW) {
+ c->sad[0] = ff_sad16_sse2;
+ c->pix_abs[0][0] = ff_sad16_sse2;
+ c->pix_abs[0][1] = ff_sad16_x2_sse2;
+ c->pix_abs[0][2] = ff_sad16_y2_sse2;
+
+ c->vsad[4] = ff_vsad_intra16_sse2;
+ if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
+ c->pix_abs[0][3] = ff_sad16_approx_xy2_sse2;
+ c->vsad[0] = ff_vsad16_approx_sse2;
+ }
+ }
}
- if (EXTERNAL_SSSE3(cpu_flags) && HAVE_ALIGNED_STACK) {
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ c->sum_abs_dctelem = ff_sum_abs_dctelem_ssse3;
+#if HAVE_ALIGNED_STACK
c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
+#endif
}
}
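/* Usage sketch (hypothetical caller, not part of this file): after
 * ff_me_cmp_init_x86() has filled in the MECmpContext, motion
 * estimation measures block distortion through the function pointers,
 * e.g.
 *
 *     MECmpContext c;
 *     ff_me_cmp_init_x86(&c, avctx);
 *     int sad16 = c.sad[0](s, cur, ref, stride, 16);       // 16x16 SAD
 *     int sad8x = c.pix_abs[1][1](s, cur, ref, stride, 8); // 8x8, x half-pel
 *
 * so the fastest implementation for the detected CPU flags is chosen
 * once at init time instead of being dispatched per call. */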