#include "libavcodec/mpegvideo.h"
int ff_pix_abs16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_pix_abs16_x2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_pix_abs16_y2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_pix_abs8_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_sse16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
av_cold void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx)
{
uint32_t ff_square_tab[512] = { 0, };
static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
uint32_t *sq = ff_square_tab + 256;
s += sq[pix1[1] - pix2[1]];
s += sq[pix1[2] - pix2[2]];
s += sq[pix1[3] - pix2[3]];
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
uint32_t *sq = ff_square_tab + 256;
s += sq[pix1[5] - pix2[5]];
s += sq[pix1[6] - pix2[6]];
s += sq[pix1[7] - pix2[7]];
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
uint32_t *sq = ff_square_tab + 256;
s += sq[pix1[14] - pix2[14]];
s += sq[pix1[15] - pix2[15]];
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
#define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)
static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
s += abs(pix1[13] - pix2[13]);
s += abs(pix1[14] - pix2[14]);
s += abs(pix1[15] - pix2[15]);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
s += abs(pix1[5] - pix2[5]);
s += abs(pix1[6] - pix2[6]);
s += abs(pix1[7] - pix2[7]);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
-static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
+static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
+ ptrdiff_t stride, int h)
{
int score1 = 0, score2 = 0, x, y;
return score1 + FFABS(score2) * 8;
}
-static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
+static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
+ ptrdiff_t stride, int h)
{
int score1 = 0, score2 = 0, x, y;
}
static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
return 0;
}
#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
int i, temp[64], sum = 0;
}
static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
- uint8_t *dummy, int stride, int h)
+ uint8_t *dummy, ptrdiff_t stride, int h)
{
int i, temp[64], sum = 0;
}
static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
LOCAL_ALIGNED_16(int16_t, temp, [64]);
}
static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
int16_t dct[8][8];
int i, sum = 0;
#endif
static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
LOCAL_ALIGNED_16(int16_t, temp, [64]);
int sum = 0, i;
}
static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
int16_t *const bak = temp + 64;
}
static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
const uint8_t *scantable = s->intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
}
static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
const uint8_t *scantable = s->intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
#define VSAD_INTRA(size) \
static int vsad_intra ## size ## _c(MpegEncContext *c, \
uint8_t *s, uint8_t *dummy, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
{ \
int score = 0, x, y; \
\
VSAD_INTRA(16)
static int vsad16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
int score = 0, x, y;
#define VSSE_INTRA(size) \
static int vsse_intra ## size ## _c(MpegEncContext *c, \
uint8_t *s, uint8_t *dummy, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
{ \
int score = 0, x, y; \
\
VSSE_INTRA(16)
static int vsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
int score = 0, x, y;
#define WRAPPER8_16_SQ(name8, name16) \
static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
{ \
int score = 0; \
\
* width < 8 are neither used nor implemented. */
typedef int (*me_cmp_func)(struct MpegEncContext *c,
uint8_t *blk1 /* align width (8 or 16) */,
- uint8_t *blk2 /* align 1 */, int line_size, int h);
+ uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
+ int h);
typedef struct MECmpContext {
int (*sum_abs_dctelem)(int16_t *block /* align 16 */);
#if HAVE_ALTIVEC
static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s = 0;
const vector unsigned char zero =
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
}
static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s = 0;
const vector unsigned char zero =
vector unsigned char pix1v, pix3v, avgv, t5;
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
- /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+ /* Due to the fact that pix3 = pix2 + stride, the pix3 of one
* iteration becomes pix2 in the next iteration. We can use this
* fact to avoid a potentially expensive unaligned read, each
* time around the loop.
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
+ pix1 += stride;
pix2v = pix3v;
- pix3 += line_size;
+ pix3 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
}
static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s = 0;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
const vector unsigned short two =
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
- /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+ /* Due to the fact that pix3 = pix2 + stride, the pix3 of one
* iteration becomes pix2 in the next iteration. We can use this
* fact to avoid a potentially expensive unaligned read, as well
* as some splitting, and vector addition each time around the loop.
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix3 += stride;
/* Transfer the calculated values for pix3 into pix2. */
t1 = t3;
t2 = t4;
}
static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s;
const vector unsigned int zero =
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
}
static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s;
const vector unsigned int zero =
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
/* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
* It's the sad8_altivec code above w/ squaring added. */
static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s;
const vector unsigned int zero =
/* Square the values and add them to our sum. */
sum = vec_msum(t5, t5, sum);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
/* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
* It's the sad16_altivec code above w/ squaring added. */
static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s;
const vector unsigned int zero =
/* Square the values and add them to our sum. */
sum = vec_msum(t5, t5, sum);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
}
static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
int sum;
register const vector unsigned char vzero =
* but xlc goes to around 660 on the regular C code...
*/
static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
int sum;
register vector signed short
}
static int hadamard8_diff16_altivec(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
#if HAVE_INLINE_ASM
static int sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
"pmaddwd %%mm1, %%mm1 \n"
"pmaddwd %%mm3, %%mm3 \n"
- "lea (%0, %3, 2), %0 \n" /* pix1 += 2 * line_size */
- "lea (%1, %3, 2), %1 \n" /* pix2 += 2 * line_size */
+ "lea (%0, %3, 2), %0 \n" /* pix1 += 2 * stride */
+ "lea (%1, %3, 2), %1 \n" /* pix2 += 2 * stride */
"paddd %%mm2, %%mm1 \n"
"paddd %%mm4, %%mm3 \n"
"paddd %%mm7, %%mm1 \n"
"movd %%mm1, %2 \n"
: "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp;
}
static int sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
"paddd %%mm7, %%mm1\n"
"movd %%mm1, %2\n"
: "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp;
}
-static int hf_noise8_mmx(uint8_t *pix1, int line_size, int h)
+static int hf_noise8_mmx(uint8_t *pix1, ptrdiff_t stride, int h)
{
int tmp;
"paddd %%mm6, %%mm0\n"
"movd %%mm0, %1\n"
: "+r" (pix1), "=r" (tmp)
- : "r" ((x86_reg) line_size), "g" (h - 2)
+ : "r" (stride), "g" (h - 2)
: "%ecx");
return tmp;
}
-static int hf_noise16_mmx(uint8_t *pix1, int line_size, int h)
+static int hf_noise16_mmx(uint8_t *pix1, ptrdiff_t stride, int h)
{
int tmp;
uint8_t *pix = pix1;
"paddd %%mm6, %%mm0\n"
"movd %%mm0, %1\n"
: "+r" (pix1), "=r" (tmp)
- : "r" ((x86_reg) line_size), "g" (h - 2)
+ : "r" (stride), "g" (h - 2)
: "%ecx");
- return tmp + hf_noise8_mmx(pix + 8, line_size, h);
+ return tmp + hf_noise8_mmx(pix + 8, stride, h);
}
static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int score1, score2;
if (c)
- score1 = c->mecc.sse[0](c, pix1, pix2, line_size, h);
+ score1 = c->mecc.sse[0](c, pix1, pix2, stride, h);
else
- score1 = sse16_mmx(c, pix1, pix2, line_size, h);
- score2 = hf_noise16_mmx(pix1, line_size, h) -
- hf_noise16_mmx(pix2, line_size, h);
+ score1 = sse16_mmx(c, pix1, pix2, stride, h);
+ score2 = hf_noise16_mmx(pix1, stride, h) -
+ hf_noise16_mmx(pix2, stride, h);
if (c)
return score1 + FFABS(score2) * c->avctx->nsse_weight;
}
static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
- int score1 = sse8_mmx(c, pix1, pix2, line_size, h);
- int score2 = hf_noise8_mmx(pix1, line_size, h) -
- hf_noise8_mmx(pix2, line_size, h);
+ int score1 = sse8_mmx(c, pix1, pix2, stride, h);
+ int score2 = hf_noise8_mmx(pix1, stride, h) -
+ hf_noise8_mmx(pix2, stride, h);
if (c)
return score1 + FFABS(score2) * c->avctx->nsse_weight;
}
static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
assert((((int) pix) & 7) == 0);
- assert((line_size & 7) == 0);
+ assert((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n" \
"paddw %%mm6, %%mm0\n"
"movd %%mm0, %1\n"
: "+r" (pix), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp & 0xFFFF;
#undef SUM
static int vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
assert((((int) pix) & 7) == 0);
- assert((line_size & 7) == 0);
+ assert((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), " #out0 "\n" \
"movd %%mm6, %1\n"
: "+r" (pix), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp;
#undef SUM
static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
assert((((int) pix1) & 7) == 0);
assert((((int) pix2) & 7) == 0);
- assert((line_size & 7) == 0);
+ assert((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n" \
"paddw %%mm6, %%mm0\n"
"movd %%mm0, %2\n"
: "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp & 0x7FFF;
#undef SUM
static int vsad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
assert((((int) pix1) & 7) == 0);
assert((((int) pix2) & 7) == 0);
- assert((line_size & 7) == 0);
+ assert((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), " #out0 "\n" \
"movd %%mm6, %2\n"
: "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp;
DECLARE_ASM_CONST(8, uint64_t, bone) = 0x0101010101010101LL;
-static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
x86_reg len = -(stride * h);
__asm__ volatile (
"add %3, %%"REG_a" \n\t"
" js 1b \n\t"
: "+a" (len)
- : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg) stride));
+ : "r" (blk1 - len), "r" (blk2 - len), "r" (stride));
}
static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
__asm__ volatile (
".p2align 4 \n\t"
"sub $2, %0 \n\t"
" jg 1b \n\t"
: "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" ((x86_reg) stride));
+ : "r" (stride));
}
static int sad16_sse2(MpegEncContext *v, uint8_t *blk2, uint8_t *blk1,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
int ret;
__asm__ volatile (
"paddw %%xmm0, %%xmm2 \n\t"
"movd %%xmm2, %3 \n\t"
: "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
- : "r" ((x86_reg) stride));
+ : "r" (stride));
return ret;
}
static inline void sad8_x2a_mmxext(uint8_t *blk1, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
__asm__ volatile (
".p2align 4 \n\t"
"sub $2, %0 \n\t"
" jg 1b \n\t"
: "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" ((x86_reg) stride));
+ : "r" (stride));
}
static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
__asm__ volatile (
"movq (%1), %%mm0 \n\t"
"sub $2, %0 \n\t"
" jg 1b \n\t"
: "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" ((x86_reg) stride));
+ : "r" (stride));
}
static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
__asm__ volatile (
"movq "MANGLE(bone)", %%mm5 \n\t"
"sub $2, %0 \n\t"
" jg 1b \n\t"
: "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" ((x86_reg) stride));
+ : "r" (stride));
}
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
x86_reg len = -(stride * h);
__asm__ volatile (
" js 1b \n\t"
: "+a" (len)
: "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
- "r" ((x86_reg) stride));
+ "r" (stride));
}
-static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
x86_reg len = -(stride * h);
__asm__ volatile (
" js 1b \n\t"
: "+a" (len)
: "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
- "r" ((x86_reg) stride));
+ "r" (stride));
}
static inline int sum_mmx(void)
return ret;
}
-static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}
-static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}
#define PIX_SAD(suf) \
static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
assert(h == 8); \
__asm__ volatile ( \
} \
\
static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
assert(h == 8); \
__asm__ volatile ( \
} \
\
static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
assert(h == 8); \
__asm__ volatile ( \
} \
\
static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
assert(h == 8); \
__asm__ volatile ( \
} \
\
static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
} \
\
static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
} \
\
static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
} \
\
static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
#endif /* HAVE_INLINE_ASM */
int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
-#define hadamard_func(cpu) \
- int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
- uint8_t *src2, int stride, int h); \
- int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
- uint8_t *src2, int stride, int h);
+#define hadamard_func(cpu) \
+ int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
+ uint8_t *src2, ptrdiff_t stride, int h); \
+ int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
+ uint8_t *src2, ptrdiff_t stride, int h);
hadamard_func(mmx)
hadamard_func(mmxext)