2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
60 static const uint8_t ff_default_chroma_qscale_table[32] = {
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
66 const uint8_t ff_mpeg1_dc_scale_table[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 static const uint8_t mpeg2_dc_scale_table1[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 static const uint8_t mpeg2_dc_scale_table2[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 static const uint8_t mpeg2_dc_scale_table3[128] = {
103 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
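/* The four flat tables above give the intra DC scale factor (8, 4, 2 or 1)
 * for MPEG-2 intra_dc_precision 0-3 (8- to 11-bit DC): the array below is
 * indexed by intra_dc_precision first and by qscale second, which is why
 * every entry of a given sub-table holds the same value. */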
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115 ff_mpeg1_dc_scale_table,
116 mpeg2_dc_scale_table1,
117 mpeg2_dc_scale_table2,
118 mpeg2_dc_scale_table3,
121 const enum AVPixelFormat ff_pixfmt_list_420[] = {
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
128 int mb_x, int mb_y, int mb_intra, int mb_skipped)
130 MpegEncContext *s = opaque;
133 s->mv_type = mv_type;
134 s->mb_intra = mb_intra;
135 s->mb_skipped = mb_skipped;
138 memcpy(s->mv, mv, sizeof(*mv));
140 ff_init_block_index(s);
141 ff_update_block_index(s);
143 s->dsp.clear_blocks(s->block[0]);
145 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
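// For illustration (values not from the source): with 4:2:0 input both
// chroma shifts are 1, so the chroma destinations advance by 16 >> 1 = 8
// pixels/lines per macroblock instead of 16, matching the half-size
// chroma macroblock.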
150 ff_MPV_decode_mb(s, s->block);
153 /* init common dct for both encoder and decoder */
154 av_cold int ff_dct_common_init(MpegEncContext *s)
156 ff_dsputil_init(&s->dsp, s->avctx);
157 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
158 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
160 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
161 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
162 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
163 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
165 if (s->flags & CODEC_FLAG_BITEXACT)
166 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
167 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
170 ff_MPV_common_init_arm(s);
172 ff_MPV_common_init_bfin(s);
174 ff_MPV_common_init_ppc(s);
176 ff_MPV_common_init_x86(s);
/* load & permute scantables
 * note: only wmv uses different ones
181 if (s->alternate_scan) {
182 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
183 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
185 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
186 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
194 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
196 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
// edge emu needs blocksize + filter length - 1
// (= 17x17 for halfpel / 21x21 for h264)
// VC1 computes luma and chroma simultaneously and needs 19x19 + 9x9
// at uvlinesize. It supports only YUV420 so 24x24 is enough
// linesize * interlaced * MBsize
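// Rough worked example (illustrative numbers only): linesize = 1920 gives
// alloc_size = FFALIGN(1920 + 32, 32) = 1952, so the buffer below is
// 1952 * 2 * 24 = 93696 bytes, i.e. stride x interlaced factor 2 x 24 lines.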
203 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
206 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
208 s->me.temp = s->me.scratchpad;
209 s->rd_scratchpad = s->me.scratchpad;
210 s->b_scratchpad = s->me.scratchpad;
211 s->obmc_scratchpad = s->me.scratchpad + 16;
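// Editorial note: me.temp, rd_scratchpad and b_scratchpad all alias the
// scratchpad allocated above and obmc_scratchpad points 16 bytes into the
// same block, so only edge_emu_buffer and me.scratchpad are ever freed.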
215 av_freep(&s->edge_emu_buffer);
216 return AVERROR(ENOMEM);
220 * Allocate a frame buffer
222 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
227 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
228 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
229 s->codec_id != AV_CODEC_ID_MSS2)
230 r = ff_thread_get_buffer(s->avctx, &pic->tf,
231 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
233 pic->f.width = s->avctx->width;
234 pic->f.height = s->avctx->height;
235 pic->f.format = s->avctx->pix_fmt;
236 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
239 if (r < 0 || !pic->f.buf[0]) {
240 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
245 if (s->avctx->hwaccel) {
246 assert(!pic->hwaccel_picture_private);
247 if (s->avctx->hwaccel->priv_data_size) {
248 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
249 if (!pic->hwaccel_priv_buf) {
250 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
253 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
257 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
258 s->uvlinesize != pic->f.linesize[1])) {
259 av_log(s->avctx, AV_LOG_ERROR,
260 "get_buffer() failed (stride changed)\n");
261 ff_mpeg_unref_picture(s, pic);
265 if (pic->f.linesize[1] != pic->f.linesize[2]) {
266 av_log(s->avctx, AV_LOG_ERROR,
267 "get_buffer() failed (uv stride mismatch)\n");
268 ff_mpeg_unref_picture(s, pic);
272 if (!s->edge_emu_buffer &&
273 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
274 av_log(s->avctx, AV_LOG_ERROR,
275 "get_buffer() failed to allocate context scratch buffers.\n");
276 ff_mpeg_unref_picture(s, pic);
283 static void free_picture_tables(Picture *pic)
287 av_buffer_unref(&pic->mb_var_buf);
288 av_buffer_unref(&pic->mc_mb_var_buf);
289 av_buffer_unref(&pic->mb_mean_buf);
290 av_buffer_unref(&pic->mbskip_table_buf);
291 av_buffer_unref(&pic->qscale_table_buf);
292 av_buffer_unref(&pic->mb_type_buf);
294 for (i = 0; i < 2; i++) {
295 av_buffer_unref(&pic->motion_val_buf[i]);
296 av_buffer_unref(&pic->ref_index_buf[i]);
300 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
302 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
303 const int mb_array_size = s->mb_stride * s->mb_height;
304 const int b8_array_size = s->b8_stride * s->mb_height * 2;
308 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
309 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
310 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
312 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
313 return AVERROR(ENOMEM);
316 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
317 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
318 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
319 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
320 return AVERROR(ENOMEM);
323 if (s->out_format == FMT_H263 || s->encoding) {
324 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
325 int ref_index_size = 4 * mb_array_size;
327 for (i = 0; mv_size && i < 2; i++) {
328 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
329 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
330 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
331 return AVERROR(ENOMEM);
338 static int make_tables_writable(Picture *pic)
341 #define MAKE_WRITABLE(table) \
344 (ret = av_buffer_make_writable(&pic->table)) < 0)\
348 MAKE_WRITABLE(mb_var_buf);
349 MAKE_WRITABLE(mc_mb_var_buf);
350 MAKE_WRITABLE(mb_mean_buf);
351 MAKE_WRITABLE(mbskip_table_buf);
352 MAKE_WRITABLE(qscale_table_buf);
353 MAKE_WRITABLE(mb_type_buf);
355 for (i = 0; i < 2; i++) {
356 MAKE_WRITABLE(motion_val_buf[i]);
357 MAKE_WRITABLE(ref_index_buf[i]);
364 * Allocate a Picture.
365 * The pixels are allocated/set by calling get_buffer() if shared = 0
367 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
372 assert(pic->f.data[0]);
375 assert(!pic->f.buf[0]);
377 if (alloc_frame_buffer(s, pic) < 0)
380 s->linesize = pic->f.linesize[0];
381 s->uvlinesize = pic->f.linesize[1];
384 if (!pic->qscale_table_buf)
385 ret = alloc_picture_tables(s, pic);
387 ret = make_tables_writable(pic);
392 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
393 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
394 pic->mb_mean = pic->mb_mean_buf->data;
397 pic->mbskip_table = pic->mbskip_table_buf->data;
398 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
399 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
401 if (pic->motion_val_buf[0]) {
402 for (i = 0; i < 2; i++) {
403 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
404 pic->ref_index[i] = pic->ref_index_buf[i]->data;
410 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
411 ff_mpeg_unref_picture(s, pic);
412 free_picture_tables(pic);
413 return AVERROR(ENOMEM);
417 * Deallocate a picture.
419 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
421 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
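/* Everything up to and including mb_mean (frame, buffer references, table
 * pointers) is either released explicitly below or deliberately kept for
 * reuse; the memset at the end only clears the plain state that follows. */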
424 /* WM Image / Screen codecs allocate internal buffers with different
425 * dimensions / colorspaces; ignore user-defined callbacks for these. */
426 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
427 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
428 s->codec_id != AV_CODEC_ID_MSS2)
429 ff_thread_release_buffer(s->avctx, &pic->tf);
431 av_frame_unref(&pic->f);
433 av_buffer_unref(&pic->hwaccel_priv_buf);
435 if (pic->needs_realloc)
436 free_picture_tables(pic);
438 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
441 static int update_picture_tables(Picture *dst, Picture *src)
445 #define UPDATE_TABLE(table)\
448 (!dst->table || dst->table->buffer != src->table->buffer)) {\
449 av_buffer_unref(&dst->table);\
450 dst->table = av_buffer_ref(src->table);\
452 free_picture_tables(dst);\
453 return AVERROR(ENOMEM);\
458 UPDATE_TABLE(mb_var_buf);
459 UPDATE_TABLE(mc_mb_var_buf);
460 UPDATE_TABLE(mb_mean_buf);
461 UPDATE_TABLE(mbskip_table_buf);
462 UPDATE_TABLE(qscale_table_buf);
463 UPDATE_TABLE(mb_type_buf);
464 for (i = 0; i < 2; i++) {
465 UPDATE_TABLE(motion_val_buf[i]);
466 UPDATE_TABLE(ref_index_buf[i]);
469 dst->mb_var = src->mb_var;
470 dst->mc_mb_var = src->mc_mb_var;
471 dst->mb_mean = src->mb_mean;
472 dst->mbskip_table = src->mbskip_table;
473 dst->qscale_table = src->qscale_table;
474 dst->mb_type = src->mb_type;
475 for (i = 0; i < 2; i++) {
476 dst->motion_val[i] = src->motion_val[i];
477 dst->ref_index[i] = src->ref_index[i];
483 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
487 av_assert0(!dst->f.buf[0]);
488 av_assert0(src->f.buf[0]);
492 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
496 ret = update_picture_tables(dst, src);
500 if (src->hwaccel_picture_private) {
501 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
502 if (!dst->hwaccel_priv_buf)
504 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
507 dst->field_picture = src->field_picture;
508 dst->mb_var_sum = src->mb_var_sum;
509 dst->mc_mb_var_sum = src->mc_mb_var_sum;
510 dst->b_frame_score = src->b_frame_score;
511 dst->needs_realloc = src->needs_realloc;
512 dst->reference = src->reference;
513 dst->shared = src->shared;
517 ff_mpeg_unref_picture(s, dst);
521 static void exchange_uv(MpegEncContext *s)
526 s->pblocks[4] = s->pblocks[5];
530 static int init_duplicate_context(MpegEncContext *s)
532 int y_size = s->b8_stride * (2 * s->mb_height + 1);
533 int c_size = s->mb_stride * (s->mb_height + 1);
534 int yc_size = y_size + 2 * c_size;
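// Illustrative sizing (numbers not from the source): CIF input (352x288)
// has mb_width = 22 and mb_height = 18, so b8_stride = 45, mb_stride = 23,
// y_size = 45 * 37 = 1665, c_size = 23 * 19 = 437 and
// yc_size = 1665 + 2 * 437 = 2539 entries, used below to size ac_val_base.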
542 s->obmc_scratchpad = NULL;
545 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
546 ME_MAP_SIZE * sizeof(uint32_t), fail)
547 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
548 ME_MAP_SIZE * sizeof(uint32_t), fail)
549 if (s->avctx->noise_reduction) {
550 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
551 2 * 64 * sizeof(int), fail)
554 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
555 s->block = s->blocks[0];
557 for (i = 0; i < 12; i++) {
558 s->pblocks[i] = &s->block[i];
560 if (s->avctx->codec_tag == AV_RL32("VCR2"))
563 if (s->out_format == FMT_H263) {
565 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
566 yc_size * sizeof(int16_t) * 16, fail);
567 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
568 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
569 s->ac_val[2] = s->ac_val[1] + c_size;
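// Layout: ac_val_base holds the luma plane first (y_size entries of 16
// coefficients each), then Cb and Cr (c_size each); each ac_val[] pointer is
// advanced one row plus one column into its plane so the left/top neighbours
// of the first real block can be addressed.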
574 return -1; // free() through ff_MPV_common_end()
577 static void free_duplicate_context(MpegEncContext *s)
582 av_freep(&s->edge_emu_buffer);
583 av_freep(&s->me.scratchpad);
587 s->obmc_scratchpad = NULL;
589 av_freep(&s->dct_error_sum);
590 av_freep(&s->me.map);
591 av_freep(&s->me.score_map);
592 av_freep(&s->blocks);
593 av_freep(&s->ac_val_base);
597 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
599 #define COPY(a) bak->a = src->a
600 COPY(edge_emu_buffer);
605 COPY(obmc_scratchpad);
612 COPY(me.map_generation);
624 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
628 // FIXME copy only needed parts
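// The update works by saving dst's per-thread scratch pointers, overwriting
// dst wholesale with src, and then restoring the saved pointers, so dst picks
// up all codec state while keeping its own buffers.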
630 backup_duplicate_context(&bak, dst);
631 memcpy(dst, src, sizeof(MpegEncContext));
632 backup_duplicate_context(dst, &bak);
633 for (i = 0; i < 12; i++) {
634 dst->pblocks[i] = &dst->block[i];
636 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
638 if (!dst->edge_emu_buffer &&
639 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
640 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
641 "scratch buffers.\n");
644 // STOP_TIMER("update_duplicate_context")
// about 10k cycles / 0.01 sec for 1000 frames on 1 GHz with 2 threads
649 int ff_mpeg_update_thread_context(AVCodecContext *dst,
650 const AVCodecContext *src)
653 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
655 if (dst == src || !s1->context_initialized)
658 // FIXME can parameters change on I-frames?
659 // in that case dst may need a reinit
660 if (!s->context_initialized) {
661 memcpy(s, s1, sizeof(MpegEncContext));
664 s->bitstream_buffer = NULL;
665 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
667 ff_MPV_common_init(s);
670 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
672 s->context_reinit = 0;
673 s->height = s1->height;
674 s->width = s1->width;
675 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
679 s->avctx->coded_height = s1->avctx->coded_height;
680 s->avctx->coded_width = s1->avctx->coded_width;
681 s->avctx->width = s1->avctx->width;
682 s->avctx->height = s1->avctx->height;
684 s->coded_picture_number = s1->coded_picture_number;
685 s->picture_number = s1->picture_number;
687 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
688 ff_mpeg_unref_picture(s, &s->picture[i]);
689 if (s1->picture[i].f.buf[0] &&
690 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
694 #define UPDATE_PICTURE(pic)\
696 ff_mpeg_unref_picture(s, &s->pic);\
697 if (s1->pic.f.buf[0])\
698 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
700 ret = update_picture_tables(&s->pic, &s1->pic);\
705 UPDATE_PICTURE(current_picture);
706 UPDATE_PICTURE(last_picture);
707 UPDATE_PICTURE(next_picture);
709 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
710 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
711 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
713 // Error/bug resilience
714 s->next_p_frame_damaged = s1->next_p_frame_damaged;
715 s->workaround_bugs = s1->workaround_bugs;
718 memcpy(&s->last_time_base, &s1->last_time_base,
719 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
720 (char *) &s1->last_time_base);
723 s->max_b_frames = s1->max_b_frames;
724 s->low_delay = s1->low_delay;
725 s->droppable = s1->droppable;
727 // DivX handling (doesn't work)
728 s->divx_packed = s1->divx_packed;
730 if (s1->bitstream_buffer) {
731 if (s1->bitstream_buffer_size +
732 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
733 av_fast_malloc(&s->bitstream_buffer,
734 &s->allocated_bitstream_buffer_size,
735 s1->allocated_bitstream_buffer_size);
736 s->bitstream_buffer_size = s1->bitstream_buffer_size;
737 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
738 s1->bitstream_buffer_size);
739 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
740 FF_INPUT_BUFFER_PADDING_SIZE);
// linesize-dependent scratch buffer allocation
744 if (!s->edge_emu_buffer)
746 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
747 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
748 "scratch buffers.\n");
749 return AVERROR(ENOMEM);
752 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
753 "be allocated due to unknown size.\n");
757 // MPEG2/interlacing info
758 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
759 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
761 if (!s1->first_field) {
762 s->last_pict_type = s1->pict_type;
763 if (s1->current_picture_ptr)
764 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
766 if (s1->pict_type != AV_PICTURE_TYPE_B) {
767 s->last_non_b_pict_type = s1->pict_type;
775 * Set the given MpegEncContext to common defaults
776 * (same for encoding and decoding).
777 * The changed fields will not depend upon the
778 * prior state of the MpegEncContext.
780 void ff_MPV_common_defaults(MpegEncContext *s)
782 s->y_dc_scale_table =
783 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
784 s->chroma_qscale_table = ff_default_chroma_qscale_table;
785 s->progressive_frame = 1;
786 s->progressive_sequence = 1;
787 s->picture_structure = PICT_FRAME;
789 s->coded_picture_number = 0;
790 s->picture_number = 0;
795 s->slice_context_count = 1;
* Set the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
803 void ff_MPV_decode_defaults(MpegEncContext *s)
805 ff_MPV_common_defaults(s);
808 static int init_er(MpegEncContext *s)
810 ERContext *er = &s->er;
811 int mb_array_size = s->mb_height * s->mb_stride;
814 er->avctx = s->avctx;
817 er->mb_index2xy = s->mb_index2xy;
818 er->mb_num = s->mb_num;
819 er->mb_width = s->mb_width;
820 er->mb_height = s->mb_height;
821 er->mb_stride = s->mb_stride;
822 er->b8_stride = s->b8_stride;
824 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
825 er->error_status_table = av_mallocz(mb_array_size);
826 if (!er->er_temp_buffer || !er->error_status_table)
829 er->mbskip_table = s->mbskip_table;
830 er->mbintra_table = s->mbintra_table;
832 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
833 er->dc_val[i] = s->dc_val[i];
835 er->decode_mb = mpeg_er_decode_mb;
840 av_freep(&er->er_temp_buffer);
841 av_freep(&er->error_status_table);
842 return AVERROR(ENOMEM);
* Initialize and allocate MpegEncContext fields dependent on the resolution.
848 static int init_context_frame(MpegEncContext *s)
850 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
852 s->mb_width = (s->width + 15) / 16;
853 s->mb_stride = s->mb_width + 1;
854 s->b8_stride = s->mb_width * 2 + 1;
855 s->b4_stride = s->mb_width * 4 + 1;
856 mb_array_size = s->mb_height * s->mb_stride;
857 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
/* set default edge pos, will be overridden
 * in decode_header if needed */
861 s->h_edge_pos = s->mb_width * 16;
862 s->v_edge_pos = s->mb_height * 16;
864 s->mb_num = s->mb_width * s->mb_height;
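// Example geometry (illustrative only): a 1280x720 progressive stream gives
// mb_width = 80, mb_height = 45, mb_stride = 81, b8_stride = 161,
// mb_array_size = 3645 and mv_table_size = 47 * 81 + 1 = 3808, with
// h_edge_pos = 1280 and v_edge_pos = 720.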
869 s->block_wrap[3] = s->b8_stride;
871 s->block_wrap[5] = s->mb_stride;
873 y_size = s->b8_stride * (2 * s->mb_height + 1);
874 c_size = s->mb_stride * (s->mb_height + 1);
875 yc_size = y_size + 2 * c_size;
FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
fail); // error resilience code looks cleaner with this
879 for (y = 0; y < s->mb_height; y++)
880 for (x = 0; x < s->mb_width; x++)
881 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
883 s->mb_index2xy[s->mb_height * s->mb_width] =
884 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
887 /* Allocate MV tables */
888 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
889 mv_table_size * 2 * sizeof(int16_t), fail);
890 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
891 mv_table_size * 2 * sizeof(int16_t), fail);
892 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
893 mv_table_size * 2 * sizeof(int16_t), fail);
894 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
895 mv_table_size * 2 * sizeof(int16_t), fail);
896 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
897 mv_table_size * 2 * sizeof(int16_t), fail);
898 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
899 mv_table_size * 2 * sizeof(int16_t), fail);
900 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
901 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
902 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
903 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
905 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
907 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
909 /* Allocate MB type table */
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
911 sizeof(uint16_t), fail); // needed for encoding
913 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
916 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
917 mb_array_size * sizeof(float), fail);
918 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
919 mb_array_size * sizeof(float), fail);
923 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
924 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
925 /* interlaced direct mode decoding tables */
926 for (i = 0; i < 2; i++) {
928 for (j = 0; j < 2; j++) {
929 for (k = 0; k < 2; k++) {
930 FF_ALLOCZ_OR_GOTO(s->avctx,
931 s->b_field_mv_table_base[i][j][k],
932 mv_table_size * 2 * sizeof(int16_t),
934 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
938 mb_array_size * 2 * sizeof(uint8_t), fail);
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
940 mv_table_size * 2 * sizeof(int16_t), fail);
941 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
945 mb_array_size * 2 * sizeof(uint8_t), fail);
948 if (s->out_format == FMT_H263) {
950 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
951 s->coded_block = s->coded_block_base + s->b8_stride + 1;
953 /* cbp, ac_pred, pred_dir */
954 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
955 mb_array_size * sizeof(uint8_t), fail);
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
957 mb_array_size * sizeof(uint8_t), fail);
960 if (s->h263_pred || s->h263_plus || !s->encoding) {
962 // MN: we need these for error resilience of intra-frames
963 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
964 yc_size * sizeof(int16_t), fail);
965 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
966 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
967 s->dc_val[2] = s->dc_val[1] + c_size;
968 for (i = 0; i < yc_size; i++)
969 s->dc_val_base[i] = 1024;
/* which mb is an intra block */
973 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
974 memset(s->mbintra_table, 1, mb_array_size);
976 /* init macroblock skip table */
977 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
978 // Note the + 1 is for a quicker mpeg4 slice_end detection
982 return AVERROR(ENOMEM);
986 * init common structure for both encoder and decoder.
987 * this assumes that some variables like width/height are already set
989 av_cold int ff_MPV_common_init(MpegEncContext *s)
992 int nb_slices = (HAVE_THREADS &&
993 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
994 s->avctx->thread_count : 1;
996 if (s->encoding && s->avctx->slices)
997 nb_slices = s->avctx->slices;
999 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1000 s->mb_height = (s->height + 31) / 32 * 2;
1002 s->mb_height = (s->height + 15) / 16;
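// Illustration (values not from the source): for height = 720 the interlaced
// MPEG-2 branch gives (720 + 31) / 32 * 2 = 46 MB rows (height rounded up to
// a multiple of 32), while the progressive formula gives (720 + 15) / 16 = 45.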
1004 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1005 av_log(s->avctx, AV_LOG_ERROR,
1006 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1010 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1013 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1015 max_slices = MAX_THREADS;
1016 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1017 " reducing to %d\n", nb_slices, max_slices);
1018 nb_slices = max_slices;
1021 if ((s->width || s->height) &&
1022 av_image_check_size(s->width, s->height, 0, s->avctx))
1025 ff_dct_common_init(s);
1027 s->flags = s->avctx->flags;
1028 s->flags2 = s->avctx->flags2;
1030 /* set chroma shifts */
1031 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1033 &s->chroma_y_shift);
1035 /* convert fourcc to upper case */
1036 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1038 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1040 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1041 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1042 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1043 avcodec_get_frame_defaults(&s->picture[i].f);
1045 memset(&s->next_picture, 0, sizeof(s->next_picture));
1046 memset(&s->last_picture, 0, sizeof(s->last_picture));
1047 memset(&s->current_picture, 0, sizeof(s->current_picture));
1048 avcodec_get_frame_defaults(&s->next_picture.f);
1049 avcodec_get_frame_defaults(&s->last_picture.f);
1050 avcodec_get_frame_defaults(&s->current_picture.f);
1052 if (s->width && s->height) {
1053 if (init_context_frame(s))
1056 s->parse_context.state = -1;
1059 s->context_initialized = 1;
1060 s->thread_context[0] = s;
1062 if (s->width && s->height) {
1063 if (nb_slices > 1) {
1064 for (i = 1; i < nb_slices; i++) {
1065 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1066 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1069 for (i = 0; i < nb_slices; i++) {
1070 if (init_duplicate_context(s->thread_context[i]) < 0)
1072 s->thread_context[i]->start_mb_y =
1073 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1074 s->thread_context[i]->end_mb_y =
1075 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
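// Rounded partition of the MB rows, e.g. (illustrative) mb_height = 45 with
// nb_slices = 3 gives the bands [0,15), [15,30) and [30,45); each slice
// context decodes one contiguous band.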
1078 if (init_duplicate_context(s) < 0)
1081 s->end_mb_y = s->mb_height;
1083 s->slice_context_count = nb_slices;
1088 ff_MPV_common_end(s);
* Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * codec.
1097 static int free_context_frame(MpegEncContext *s)
1101 av_freep(&s->mb_type);
1102 av_freep(&s->p_mv_table_base);
1103 av_freep(&s->b_forw_mv_table_base);
1104 av_freep(&s->b_back_mv_table_base);
1105 av_freep(&s->b_bidir_forw_mv_table_base);
1106 av_freep(&s->b_bidir_back_mv_table_base);
1107 av_freep(&s->b_direct_mv_table_base);
1108 s->p_mv_table = NULL;
1109 s->b_forw_mv_table = NULL;
1110 s->b_back_mv_table = NULL;
1111 s->b_bidir_forw_mv_table = NULL;
1112 s->b_bidir_back_mv_table = NULL;
1113 s->b_direct_mv_table = NULL;
1114 for (i = 0; i < 2; i++) {
1115 for (j = 0; j < 2; j++) {
1116 for (k = 0; k < 2; k++) {
1117 av_freep(&s->b_field_mv_table_base[i][j][k]);
1118 s->b_field_mv_table[i][j][k] = NULL;
1120 av_freep(&s->b_field_select_table[i][j]);
1121 av_freep(&s->p_field_mv_table_base[i][j]);
1122 s->p_field_mv_table[i][j] = NULL;
1124 av_freep(&s->p_field_select_table[i]);
1127 av_freep(&s->dc_val_base);
1128 av_freep(&s->coded_block_base);
1129 av_freep(&s->mbintra_table);
1130 av_freep(&s->cbp_table);
1131 av_freep(&s->pred_dir_table);
1133 av_freep(&s->mbskip_table);
1135 av_freep(&s->er.error_status_table);
1136 av_freep(&s->er.er_temp_buffer);
1137 av_freep(&s->mb_index2xy);
1138 av_freep(&s->lambda_table);
1139 av_freep(&s->cplx_tab);
1140 av_freep(&s->bits_tab);
1142 s->linesize = s->uvlinesize = 0;
1147 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1151 if (s->slice_context_count > 1) {
1152 for (i = 0; i < s->slice_context_count; i++) {
1153 free_duplicate_context(s->thread_context[i]);
1155 for (i = 1; i < s->slice_context_count; i++) {
1156 av_freep(&s->thread_context[i]);
1159 free_duplicate_context(s);
1161 if ((err = free_context_frame(s)) < 0)
1165 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1166 s->picture[i].needs_realloc = 1;
1169 s->last_picture_ptr =
1170 s->next_picture_ptr =
1171 s->current_picture_ptr = NULL;
1174 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1175 s->mb_height = (s->height + 31) / 32 * 2;
1177 s->mb_height = (s->height + 15) / 16;
1179 if ((s->width || s->height) &&
1180 av_image_check_size(s->width, s->height, 0, s->avctx))
1181 return AVERROR_INVALIDDATA;
1183 if ((err = init_context_frame(s)))
1186 s->thread_context[0] = s;
1188 if (s->width && s->height) {
1189 int nb_slices = s->slice_context_count;
1190 if (nb_slices > 1) {
1191 for (i = 1; i < nb_slices; i++) {
1192 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1193 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1196 for (i = 0; i < nb_slices; i++) {
1197 if (init_duplicate_context(s->thread_context[i]) < 0)
1199 s->thread_context[i]->start_mb_y =
1200 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1201 s->thread_context[i]->end_mb_y =
1202 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1205 if (init_duplicate_context(s) < 0)
1208 s->end_mb_y = s->mb_height;
1210 s->slice_context_count = nb_slices;
1215 ff_MPV_common_end(s);
/* free common structure for both encoder and decoder */
1220 void ff_MPV_common_end(MpegEncContext *s)
1224 if (s->slice_context_count > 1) {
1225 for (i = 0; i < s->slice_context_count; i++) {
1226 free_duplicate_context(s->thread_context[i]);
1228 for (i = 1; i < s->slice_context_count; i++) {
1229 av_freep(&s->thread_context[i]);
1231 s->slice_context_count = 1;
1232 } else free_duplicate_context(s);
1234 av_freep(&s->parse_context.buffer);
1235 s->parse_context.buffer_size = 0;
1237 av_freep(&s->bitstream_buffer);
1238 s->allocated_bitstream_buffer_size = 0;
1240 av_freep(&s->avctx->stats_out);
1241 av_freep(&s->ac_stats);
1243 av_freep(&s->q_intra_matrix);
1244 av_freep(&s->q_inter_matrix);
1245 av_freep(&s->q_intra_matrix16);
1246 av_freep(&s->q_inter_matrix16);
1247 av_freep(&s->input_picture);
1248 av_freep(&s->reordered_input_picture);
1249 av_freep(&s->dct_offset);
1252 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1253 free_picture_tables(&s->picture[i]);
1254 ff_mpeg_unref_picture(s, &s->picture[i]);
1257 av_freep(&s->picture);
1258 free_picture_tables(&s->last_picture);
1259 ff_mpeg_unref_picture(s, &s->last_picture);
1260 free_picture_tables(&s->current_picture);
1261 ff_mpeg_unref_picture(s, &s->current_picture);
1262 free_picture_tables(&s->next_picture);
1263 ff_mpeg_unref_picture(s, &s->next_picture);
1264 free_picture_tables(&s->new_picture);
1265 ff_mpeg_unref_picture(s, &s->new_picture);
1267 free_context_frame(s);
1269 s->context_initialized = 0;
1270 s->last_picture_ptr =
1271 s->next_picture_ptr =
1272 s->current_picture_ptr = NULL;
1273 s->linesize = s->uvlinesize = 0;
1276 av_cold void ff_init_rl(RLTable *rl,
1277 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1279 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1280 uint8_t index_run[MAX_RUN + 1];
1281 int last, run, level, start, end, i;
1283 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1284 if (static_store && rl->max_level[0])
1287 /* compute max_level[], max_run[] and index_run[] */
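/* For each "last" class this pass derives three lookup tables:
 * max_level[run] is the largest level codable for a given run,
 * max_run[level] is the longest run codable for a given level, and
 * index_run[run] is the index of the first code with that run; the encoders
 * use these to decide between regular VLC codes and escape coding. */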
1288 for (last = 0; last < 2; last++) {
1297 memset(max_level, 0, MAX_RUN + 1);
1298 memset(max_run, 0, MAX_LEVEL + 1);
1299 memset(index_run, rl->n, MAX_RUN + 1);
1300 for (i = start; i < end; i++) {
1301 run = rl->table_run[i];
1302 level = rl->table_level[i];
1303 if (index_run[run] == rl->n)
1305 if (level > max_level[run])
1306 max_level[run] = level;
1307 if (run > max_run[level])
1308 max_run[level] = run;
1311 rl->max_level[last] = static_store[last];
1313 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1314 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1316 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1318 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1319 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1321 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1323 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1324 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1328 av_cold void ff_init_vlc_rl(RLTable *rl)
1332 for (q = 0; q < 32; q++) {
1334 int qadd = (q - 1) | 1;
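// For nonzero q this prepares H.263-style dequantization,
// level' = level * qmul + qadd with qmul twice the qscale; e.g.
// (illustrative) q = 10 gives qmul = 20 and qadd = (10 - 1) | 1 = 9, which
// the loop below bakes into a ready-to-use RL_VLC table per qscale.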
1340 for (i = 0; i < rl->vlc.table_size; i++) {
1341 int code = rl->vlc.table[i][0];
1342 int len = rl->vlc.table[i][1];
1345 if (len == 0) { // illegal code
1348 } else if (len < 0) { // more bits needed
1352 if (code == rl->n) { // esc
1356 run = rl->table_run[code] + 1;
1357 level = rl->table_level[code] * qmul + qadd;
1358 if (code >= rl->last) run += 192;
1361 rl->rl_vlc[q][i].len = len;
1362 rl->rl_vlc[q][i].level = level;
1363 rl->rl_vlc[q][i].run = run;
1368 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
/* release non-reference frames */
1373 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1374 if (!s->picture[i].reference &&
1375 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1376 ff_mpeg_unref_picture(s, &s->picture[i]);
1381 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1383 if (pic->f.buf[0] == NULL)
1385 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1390 static int find_unused_picture(MpegEncContext *s, int shared)
1395 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1396 if (s->picture[i].f.buf[0] == NULL)
1400 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1401 if (pic_is_unused(s, &s->picture[i]))
1406 return AVERROR_INVALIDDATA;
1409 int ff_find_unused_picture(MpegEncContext *s, int shared)
1411 int ret = find_unused_picture(s, shared);
1413 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1414 if (s->picture[ret].needs_realloc) {
1415 s->picture[ret].needs_realloc = 0;
1416 free_picture_tables(&s->picture[ret]);
1417 ff_mpeg_unref_picture(s, &s->picture[ret]);
1418 avcodec_get_frame_defaults(&s->picture[ret].f);
1424 static void update_noise_reduction(MpegEncContext *s)
1428 for (intra = 0; intra < 2; intra++) {
1429 if (s->dct_count[intra] > (1 << 16)) {
1430 for (i = 0; i < 64; i++) {
1431 s->dct_error_sum[intra][i] >>= 1;
1433 s->dct_count[intra] >>= 1;
1436 for (i = 0; i < 64; i++) {
1437 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1438 s->dct_count[intra] +
1439 s->dct_error_sum[intra][i] / 2) /
1440 (s->dct_error_sum[intra][i] + 1);
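// The offset is roughly noise_reduction * dct_count / dct_error_sum, so
// coefficients with a large accumulated error get a small offset.
// Illustration (made-up numbers): noise_reduction = 256, dct_count = 1000 and
// dct_error_sum = 50000 give (256000 + 25000) / 50001 = 5.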
1446 * generic function for encode/decode called after coding/decoding
1447 * the header and before a frame is coded/decoded.
1449 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1455 /* mark & release old frames */
1456 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1457 s->last_picture_ptr != s->next_picture_ptr &&
1458 s->last_picture_ptr->f.buf[0]) {
1459 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1462 /* release forgotten pictures */
1463 /* if (mpeg124/h263) */
1465 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1466 if (&s->picture[i] != s->last_picture_ptr &&
1467 &s->picture[i] != s->next_picture_ptr &&
1468 s->picture[i].reference && !s->picture[i].needs_realloc) {
1469 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1470 av_log(avctx, AV_LOG_ERROR,
1471 "releasing zombie picture\n");
1472 ff_mpeg_unref_picture(s, &s->picture[i]);
1477 ff_mpeg_unref_picture(s, &s->current_picture);
1480 ff_release_unused_pictures(s, 1);
1482 if (s->current_picture_ptr &&
1483 s->current_picture_ptr->f.buf[0] == NULL) {
// we already have an unused image
// (maybe it was set before reading the header)
1486 pic = s->current_picture_ptr;
1488 i = ff_find_unused_picture(s, 0);
1490 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1493 pic = &s->picture[i];
1497 if (!s->droppable) {
1498 if (s->pict_type != AV_PICTURE_TYPE_B)
1502 pic->f.coded_picture_number = s->coded_picture_number++;
1504 if (ff_alloc_picture(s, pic, 0) < 0)
1507 s->current_picture_ptr = pic;
1508 // FIXME use only the vars from current_pic
1509 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1510 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1511 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1512 if (s->picture_structure != PICT_FRAME)
1513 s->current_picture_ptr->f.top_field_first =
1514 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
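// For field pictures the flag reduces to: the pair is top-field-first
// exactly when the first coded field is the top field.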
1516 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1517 !s->progressive_sequence;
1518 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1521 s->current_picture_ptr->f.pict_type = s->pict_type;
1522 // if (s->flags && CODEC_FLAG_QSCALE)
1523 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1524 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1526 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1527 s->current_picture_ptr)) < 0)
1530 if (s->pict_type != AV_PICTURE_TYPE_B) {
1531 s->last_picture_ptr = s->next_picture_ptr;
1533 s->next_picture_ptr = s->current_picture_ptr;
1535 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1536 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1537 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1538 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1539 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1540 s->pict_type, s->droppable);
1542 if ((s->last_picture_ptr == NULL ||
1543 s->last_picture_ptr->f.buf[0] == NULL) &&
1544 (s->pict_type != AV_PICTURE_TYPE_I ||
1545 s->picture_structure != PICT_FRAME)) {
1546 int h_chroma_shift, v_chroma_shift;
1547 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1548 &h_chroma_shift, &v_chroma_shift);
1549 if (s->pict_type != AV_PICTURE_TYPE_I)
1550 av_log(avctx, AV_LOG_ERROR,
1551 "warning: first frame is no keyframe\n");
1552 else if (s->picture_structure != PICT_FRAME)
1553 av_log(avctx, AV_LOG_INFO,
1554 "allocate dummy last picture for field based first keyframe\n");
1556 /* Allocate a dummy frame */
1557 i = ff_find_unused_picture(s, 0);
1559 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1562 s->last_picture_ptr = &s->picture[i];
1563 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1564 s->last_picture_ptr = NULL;
1568 memset(s->last_picture_ptr->f.data[0], 0,
1569 avctx->height * s->last_picture_ptr->f.linesize[0]);
1570 memset(s->last_picture_ptr->f.data[1], 0x80,
1571 (avctx->height >> v_chroma_shift) *
1572 s->last_picture_ptr->f.linesize[1]);
1573 memset(s->last_picture_ptr->f.data[2], 0x80,
1574 (avctx->height >> v_chroma_shift) *
1575 s->last_picture_ptr->f.linesize[2]);
1577 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1578 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1580 if ((s->next_picture_ptr == NULL ||
1581 s->next_picture_ptr->f.buf[0] == NULL) &&
1582 s->pict_type == AV_PICTURE_TYPE_B) {
1583 /* Allocate a dummy frame */
1584 i = ff_find_unused_picture(s, 0);
1586 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1589 s->next_picture_ptr = &s->picture[i];
1590 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1591 s->next_picture_ptr = NULL;
1594 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1595 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1598 if (s->last_picture_ptr) {
1599 ff_mpeg_unref_picture(s, &s->last_picture);
1600 if (s->last_picture_ptr->f.buf[0] &&
1601 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1602 s->last_picture_ptr)) < 0)
1605 if (s->next_picture_ptr) {
1606 ff_mpeg_unref_picture(s, &s->next_picture);
1607 if (s->next_picture_ptr->f.buf[0] &&
1608 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1609 s->next_picture_ptr)) < 0)
1613 if (s->pict_type != AV_PICTURE_TYPE_I &&
1614 !(s->last_picture_ptr && s->last_picture_ptr->f.buf[0])) {
1615 av_log(s, AV_LOG_ERROR,
1616 "Non-reference picture received and no reference available\n");
1617 return AVERROR_INVALIDDATA;
1620 if (s->picture_structure!= PICT_FRAME) {
1622 for (i = 0; i < 4; i++) {
1623 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1624 s->current_picture.f.data[i] +=
1625 s->current_picture.f.linesize[i];
1627 s->current_picture.f.linesize[i] *= 2;
1628 s->last_picture.f.linesize[i] *= 2;
1629 s->next_picture.f.linesize[i] *= 2;
1633 s->err_recognition = avctx->err_recognition;
/* Set the dequantizer; we can't do it during init, as it might change
 * for MPEG-4, and we can't do it in the header decode, as init is not
 * called for MPEG-4 there yet. */
1638 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1639 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1640 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1641 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1642 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1643 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1645 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1646 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1649 if (s->dct_error_sum) {
1650 assert(s->avctx->noise_reduction && s->encoding);
1651 update_noise_reduction(s);
1655 FF_DISABLE_DEPRECATION_WARNINGS
1656 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1657 return ff_xvmc_field_start(s, avctx);
1658 FF_ENABLE_DEPRECATION_WARNINGS
1659 #endif /* FF_API_XVMC */
1664 /* generic function for encode/decode called after a
1665 * frame has been coded/decoded. */
1666 void ff_MPV_frame_end(MpegEncContext *s)
1671 FF_DISABLE_DEPRECATION_WARNINGS
1672 /* redraw edges for the frame if decoding didn't complete */
1673 // just to make sure that all data is rendered.
1674 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1675 ff_xvmc_field_end(s);
1677 FF_ENABLE_DEPRECATION_WARNINGS
1678 #endif /* FF_API_XVMC */
1679 if ((s->er.error_count || s->encoding) &&
1680 !s->avctx->hwaccel &&
1681 s->unrestricted_mv &&
1682 s->current_picture.reference &&
1684 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1685 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1686 int hshift = desc->log2_chroma_w;
1687 int vshift = desc->log2_chroma_h;
1688 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1689 s->h_edge_pos, s->v_edge_pos,
1690 EDGE_WIDTH, EDGE_WIDTH,
1691 EDGE_TOP | EDGE_BOTTOM);
1692 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1693 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1694 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1695 EDGE_TOP | EDGE_BOTTOM);
1696 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1697 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1698 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1699 EDGE_TOP | EDGE_BOTTOM);
1704 s->last_pict_type = s->pict_type;
1705 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1706 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1707 s->last_non_b_pict_type = s->pict_type;
1710 /* copy back current_picture variables */
1711 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1712 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1713 s->picture[i] = s->current_picture;
1717 assert(i < MAX_PICTURE_COUNT);
1721 /* release non-reference frames */
1722 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1723 if (!s->picture[i].reference)
1724 ff_mpeg_unref_picture(s, &s->picture[i]);
1727 // clear copies, to avoid confusion
1729 memset(&s->last_picture, 0, sizeof(Picture));
1730 memset(&s->next_picture, 0, sizeof(Picture));
1731 memset(&s->current_picture, 0, sizeof(Picture));
1733 s->avctx->coded_frame = &s->current_picture_ptr->f;
1735 if (s->current_picture.reference)
1736 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1740 * Print debugging info for the given picture.
1742 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1745 if (s->avctx->hwaccel || !p || !p->mb_type)
1749 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1752 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1753 switch (pict->pict_type) {
1754 case AV_PICTURE_TYPE_I:
1755 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1757 case AV_PICTURE_TYPE_P:
1758 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1760 case AV_PICTURE_TYPE_B:
1761 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1763 case AV_PICTURE_TYPE_S:
1764 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1766 case AV_PICTURE_TYPE_SI:
1767 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1769 case AV_PICTURE_TYPE_SP:
1770 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1773 for (y = 0; y < s->mb_height; y++) {
1774 for (x = 0; x < s->mb_width; x++) {
1775 if (s->avctx->debug & FF_DEBUG_SKIP) {
1776 int count = s->mbskip_table[x + y * s->mb_stride];
1779 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1781 if (s->avctx->debug & FF_DEBUG_QP) {
1782 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1783 p->qscale_table[x + y * s->mb_stride]);
1785 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1786 int mb_type = p->mb_type[x + y * s->mb_stride];
1787 // Type & MV direction
1788 if (IS_PCM(mb_type))
1789 av_log(s->avctx, AV_LOG_DEBUG, "P");
1790 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1791 av_log(s->avctx, AV_LOG_DEBUG, "A");
1792 else if (IS_INTRA4x4(mb_type))
1793 av_log(s->avctx, AV_LOG_DEBUG, "i");
1794 else if (IS_INTRA16x16(mb_type))
1795 av_log(s->avctx, AV_LOG_DEBUG, "I");
1796 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1797 av_log(s->avctx, AV_LOG_DEBUG, "d");
1798 else if (IS_DIRECT(mb_type))
1799 av_log(s->avctx, AV_LOG_DEBUG, "D");
1800 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1801 av_log(s->avctx, AV_LOG_DEBUG, "g");
1802 else if (IS_GMC(mb_type))
1803 av_log(s->avctx, AV_LOG_DEBUG, "G");
1804 else if (IS_SKIP(mb_type))
1805 av_log(s->avctx, AV_LOG_DEBUG, "S");
1806 else if (!USES_LIST(mb_type, 1))
1807 av_log(s->avctx, AV_LOG_DEBUG, ">");
1808 else if (!USES_LIST(mb_type, 0))
1809 av_log(s->avctx, AV_LOG_DEBUG, "<");
1811 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1812 av_log(s->avctx, AV_LOG_DEBUG, "X");
1816 if (IS_8X8(mb_type))
1817 av_log(s->avctx, AV_LOG_DEBUG, "+");
1818 else if (IS_16X8(mb_type))
1819 av_log(s->avctx, AV_LOG_DEBUG, "-");
1820 else if (IS_8X16(mb_type))
1821 av_log(s->avctx, AV_LOG_DEBUG, "|");
1822 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1823 av_log(s->avctx, AV_LOG_DEBUG, " ");
1825 av_log(s->avctx, AV_LOG_DEBUG, "?");
1828 if (IS_INTERLACED(mb_type))
1829 av_log(s->avctx, AV_LOG_DEBUG, "=");
1831 av_log(s->avctx, AV_LOG_DEBUG, " ");
1834 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1840 * find the lowest MB row referenced in the MVs
1842 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1844 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1845 int my, off, i, mvs;
1847 if (s->picture_structure != PICT_FRAME || s->mcsel)
1850 switch (s->mv_type) {
1864 for (i = 0; i < mvs; i++) {
1865 my = s->mv[dir][i][1]<<qpel_shift;
1866 my_max = FFMAX(my_max, my);
1867 my_min = FFMIN(my_min, my);
1870 off = (FFMAX(-my_min, my_max) + 63) >> 6;
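// The MVs are in quarter-pel units here (half-pel values were shifted left
// once above), so "+ 63 >> 6" converts the largest vertical displacement to
// whole 16-pixel MB rows, rounding up. Illustration: my_max = 100 quarter-pel
// = 25 pixels gives off = (100 + 63) >> 6 = 2 rows.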
1872 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1874 return s->mb_height-1;
1877 /* put block[] to dest[] */
1878 static inline void put_dct(MpegEncContext *s,
1879 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1881 s->dct_unquantize_intra(s, block, i, qscale);
1882 s->dsp.idct_put (dest, line_size, block);
1885 /* add block[] to dest[] */
1886 static inline void add_dct(MpegEncContext *s,
1887 int16_t *block, int i, uint8_t *dest, int line_size)
1889 if (s->block_last_index[i] >= 0) {
1890 s->dsp.idct_add (dest, line_size, block);
1894 static inline void add_dequant_dct(MpegEncContext *s,
1895 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1897 if (s->block_last_index[i] >= 0) {
1898 s->dct_unquantize_inter(s, block, i, qscale);
1900 s->dsp.idct_add (dest, line_size, block);
1905 * Clean dc, ac, coded_block for the current non-intra MB.
1907 void ff_clean_intra_table_entries(MpegEncContext *s)
1909 int wrap = s->b8_stride;
1910 int xy = s->block_index[0];
1913 s->dc_val[0][xy + 1 ] =
1914 s->dc_val[0][xy + wrap] =
1915 s->dc_val[0][xy + 1 + wrap] = 1024;
1917 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1918 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1919 if (s->msmpeg4_version>=3) {
1920 s->coded_block[xy ] =
1921 s->coded_block[xy + 1 ] =
1922 s->coded_block[xy + wrap] =
1923 s->coded_block[xy + 1 + wrap] = 0;
1926 wrap = s->mb_stride;
1927 xy = s->mb_x + s->mb_y * wrap;
1929 s->dc_val[2][xy] = 1024;
1931 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1932 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1934 s->mbintra_table[xy]= 0;
1937 /* generic function called after a macroblock has been parsed by the
1938 decoder or after it has been encoded by the encoder.
1940 Important variables used:
1941 s->mb_intra : true if intra macroblock
1942 s->mv_dir : motion vector direction
1943 s->mv_type : motion vector type
1944 s->mv : motion vector
1945 s->interlaced_dct : true if interlaced dct used (mpeg2)
1947 static av_always_inline
1948 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1951 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1954 FF_DISABLE_DEPRECATION_WARNINGS
1955 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1956 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1959 FF_ENABLE_DEPRECATION_WARNINGS
1960 #endif /* FF_API_XVMC */
1962 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1963 /* print DCT coefficients */
1965 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1967 for(j=0; j<64; j++){
1968 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
1970 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1974 s->current_picture.qscale_table[mb_xy] = s->qscale;
1976 /* update DC predictors for P macroblocks */
1978 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1979 if(s->mbintra_table[mb_xy])
1980 ff_clean_intra_table_entries(s);
1984 s->last_dc[2] = 128 << s->intra_dc_precision;
1987 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1988 s->mbintra_table[mb_xy]=1;
1990 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1991 uint8_t *dest_y, *dest_cb, *dest_cr;
1992 int dct_linesize, dct_offset;
1993 op_pixels_func (*op_pix)[4];
1994 qpel_mc_func (*op_qpix)[16];
1995 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
1996 const int uvlinesize = s->current_picture.f.linesize[1];
1997 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
1998 const int block_size = 8;
2000 /* avoid copy if macroblock skipped in last frame too */
2001 /* skip only during decoding as we might trash the buffers during encoding a bit */
2003 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2005 if (s->mb_skipped) {
2007 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2009 } else if(!s->current_picture.reference) {
2012 *mbskip_ptr = 0; /* not skipped */
2016 dct_linesize = linesize << s->interlaced_dct;
2017 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
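// With interlaced DCT each 8x8 block holds every second line of the
// macroblock, so the effective stride doubles and the lower luma blocks start
// one source line (not eight) below the upper ones.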
2021 dest_cb= s->dest[1];
2022 dest_cr= s->dest[2];
2024 dest_y = s->b_scratchpad;
2025 dest_cb= s->b_scratchpad+16*linesize;
2026 dest_cr= s->b_scratchpad+32*linesize;
2030 /* motion handling */
2031 /* decoding or more than one mb_type (MC was already done otherwise) */
2034 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2035 if (s->mv_dir & MV_DIR_FORWARD) {
2036 ff_thread_await_progress(&s->last_picture_ptr->tf,
2037 ff_MPV_lowest_referenced_row(s, 0),
2040 if (s->mv_dir & MV_DIR_BACKWARD) {
2041 ff_thread_await_progress(&s->next_picture_ptr->tf,
2042 ff_MPV_lowest_referenced_row(s, 1),
2047 op_qpix= s->me.qpel_put;
2048 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2049 op_pix = s->hdsp.put_pixels_tab;
2051 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2053 if (s->mv_dir & MV_DIR_FORWARD) {
2054 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2055 op_pix = s->hdsp.avg_pixels_tab;
2056 op_qpix= s->me.qpel_avg;
2058 if (s->mv_dir & MV_DIR_BACKWARD) {
2059 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2063 /* skip dequant / idct if we are really late ;) */
2064 if(s->avctx->skip_idct){
2065 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2066 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2067 || s->avctx->skip_idct >= AVDISCARD_ALL)
2071 /* add dct residue */
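// Codecs whose coefficient decoding already produces dequantized values
// (MPEG-1/2, MSMPEG4 and MPEG-4 with the H.263 quantizer) only need idct_add
// further below; the remaining H.263-family cases, and the encoder's own
// reconstruction path, run dct_unquantize_inter first.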
2072 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2073 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2074 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2075 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2076 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2077 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2079 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2080 if (s->chroma_y_shift){
2081 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2082 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2086 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2087 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2088 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2089 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
            } else if (is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)) {
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) { //Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    } else {
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        add_dct(s, block[4], 4, dest_cb,              dct_linesize);
                        add_dct(s, block[5], 5, dest_cr,              dct_linesize);
                        add_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize);
                        if (!s->chroma_x_shift) { //Chroma444
                            add_dct(s, block[8],  8,  dest_cb + 8,              dct_linesize);
                            add_dct(s, block[9],  9,  dest_cr + 8,              dct_linesize);
                            add_dct(s, block[10], 10, dest_cb + 8 + dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr + 8 + dct_offset, dct_linesize);
                        }
                    }
                } //fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if (s->encoding || !(s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO)) {
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) {
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    } else {
                        dct_offset   >>= 1;
                        dct_linesize >>= 1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                } //gray
            } else {
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) {
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    } else {
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if (!s->chroma_x_shift) { //Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                } //gray
            }
        }
skip_idct:
        if (!readable) {
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y, linesize, 16);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize, 16 >> s->chroma_y_shift);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize, 16 >> s->chroma_y_shift);
        }
    }
}
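
/* Public entry point: selects the MPEG-1/2 specialisation of the macroblock
 * reconstruction worker via the is_mpeg12 argument, based on the output
 * format of the current context. */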
void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1) {
        MPV_decode_mb_internal(s, block, 1);
    } else
#endif
        MPV_decode_mb_internal(s, block, 0);
}

/**
 * @param h is the normal height, this will be reduced automatically if needed for the last row
 */
void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
                        Picture *last, int y, int h, int picture_structure,
                        int first_field, int draw_edges, int low_delay,
                        int v_edge_pos, int h_edge_pos)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int hshift = desc->log2_chroma_w;
    int vshift = desc->log2_chroma_h;
    const int field_pic = picture_structure != PICT_FRAME;
    if (field_pic) {
        h <<= 1;
        y <<= 1;
    }

    if (!avctx->hwaccel &&
        draw_edges &&
        cur->reference &&
        !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
        int *linesize = cur->f.linesize;
        int sides = 0, edge_h;
        if (y == 0) sides |= EDGE_TOP;
        if (y + h >= v_edge_pos)
            sides |= EDGE_BOTTOM;

        edge_h = FFMIN(h, v_edge_pos - y);

        dsp->draw_edges(cur->f.data[0] + y * linesize[0],
                        linesize[0], h_edge_pos, edge_h,
                        EDGE_WIDTH, EDGE_WIDTH, sides);
        dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
                        linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
        dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
                        linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
    }

    h = FFMIN(h, avctx->height - y);

    if (field_pic && first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD)) return;

    if (avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        if (cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
            (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
            src = &cur->f;
        else if (last)
            src = &last->f;
        else
            return;

        if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
            picture_structure == PICT_FRAME &&
            avctx->codec_id != AV_CODEC_ID_SVQ3) {
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        } else {
            offset[0] = y * src->linesize[0];
            offset[1] =
            offset[2] = (y >> vshift) * src->linesize[1];
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        emms_c();

        avctx->draw_horiz_band(avctx, src, offset,
                               y, picture_structure, h);
    }
}

void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
{
    int draw_edges = s->unrestricted_mv && !s->intra_only;
    ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
                       &s->last_picture, y, h, s->picture_structure,
                       s->first_field, draw_edges, s->low_delay,
                       s->v_edge_pos, s->h_edge_pos);
}
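
/* Set up s->block_index[] and the s->dest[] pointers for the current
 * macroblock position. Note that dest[] is initialised one macroblock to the
 * left (the (s->mb_x - 1) offsets) and is expected to be advanced as the row
 * is processed. */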
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size = 4;

    s->block_index[0] = s->b8_stride * (s->mb_y * 2    ) - 2 + s->mb_x * 2;
    s->block_index[1] = s->b8_stride * (s->mb_y * 2    ) - 1 + s->mb_x * 2;
    s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1) - 2 + s->mb_x * 2;
    s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) - 1 + s->mb_x * 2;
    s->block_index[4] = s->mb_stride * (s->mb_y + 1)                + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
    s->block_index[5] = s->mb_stride * (s->mb_y + s->mb_height + 2) + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if (!(s->pict_type == AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure == PICT_FRAME))
    {
        if (s->picture_structure == PICT_FRAME) {
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        } else {
            s->dest[0] += (s->mb_y >> 1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y >> 1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y >> 1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y & 1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}

/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
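/* Illustrative call (a sketch, not a real call site; "last_nonzero" is a
 * placeholder for the index of the last non-zero coefficient):
 *
 *     ff_block_permute(block, s->dsp.idct_permutation,
 *                      s->intra_scantable.scantable, last_nonzero);
 *
 * This reorders coefficients so that they match the permutation expected by
 * the selected IDCT. */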
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    if (last <= 0) return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        temp[j]  = block[j];
        block[j] = 0;
    }

    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
    }
}
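
/**
 * Release all picture buffers and reset the parser/bitstream state.
 * Used as the codec flush callback, e.g. after seeking.
 */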
void ff_mpeg_flush(AVCodecContext *avctx){
    int i;
    MpegEncContext *s = avctx->priv_data;

    if (s == NULL || s->picture == NULL)
        return;

    for (i = 0; i < MAX_PICTURE_COUNT; i++)
        ff_mpeg_unref_picture(s, &s->picture[i]);
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

    ff_mpeg_unref_picture(s, &s->current_picture);
    ff_mpeg_unref_picture(s, &s->last_picture);
    ff_mpeg_unref_picture(s, &s->next_picture);

    s->mb_x = s->mb_y = 0;

    s->parse_context.state             = -1;
    s->parse_context.frame_start_found = 0;
    s->parse_context.overread          = 0;
    s->parse_context.overread_index    = 0;
    s->parse_context.index             = 0;
    s->parse_context.last_index        = 0;
    s->bitstream_buffer_size = 0;
    s->pp_time = 0;
}

static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    if (n < 4)
        block[0] = block[0] * s->y_dc_scale;
    else
        block[0] = block[0] * s->c_dc_scale;
    /* XXX: only mpeg1 */
    quant_matrix = s->intra_matrix;
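    /* AC coefficients: level' = (level * qscale * quant_matrix[j]) >> 3, then
     * forced odd ("(level - 1) | 1"), which is MPEG-1's mismatch control. */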
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for (i = 0; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    if (s->alternate_scan) nCoeffs = 63;
    else nCoeffs = s->block_last_index[n];

    if (n < 4)
        block[0] = block[0] * s->y_dc_scale;
    else
        block[0] = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum = -1;

    if (s->alternate_scan) nCoeffs = 63;
    else nCoeffs = s->block_last_index[n];

    if (n < 4)
        block[0] = block[0] * s->y_dc_scale;
    else
        block[0] = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
            sum += level;
        }
    }
    block[63] ^= sum & 1;
}

static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum = -1;

    if (s->alternate_scan) nCoeffs = 63;
    else nCoeffs = s->block_last_index[n];

    quant_matrix = s->inter_matrix;
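    /* Inter formula: level' = ((2 * level + 1) * qscale * quant_matrix[j]) >> 4.
     * MPEG-2 does not force levels odd; mismatch control instead XORs the
     * parity of the coefficient sum into block[63] after the loop. */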
    for (i = 0; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
            }
            block[j] = level;
            sum += level;
        }
    }
    block[63] ^= sum & 1;
}

static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    assert(s->block_last_index[n] >= 0);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        if (n < 4)
            block[0] = block[0] * s->y_dc_scale;
        else
            block[0] = block[0] * s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    } else {
        qadd = 0;
    }
    if (s->ac_pred)
        nCoeffs = 63;
    else
        nCoeffs = s->inter_scantable.raster_end[s->block_last_index[n]];

    for (i = 1; i <= nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}

static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    assert(s->block_last_index[n] >= 0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    nCoeffs = s->inter_scantable.raster_end[s->block_last_index[n]];
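
    /* H.263-style dequant: level' = level * (2 * qscale) +/- qadd depending on
     * the sign of level, with qadd = (qscale - 1) | 1. */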
    for (i = 0; i <= nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}

/**
 * set qscale and update qscale dependent variables.
 */
void ff_set_qscale(MpegEncContext *s, int qscale)
{
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;

    s->qscale        = qscale;
    s->chroma_qscale = s->chroma_qscale_table[qscale];

    s->y_dc_scale = s->y_dc_scale_table[qscale];
    s->c_dc_scale = s->c_dc_scale_table[s->chroma_qscale];
}

void ff_MPV_report_decode_progress(MpegEncContext *s)
{
    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
        ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
}

#if CONFIG_ERROR_RESILIENCE
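/* Hand the current/last/next picture pointers and the relevant timing and
 * prediction parameters over to the error-resilience context before a frame
 * is decoded. */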
void ff_mpeg_er_frame_start(MpegEncContext *s)
{
    ERContext *er = &s->er;

    er->cur_pic  = s->current_picture_ptr;
    er->last_pic = s->last_picture_ptr;
    er->next_pic = s->next_picture_ptr;

    er->pp_time           = s->pp_time;
    er->pb_time           = s->pb_time;
    er->quarter_sample    = s->quarter_sample;
    er->partitioned_frame = s->partitioned_frame;

    ff_er_frame_start(er);
}
#endif /* CONFIG_ERROR_RESILIENCE */