 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include "bitstream.h"
#include "mpegvideo.h"
    Picture picture;  // currently decoded frame
    Picture DPB[2];   // reference frames
    int dist[2];      // temporal distances from current frame to ref frames
    int mb_width, mb_height;
    int loop_filter_disable;
    int alpha_offset, beta_offset;
    uint8_t *cy, *cu, *cv;
    /* mv motion vector cache
       X are the vectors in the current macroblock (5,6,9,10)
       A is the macroblock to the left (4,8)
       B is the macroblock to the top (1,2)
       C is the macroblock to the top-right (3)
       D is the macroblock to the top-left (0)
       the same is repeated for backward motion vectors */
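    /* A sketch of the layout implied by the indices above (reconstructed here,
       not quoted from elsewhere): the cache is a 4-entry-wide grid,
           0:  D3  B2  B3  C2
           4:  A1  X0  X1   -
           8:  A3  X2  X3   -
       with the backward vectors stored MV_BWD_OFFS entries further on. */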
    /* luma pred mode cache */
    int l_stride, c_stride;
    /* intra prediction is done with un-deblocked samples;
       they are saved here before deblocking the MB */
    uint8_t *top_border_y, *top_border_u, *top_border_v;
    uint8_t left_border_y[16], left_border_u[10], left_border_v[10];
    uint8_t topleft_border_y, topleft_border_u, topleft_border_v;
    void (*intra_pred_l[8])(uint8_t *d,uint8_t *top,uint8_t *left,int stride);
    void (*intra_pred_c[7])(uint8_t *d,uint8_t *top,uint8_t *left,int stride);
    uint8_t *col_type_base;

/*****************************************************************************
 *
 * in-loop deblocking filter
 *
 ****************************************************************************/
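/* Boundary strength values as used below (per my reading of the callers):
 * 2 marks an edge with an intra-coded side, 1 an inter edge whose references
 * or motion vectors differ by a pixel or more, and 0 means no filtering. */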
static inline int get_bs_p(vector_t *mvP, vector_t *mvQ) {
    if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA))
    if(mvP->ref != mvQ->ref)
    if( (abs(mvP->x - mvQ->x) >= 4) || (abs(mvP->y - mvQ->y) >= 4) )

static inline int get_bs_b(vector_t *mvP, vector_t *mvQ) {
    if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA)) {
        vector_t *mvPbw = mvP + MV_BWD_OFFS;
        vector_t *mvQbw = mvQ + MV_BWD_OFFS;
        if( (abs( mvP->x -  mvQ->x) >= 4) ||
            (abs( mvP->y -  mvQ->y) >= 4) ||
            (abs(mvPbw->x - mvQbw->x) >= 4) ||
            (abs(mvPbw->y - mvQbw->y) >= 4) )

/* boundary strength (bs) mapping */
    alpha = alpha_tab[clip(qp_avg + h->alpha_offset,0,63)]; \
    beta  =  beta_tab[clip(qp_avg + h->beta_offset, 0,63)]; \
    tc    =    tc_tab[clip(qp_avg + h->alpha_offset,0,63)];
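/* Example of the lookups above (assuming the usual 64-entry AVS tables): with
 * qp_avg == 36 and both offsets zero, alpha, beta and tc are simply the table
 * entries at index 36; non-zero offsets shift that index, clipped to 0..63. */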
static void filter_mb(AVSContext *h, enum mb_t mb_type) {
    DECLARE_ALIGNED_8(uint8_t, bs[8]);
    int qp_avg, alpha, beta, tc;

    /* save un-deblocked lines */
    h->topleft_border_y = h->top_border_y[h->mbx*16+15];
    h->topleft_border_u = h->top_border_u[h->mbx*10+8];
    h->topleft_border_v = h->top_border_v[h->mbx*10+8];
    memcpy(&h->top_border_y[h->mbx*16],   h->cy + 15* h->l_stride,16);
    memcpy(&h->top_border_u[h->mbx*10+1], h->cu +  7* h->c_stride,8);
    memcpy(&h->top_border_v[h->mbx*10+1], h->cv +  7* h->c_stride,8);
        h->left_border_y[i*2+0] = *(h->cy + 15 + (i*2+0)*h->l_stride);
        h->left_border_y[i*2+1] = *(h->cy + 15 + (i*2+1)*h->l_stride);
        h->left_border_u[i+1] = *(h->cu + 7 + i*h->c_stride);
        h->left_border_v[i+1] = *(h->cv + 7 + i*h->c_stride);
    if(!h->loop_filter_disable) {
            *((uint64_t *)bs) = 0;
            *((uint64_t *)bs) = 0x0202020202020202ULL;
                bs[2] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
                bs[3] = get_bs_p(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]);
                bs[6] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
                bs[7] = get_bs_p(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]);
                bs[0] = get_bs_p(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
                bs[1] = get_bs_p(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
                bs[4] = get_bs_p(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
                bs[5] = get_bs_p(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
                bs[2] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
                bs[3] = get_bs_b(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]);
                bs[6] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
                bs[7] = get_bs_b(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]);
                bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
                bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
                bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
                bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
            if(mb_type & 1) { //16X8
                bs[6] = bs[7] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
                bs[2] = bs[3] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
            bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
            bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
            bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
            bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
        if( *((uint64_t *)bs) ) {
            if(h->flags & A_AVAIL) {
                qp_avg = (h->qp + h->left_qp + 1) >> 1;
                h->s.dsp.cavs_filter_lv(h->cy,h->l_stride,alpha,beta,tc,bs[0],bs[1]);
                h->s.dsp.cavs_filter_cv(h->cu,h->c_stride,alpha,beta,tc,bs[0],bs[1]);
                h->s.dsp.cavs_filter_cv(h->cv,h->c_stride,alpha,beta,tc,bs[0],bs[1]);
            h->s.dsp.cavs_filter_lv(h->cy + 8,h->l_stride,alpha,beta,tc,bs[2],bs[3]);
            h->s.dsp.cavs_filter_lh(h->cy + 8*h->l_stride,h->l_stride,alpha,beta,tc,
            if(h->flags & B_AVAIL) {
                qp_avg = (h->qp + h->top_qp[h->mbx] + 1) >> 1;
                h->s.dsp.cavs_filter_lh(h->cy,h->l_stride,alpha,beta,tc,bs[4],bs[5]);
                h->s.dsp.cavs_filter_ch(h->cu,h->c_stride,alpha,beta,tc,bs[4],bs[5]);
                h->s.dsp.cavs_filter_ch(h->cv,h->c_stride,alpha,beta,tc,bs[4],bs[5]);
    h->top_qp[h->mbx] = h->qp;
/*****************************************************************************
 *
 * spatial intra prediction
 *
 ****************************************************************************/

static inline void load_intra_pred_luma(AVSContext *h, uint8_t *top,
                                        uint8_t *left, int block) {
        memcpy(&left[1],h->left_border_y,16);
        memcpy(&top[1],&h->top_border_y[h->mbx*16],16);
        if((h->flags & A_AVAIL) && (h->flags & B_AVAIL))
            left[0] = top[0] = h->topleft_border_y;
            left[i+1] = *(h->cy + 7 + i*h->l_stride);
        memset(&left[9],left[8],9);
        memcpy(&top[1],&h->top_border_y[h->mbx*16+8],8);
        if(h->flags & C_AVAIL)
            memcpy(&top[9],&h->top_border_y[(h->mbx + 1)*16],8);
            memset(&top[9],top[8],9);
        if(h->flags & B_AVAIL)
            left[0] = top[0] = h->top_border_y[h->mbx*16+7];
        memcpy(&left[1],&h->left_border_y[8],8);
        memset(&left[9],left[8],9);
        memcpy(&top[1],h->cy + 7*h->l_stride,16);
        left[0] = h->left_border_y[7];
        if(h->flags & A_AVAIL)
            left[i] = *(h->cy + 7 + (i+7)*h->l_stride);
        memset(&left[9],left[8],9);
        memcpy(&top[0],h->cy + 7 + 7*h->l_stride,9);
        memset(&top[9],top[8],9);
static void intra_pred_vert(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
    uint64_t a = *((uint64_t *)(&top[1]));
        *((uint64_t *)(d+y*stride)) = a;

static void intra_pred_horiz(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
        a = left[y+1] * 0x0101010101010101ULL;
        *((uint64_t *)(d+y*stride)) = a;

static void intra_pred_dc_128(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
    uint64_t a = 0x8080808080808080ULL;
        *((uint64_t *)(d+y*stride)) = a;

static void intra_pred_plane(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
        ih += (x+1)*(top[5+x]-top[3-x]);
        iv += (x+1)*(left[5+x]-left[3-x]);
    ia = (top[8]+left[8])<<4;
            d[y*stride+x] = cm[(ia+(x-3)*ih+(y-3)*iv+16)>>5];

#define LOWPASS(ARRAY,INDEX) \
    (( ARRAY[(INDEX)-1] + 2*ARRAY[(INDEX)] + ARRAY[(INDEX)+1] + 2) >> 2)
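/* LOWPASS is a 1-2-1 smoothing kernel with rounding: for neighbouring samples
 * 4, 8 and 12 it yields (4 + 2*8 + 12 + 2) >> 2 = 8. */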
static void intra_pred_lp(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
            d[y*stride+x] = (LOWPASS(top,x+1) + LOWPASS(left,y+1)) >> 1;

static void intra_pred_down_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
            d[y*stride+x] = (LOWPASS(top,x+y+2) + LOWPASS(left,x+y+2)) >> 1;

static void intra_pred_down_right(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
                d[y*stride+x] = (left[1]+2*top[0]+top[1]+2)>>2;
                d[y*stride+x] = LOWPASS(top,x-y);
                d[y*stride+x] = LOWPASS(left,y-x);

static void intra_pred_lp_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
            d[y*stride+x] = LOWPASS(left,y+1);

static void intra_pred_lp_top(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
            d[y*stride+x] = LOWPASS(top,x+1);
static inline void modify_pred(const int_fast8_t *mod_table, int *mode) {
    int newmode = mod_table[*mode];
        av_log(NULL, AV_LOG_ERROR, "Illegal intra prediction mode\n");

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/
static inline void mc_dir_part(AVSContext *h, Picture *pic, int square,
                               int chroma_height, int delta, int list, uint8_t *dest_y,
                               uint8_t *dest_cb, uint8_t *dest_cr, int src_x_offset,
                               int src_y_offset, qpel_mc_func *qpix_op,
                               h264_chroma_mc_func chroma_op, vector_t *mv){
    MpegEncContext * const s = &h->s;
    const int mx= mv->x + src_x_offset*8;
    const int my= mv->y + src_y_offset*8;
    const int luma_xy= (mx&3) + ((my&3)<<2);
    uint8_t * src_y  = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride;
    uint8_t * src_cb = pic->data[1] + (mx>>3) + (my>>3)*h->c_stride;
    uint8_t * src_cr = pic->data[2] + (mx>>3) + (my>>3)*h->c_stride;
    int extra_width = 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
    int extra_height = extra_width;
    const int full_mx = mx>>2;
    const int full_my = my>>2;
    const int pic_width  = 16*h->mb_width;
    const int pic_height = 16*h->mb_height;

    if(mx&7) extra_width  -= 3;
    if(my&7) extra_height -= 3;

    if(   full_mx < 0-extra_width
       || full_my < 0-extra_height
       || full_mx + 16/*FIXME*/ > pic_width + extra_width
       || full_my + 16/*FIXME*/ > pic_height + extra_height){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*h->l_stride, h->l_stride,
                            16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
        src_y= s->edge_emu_buffer + 2 + 2*h->l_stride;

    qpix_op[luma_xy](dest_y, src_y, h->l_stride); //FIXME try variable height perhaps?
        qpix_op[luma_xy](dest_y + delta, src_y + delta, h->l_stride);

        ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, h->c_stride,
                            9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
        src_cb= s->edge_emu_buffer;
    chroma_op(dest_cb, src_cb, h->c_stride, chroma_height, mx&7, my&7);

        ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, h->c_stride,
                            9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
        src_cr= s->edge_emu_buffer;
    chroma_op(dest_cr, src_cr, h->c_stride, chroma_height, mx&7, my&7);

static inline void mc_part_std(AVSContext *h, int square, int chroma_height, int delta,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int x_offset, int y_offset, qpel_mc_func *qpix_put,
                               h264_chroma_mc_func chroma_put, qpel_mc_func *qpix_avg,
                               h264_chroma_mc_func chroma_avg, vector_t *mv){
    qpel_mc_func *qpix_op = qpix_put;
    h264_chroma_mc_func chroma_op = chroma_put;

    dest_y  += 2*x_offset + 2*y_offset*h->l_stride;
    dest_cb +=   x_offset +   y_offset*h->c_stride;
    dest_cr +=   x_offset +   y_offset*h->c_stride;
    x_offset += 8*h->mbx;
    y_offset += 8*h->mby;
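    /* Note on units (my reading of the arithmetic): x_offset/y_offset are kept
     * in chroma pixels, i.e. half the luma resolution, which is why dest_y
     * advances by twice the offset; mc_dir_part() multiplies them by 8 so they
     * line up with mv->x/mv->y, which are luma quarter-pel values. */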
        Picture *ref= &h->DPB[mv->ref];
        mc_dir_part(h, ref, square, chroma_height, delta, 0,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_op, chroma_op, mv);

        chroma_op= chroma_avg;

    if((mv+MV_BWD_OFFS)->ref >= 0){
        Picture *ref= &h->DPB[0];
        mc_dir_part(h, ref, square, chroma_height, delta, 1,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_op, chroma_op, mv+MV_BWD_OFFS);

static void inter_pred(AVSContext *h) {
    /* always do 8x8 blocks; TODO: are larger blocks worth it? */
    mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 0,
                h->s.dsp.put_cavs_qpel_pixels_tab[1],
                h->s.dsp.put_h264_chroma_pixels_tab[1],
                h->s.dsp.avg_cavs_qpel_pixels_tab[1],
                h->s.dsp.avg_h264_chroma_pixels_tab[1], &h->mv[MV_FWD_X0]);
    mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 0,
                h->s.dsp.put_cavs_qpel_pixels_tab[1],
                h->s.dsp.put_h264_chroma_pixels_tab[1],
                h->s.dsp.avg_cavs_qpel_pixels_tab[1],
                h->s.dsp.avg_h264_chroma_pixels_tab[1], &h->mv[MV_FWD_X1]);
    mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 4,
                h->s.dsp.put_cavs_qpel_pixels_tab[1],
                h->s.dsp.put_h264_chroma_pixels_tab[1],
                h->s.dsp.avg_cavs_qpel_pixels_tab[1],
                h->s.dsp.avg_h264_chroma_pixels_tab[1], &h->mv[MV_FWD_X2]);
    mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 4,
                h->s.dsp.put_cavs_qpel_pixels_tab[1],
                h->s.dsp.put_h264_chroma_pixels_tab[1],
                h->s.dsp.avg_cavs_qpel_pixels_tab[1],
                h->s.dsp.avg_h264_chroma_pixels_tab[1], &h->mv[MV_FWD_X3]);
    /* set intra prediction modes to default values */
    h->pred_mode_Y[3] = h->pred_mode_Y[6] = INTRA_L_LP;
    h->top_pred_Y[h->mbx*2+0] = h->top_pred_Y[h->mbx*2+1] = INTRA_L_LP;

/*****************************************************************************
 *
 * motion vector prediction
 *
 ****************************************************************************/
static inline void set_mvs(vector_t *mv, enum block_t size) {
        mv[MV_STRIDE  ] = mv[0];
        mv[MV_STRIDE+1] = mv[0];
        mv[MV_STRIDE] = mv[0];

static inline void store_mvs(AVSContext *h) {
    h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 0] = h->mv[MV_FWD_X0];
    h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 1] = h->mv[MV_FWD_X1];
    h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 2] = h->mv[MV_FWD_X2];
    h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 3] = h->mv[MV_FWD_X3];

static inline void scale_mv(AVSContext *h, int *d_x, int *d_y,
                            vector_t *src, int distp) {
    int den = h->scale_den[src->ref];

    *d_x = (src->x*distp*den + 256 + (src->x>>31)) >> 9;
    *d_y = (src->y*distp*den + 256 + (src->y>>31)) >> 9;
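    /* Reading of the fixed-point math above: den is 512/dist(src->ref), so the
     * product approximates src * distp / dist(src->ref); the +256 rounds the
     * final >>9, and (src>>31) adds -1 for negative components so the rounding
     * stays symmetric around zero. */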
static inline void mv_pred_median(AVSContext *h, vector_t *mvP,
                                  vector_t *mvA, vector_t *mvB, vector_t *mvC) {
    int ax, ay, bx, by, cx, cy;
    int len_ab, len_bc, len_ca, len_mid;

    /* scale candidates according to their temporal span */
    scale_mv(h, &ax, &ay, mvA, mvP->dist);
    scale_mv(h, &bx, &by, mvB, mvP->dist);
    scale_mv(h, &cx, &cy, mvC, mvP->dist);
    /* find the geometrical median of the three candidates */
    len_ab = abs(ax - bx) + abs(ay - by);
    len_bc = abs(bx - cx) + abs(by - cy);
    len_ca = abs(cx - ax) + abs(cy - ay);
    len_mid = mid_pred(len_ab, len_bc, len_ca);
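    /* My reading of the branches below: the prediction is the candidate that
     * is not part of the median-length pair, e.g. if the A-B distance is the
     * median, C is chosen. */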
    if(len_mid == len_ab) {
    } else if(len_mid == len_bc) {

static inline void mv_pred_direct(AVSContext *h, vector_t *pmv_fw,
                                  vector_t *pmv_bw, vector_t *col_mv) {
    int den = h->direct_den[col_mv->ref];
    int m = col_mv->x >> 31;

    pmv_fw->dist = h->dist[1];
    pmv_bw->dist = h->dist[0];
    /* scale the co-located motion vector according to its temporal span */
    pmv_fw->x = (((den+(den*col_mv->x*pmv_fw->dist^m)-m-1)>>14)^m)-m;
    pmv_bw->x = m-(((den+(den*col_mv->x*pmv_bw->dist^m)-m-1)>>14)^m);
    pmv_fw->y = (((den+(den*col_mv->y*pmv_fw->dist^m)-m-1)>>14)^m)-m;
    pmv_bw->y = m-(((den+(den*col_mv->y*pmv_bw->dist^m)-m-1)>>14)^m);
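    /* How I read the bit-twiddling above: den is 16384/dist(col_mv->ref), so
     * den*col_mv*dist >> 14 is the temporally scaled vector; m is 0 for a
     * non-negative col_mv->x and -1 otherwise, and the XOR/subtract pairs
     * apply the scaling to the magnitude so rounding stays symmetric, with
     * the backward component negated on top of that. */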
static inline void mv_pred_sym(AVSContext *h, vector_t *src, enum block_t size) {
    vector_t *dst = src + MV_BWD_OFFS;

    /* backward mv is the scaled and negated forward mv */
    dst->x = -((src->x * h->sym_factor + 256) >> 9);
    dst->y = -((src->y * h->sym_factor + 256) >> 9);
    dst->dist = h->dist[0];
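    /* sym_factor is set up in decode_pic() as dist[0]*scale_den[1], i.e.
     * roughly 512*dist_bwd/dist_fwd, so this is (to my reading) the usual
     * symmetric-mode rule bwd_mv = -fwd_mv * dist_bwd / dist_fwd. */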
static void mv_pred(AVSContext *h, enum mv_loc_t nP, enum mv_loc_t nC,
                    enum mv_pred_t mode, enum block_t size, int ref) {
    vector_t *mvP = &h->mv[nP];
    vector_t *mvA = &h->mv[nP-1];
    vector_t *mvB = &h->mv[nP-4];
    vector_t *mvC = &h->mv[nC];
    int mvAref = mvA->ref;
    int mvBref = mvB->ref;

    mvP->dist = h->dist[mvP->ref];
    if(mvC->ref == NOT_AVAIL)
        mvC = &h->mv[nP-5]; // set to top-left (mvD)
    if(mode == MV_PRED_PSKIP) {
        if((mvAref == NOT_AVAIL) || (mvBref == NOT_AVAIL) ||
           ((mvA->x | mvA->y | mvA->ref) == 0) ||
           ((mvB->x | mvB->y | mvB->ref) == 0) ) {
    /* if there is only one suitable candidate, take it */
    if((mvAref >= 0) && (mvBref < 0) && (mvCref < 0)) {
    } else if((mvAref < 0) && (mvBref >= 0) && (mvCref < 0)) {
    } else if((mvAref < 0) && (mvBref < 0) && (mvCref >= 0)) {
        if(mvAref == mvP->ref) {
            mv_pred_median(h, mvP, mvA, mvB, mvC);
        if(mvBref == mvP->ref) {
            mv_pred_median(h, mvP, mvA, mvB, mvC);
        case MV_PRED_TOPRIGHT:
            if(mvCref == mvP->ref) {
                mv_pred_median(h, mvP, mvA, mvB, mvC);
            mv_pred_median(h, mvP, mvA, mvB, mvC);
    if(mode < MV_PRED_PSKIP) {
        mvP->x += get_se_golomb(&h->s.gb);
        mvP->y += get_se_golomb(&h->s.gb);
/*****************************************************************************
 *
 * residual data decoding
 *
 ****************************************************************************/

/* k-th order exponential Golomb code */
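/* Example (assuming the usual ue(v) behaviour of get_ue_golomb): with
 * order == 2, a prefix value of 3 followed by the raw suffix bits '01'
 * decodes to (3 << 2) + 1 = 13. */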
static inline int get_ue_code(GetBitContext *gb, int order) {
        int ret = get_ue_golomb(gb) << order;
        return ret + get_bits(gb,order);
    return get_ue_golomb(gb);

static int decode_residual_block(AVSContext *h, GetBitContext *gb,
                                 const residual_vlc_t *r, int esc_golomb_order,
                                 int qp, uint8_t *dst, int stride) {
    int level_code, esc_code, level, run, mask;
    int dqm = dequant_mul[qp];
    int dqs = dequant_shift[qp];
    int dqa = 1 << (dqs - 1);
    const uint8_t *scantab = ff_zigzag_direct;

    memset(block,0,64*sizeof(DCTELEM));
        level_code = get_ue_code(gb,r->golomb_order);
        if(level_code >= ESCAPE_CODE) {
            run = (level_code - ESCAPE_CODE) >> 1;
            esc_code = get_ue_code(gb,esc_golomb_order);
            level = esc_code + (run > r->max_run ? 1 : r->level_add[run]);
            while(level > r->inc_limit)
            mask = -(level_code & 1);
            level = (level^mask) - mask;
            level = r->rltab[level_code][0];
            if(!level) //end of block signal
            run = r->rltab[level_code][1];
            r += r->rltab[level_code][2];
        level_buf[i] = level;

    /* inverse scan and dequantization */
        pos += 1 + run_buf[i];
            av_log(h->s.avctx, AV_LOG_ERROR,
                   "position out of block bounds at pic %d MB(%d,%d)\n",
                   h->picture.poc, h->mbx, h->mby);
        block[scantab[pos]] = (level_buf[i]*dqm + dqa) >> dqs;
    h->s.dsp.cavs_idct8_add(dst,block,stride);

static inline void decode_residual_chroma(AVSContext *h) {
        decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp],
        decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp],

static inline void decode_residual_inter(AVSContext *h) {

    /* get coded block pattern */
    h->cbp = cbp_tab[get_ue_golomb(&h->s.gb)][1];
    if(h->cbp && !h->qp_fixed)
        h->qp += get_se_golomb(&h->s.gb);
    for(block=0;block<4;block++)
        if(h->cbp & (1<<block))
            decode_residual_block(h,&h->s.gb,inter_2dvlc,0,h->qp,
                                  h->cy + h->luma_scan[block], h->l_stride);
    decode_residual_chroma(h);
/*****************************************************************************
 ****************************************************************************/

static inline void init_mb(AVSContext *h) {

    /* copy predictors from top line (MB B and C) into cache */
        h->mv[MV_FWD_B2+i] = h->top_mv[0][h->mbx*2+i];
        h->mv[MV_BWD_B2+i] = h->top_mv[1][h->mbx*2+i];
    h->pred_mode_Y[1] = h->top_pred_Y[h->mbx*2+0];
    h->pred_mode_Y[2] = h->top_pred_Y[h->mbx*2+1];
    /* clear top predictors if MB B is not available */
    if(!(h->flags & B_AVAIL)) {
        h->mv[MV_FWD_B2] = un_mv;
        h->mv[MV_FWD_B3] = un_mv;
        h->mv[MV_BWD_B2] = un_mv;
        h->mv[MV_BWD_B3] = un_mv;
        h->pred_mode_Y[1] = h->pred_mode_Y[2] = NOT_AVAIL;
        h->flags &= ~(C_AVAIL|D_AVAIL);
    if(h->mbx == h->mb_width-1) //MB C not available
        h->flags &= ~C_AVAIL;
    /* clear top-right predictors if MB C is not available */
    if(!(h->flags & C_AVAIL)) {
        h->mv[MV_FWD_C2] = un_mv;
        h->mv[MV_BWD_C2] = un_mv;
    /* clear top-left predictors if MB D is not available */
    if(!(h->flags & D_AVAIL)) {
        h->mv[MV_FWD_D3] = un_mv;
        h->mv[MV_BWD_D3] = un_mv;
    /* set pointer for co-located macroblock type */
    h->col_type = &h->col_type_base[h->mby*h->mb_width + h->mbx];
static inline void check_for_slice(AVSContext *h);

static inline int next_mb(AVSContext *h) {

    /* copy mvs as predictors to the left */
        h->mv[i] = h->mv[i+2];
    /* copy bottom mvs from cache to top line */
    h->top_mv[0][h->mbx*2+0] = h->mv[MV_FWD_X2];
    h->top_mv[0][h->mbx*2+1] = h->mv[MV_FWD_X3];
    h->top_mv[1][h->mbx*2+0] = h->mv[MV_BWD_X2];
    h->top_mv[1][h->mbx*2+1] = h->mv[MV_BWD_X3];
    /* next MB address */
    if(h->mbx == h->mb_width) { //new mb line
        h->flags = B_AVAIL|C_AVAIL;
        /* clear left pred_modes */
        h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
        /* clear left mv predictors */
        /* re-calculate sample pointers */
        h->cy = h->picture.data[0] + h->mby*16*h->l_stride;
        h->cu = h->picture.data[1] + h->mby*8*h->c_stride;
        h->cv = h->picture.data[2] + h->mby*8*h->c_stride;
        if(h->mby == h->mb_height) { //frame end
            //check_for_slice(h);
static void decode_mb_i(AVSContext *h, int is_i_pic) {
    GetBitContext *gb = &h->s.gb;
    int block, pred_mode_uv;

    /* get intra prediction modes from stream */
    for(block=0;block<4;block++) {
        int pos = scan3x3[block];

        nA = h->pred_mode_Y[pos-1];
        nB = h->pred_mode_Y[pos-3];
        if((nA == NOT_AVAIL) || (nB == NOT_AVAIL))
            predpred = FFMIN(nA,nB);
            h->pred_mode_Y[pos] = predpred;
            h->pred_mode_Y[pos] = get_bits(gb,2);
            if(h->pred_mode_Y[pos] >= predpred)
                h->pred_mode_Y[pos]++;
    pred_mode_uv = get_ue_golomb(gb);
    if(pred_mode_uv > 6) {
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra chroma pred mode\n");

    /* save pred modes before they get modified */
    h->pred_mode_Y[3] = h->pred_mode_Y[5];
    h->pred_mode_Y[6] = h->pred_mode_Y[8];
    h->top_pred_Y[h->mbx*2+0] = h->pred_mode_Y[7];
    h->top_pred_Y[h->mbx*2+1] = h->pred_mode_Y[8];

    /* modify pred modes according to availability of neighbour samples */
    if(!(h->flags & A_AVAIL)) {
        modify_pred(left_modifier_l, &h->pred_mode_Y[4]);
        modify_pred(left_modifier_l, &h->pred_mode_Y[7]);
        modify_pred(left_modifier_c, &pred_mode_uv);
    if(!(h->flags & B_AVAIL)) {
        modify_pred(top_modifier_l, &h->pred_mode_Y[4]);
        modify_pred(top_modifier_l, &h->pred_mode_Y[5]);
        modify_pred(top_modifier_c, &pred_mode_uv);

    /* get coded block pattern */
        h->cbp = cbp_tab[get_ue_golomb(gb)][0];
    if(h->cbp && !h->qp_fixed)
        h->qp += get_se_golomb(gb); //qp_delta

    /* luma intra prediction interleaved with residual decode/transform/add */
    for(block=0;block<4;block++) {
        d = h->cy + h->luma_scan[block];
        load_intra_pred_luma(h, top, left, block);
        h->intra_pred_l[h->pred_mode_Y[scan3x3[block]]]
            (d, top, left, h->l_stride);
        if(h->cbp & (1<<block))
            decode_residual_block(h,gb,intra_2dvlc,1,h->qp,d,h->l_stride);

    /* chroma intra prediction */
    /* extend borders by one pixel */
    h->left_border_u[9] = h->left_border_u[8];
    h->left_border_v[9] = h->left_border_v[8];
    h->top_border_u[h->mbx*10+9] = h->top_border_u[h->mbx*10+8];
    h->top_border_v[h->mbx*10+9] = h->top_border_v[h->mbx*10+8];
    if(h->mbx && h->mby) {
        h->top_border_u[h->mbx*10] = h->left_border_u[0] = h->topleft_border_u;
        h->top_border_v[h->mbx*10] = h->left_border_v[0] = h->topleft_border_v;
        h->left_border_u[0] = h->left_border_u[1];
        h->left_border_v[0] = h->left_border_v[1];
        h->top_border_u[h->mbx*10] = h->top_border_u[h->mbx*10+1];
        h->top_border_v[h->mbx*10] = h->top_border_v[h->mbx*10+1];
    h->intra_pred_c[pred_mode_uv](h->cu, &h->top_border_u[h->mbx*10],
                                  h->left_border_u, h->c_stride);
    h->intra_pred_c[pred_mode_uv](h->cv, &h->top_border_v[h->mbx*10],
                                  h->left_border_v, h->c_stride);
    decode_residual_chroma(h);

    /* mark motion vectors as intra */
    h->mv[MV_FWD_X0] = intra_mv;
    set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
    h->mv[MV_BWD_X0] = intra_mv;
    set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
    if(h->pic_type != FF_B_TYPE)
        *h->col_type = I_8X8;
static void mb_skip_p(AVSContext *h) {
    mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_PSKIP, BLK_16X16, 0);
    *h->col_type = P_SKIP;

static void mb_skip_b(AVSContext *h) {
    if(!(*h->col_type)) {
        /* intra MB at co-location, do in-plane prediction */
        mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_BSKIP, BLK_16X16, 1);
        mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_BSKIP, BLK_16X16, 0);
        /* direct prediction from co-located P MB, block-wise */
            mv_pred_direct(h,&h->mv[mv_scan[i]],
                           &h->mv[mv_scan[i]+MV_BWD_OFFS],
                           &h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + i]);

static void decode_mb_p(AVSContext *h, enum mb_t mb_type) {
    GetBitContext *gb = &h->s.gb;
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, ref[0]);
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        ref[2] = h->ref_flag ? 0 : get_bits1(gb);
        mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP,  BLK_16X8, ref[0]);
        mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, ref[2]);
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        ref[1] = h->ref_flag ? 0 : get_bits1(gb);
        mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT,     BLK_8X16, ref[0]);
        mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT, BLK_8X16, ref[1]);
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        ref[1] = h->ref_flag ? 0 : get_bits1(gb);
        ref[2] = h->ref_flag ? 0 : get_bits1(gb);
        ref[3] = h->ref_flag ? 0 : get_bits1(gb);
        mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_MEDIAN, BLK_8X8, ref[0]);
        mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_MEDIAN, BLK_8X8, ref[1]);
        mv_pred(h, MV_FWD_X2, MV_FWD_X1, MV_PRED_MEDIAN, BLK_8X8, ref[2]);
        mv_pred(h, MV_FWD_X3, MV_FWD_X0, MV_PRED_MEDIAN, BLK_8X8, ref[3]);
    decode_residual_inter(h);
    filter_mb(h,mb_type);
    *h->col_type = mb_type;
static void decode_mb_b(AVSContext *h, enum mb_t mb_type) {
    enum sub_mb_t sub_type[4];

    h->mv[MV_FWD_X0] = dir_mv;
    set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
    h->mv[MV_BWD_X0] = dir_mv;
    set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
        filter_mb(h,B_SKIP);
        mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
        mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
        mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X16);
        mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_MEDIAN, BLK_16X16, 0);
        for(block=0;block<4;block++)
            sub_type[block] = get_bits(&h->s.gb,2);
        for(block=0;block<4;block++) {
            switch(sub_type[block]) {
                if(!(*h->col_type)) {
                    /* intra MB at co-location, do in-plane prediction */
                    mv_pred(h, mv_scan[block], mv_scan[block]-3,
                            MV_PRED_BSKIP, BLK_8X8, 1);
                    mv_pred(h, mv_scan[block]+MV_BWD_OFFS,
                            mv_scan[block]-3+MV_BWD_OFFS,
                            MV_PRED_BSKIP, BLK_8X8, 0);
                    mv_pred_direct(h,&h->mv[mv_scan[block]],
                                   &h->mv[mv_scan[block]+MV_BWD_OFFS],
                                   &h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + block]);
                mv_pred(h, mv_scan[block], mv_scan[block]-3,
                        MV_PRED_MEDIAN, BLK_8X8, 1);
                mv_pred(h, mv_scan[block], mv_scan[block]-3,
                        MV_PRED_MEDIAN, BLK_8X8, 1);
                mv_pred_sym(h, &h->mv[mv_scan[block]], BLK_8X8);
        for(block=0;block<4;block++) {
            if(sub_type[block] == B_SUB_BWD)
                mv_pred(h, mv_scan[block]+MV_BWD_OFFS,
                        mv_scan[block]+MV_BWD_OFFS-3,
                        MV_PRED_MEDIAN, BLK_8X8, 0);
        assert((mb_type > B_SYM_16X16) && (mb_type < B_8X8));
        flags = b_partition_flags[(mb_type-1)>>1];
        if(mb_type & 1) { /* 16x8 macroblock types */
            mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
            mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
            mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X8);
            mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
            mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
            mv_pred_sym(h, &h->mv[9], BLK_16X8);
            mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_TOP,  BLK_16X8, 0);
            mv_pred(h, MV_BWD_X2, MV_BWD_A1, MV_PRED_LEFT, BLK_16X8, 0);
        } else { /* 8x16 macroblock types */
            mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
            mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
            mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_8X16);
            mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT, BLK_8X16, 1);
            mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT, BLK_8X16, 1);
            mv_pred_sym(h, &h->mv[6], BLK_8X16);
            mv_pred(h, MV_BWD_X0, MV_BWD_B3, MV_PRED_LEFT,     BLK_8X16, 0);
            mv_pred(h, MV_BWD_X1, MV_BWD_C2, MV_PRED_TOPRIGHT, BLK_8X16, 0);
    decode_residual_inter(h);
    filter_mb(h,mb_type);
/*****************************************************************************
 ****************************************************************************/

static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) {
        av_log(h->s.avctx, AV_LOG_ERROR, "unexpected start code 0x%02x\n", h->stc);
    if((h->mby == 0) && (!h->qp_fixed)){
        h->qp_fixed = get_bits1(gb);
        h->qp = get_bits(gb,6);
    /* inter frame or second slice can have weighting params */
    if((h->pic_type != FF_I_TYPE) || (!h->pic_structure && h->mby >= h->mb_width/2))
        if(get_bits1(gb)) { //slice_weighting_flag
            av_log(h->s.avctx, AV_LOG_ERROR,
                   "weighted prediction not yet supported\n");
static inline void check_for_slice(AVSContext *h) {
    GetBitContext *gb = &h->s.gb;

    align = (-get_bits_count(gb)) & 7;
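    /* Worked example: if 19 bits have been consumed, align == 5, so the check
     * below inspects the next 24+5 bits to see whether a byte-aligned
     * 0x000001 start-code prefix follows. */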
    if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) {
        get_bits_long(gb,24+align);
        h->stc = get_bits(gb,8);
        decode_slice_header(h,gb);
/*****************************************************************************
 ****************************************************************************/

static void init_pic(AVSContext *h) {

    /* clear some predictors */
    h->mv[MV_BWD_X0] = dir_mv;
    set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
    h->mv[MV_FWD_X0] = dir_mv;
    set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
    h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
    h->cy = h->picture.data[0];
    h->cu = h->picture.data[1];
    h->cv = h->picture.data[2];
    h->l_stride = h->picture.linesize[0];
    h->c_stride = h->picture.linesize[1];
    h->luma_scan[2] = 8*h->l_stride;
    h->luma_scan[3] = 8*h->l_stride+8;
    h->mbx = h->mby = 0;
static int decode_pic(AVSContext *h) {
    MpegEncContext *s = &h->s;

    if (!s->context_initialized) {
        if (MPV_common_init(s) < 0)
    get_bits(&s->gb,16);//bbv_delay
    if(h->stc == PIC_PB_START_CODE) {
        h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE;
        /* make sure we have the reference frames we need */
        if(!h->DPB[0].data[0] ||
          (!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE))
        h->pic_type = FF_I_TYPE;
        if(get_bits1(&s->gb))
            get_bits(&s->gb,16);//time_code
    /* release last B frame */
    if(h->picture.data[0])
        s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);
    s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
    h->picture.poc = get_bits(&s->gb,8)*2;

    /* get temporal distances and MV scaling factors */
    if(h->pic_type != FF_B_TYPE) {
        h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512;
        h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512;
    h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512;
    h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0;
    h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0;
    if(h->pic_type == FF_B_TYPE) {
        h->sym_factor = h->dist[0]*h->scale_den[1];
        h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0;
        h->direct_den[1] = h->dist[1] ? 16384/h->dist[1] : 0;
    get_ue_golomb(&s->gb); //bbv_check_times
    h->progressive = get_bits1(&s->gb);
        h->pic_structure = 1;
    else if(!(h->pic_structure = get_bits1(&s->gb) && (h->stc == PIC_PB_START_CODE)) )
        get_bits1(&s->gb); //advanced_pred_mode_disable
    skip_bits1(&s->gb); //top_field_first
    skip_bits1(&s->gb); //repeat_first_field
    h->qp_fixed = get_bits1(&s->gb);
    h->qp = get_bits(&s->gb,6);
    if(h->pic_type == FF_I_TYPE) {
        if(!h->progressive && !h->pic_structure)
            skip_bits1(&s->gb);//what is this?
        skip_bits(&s->gb,4); //reserved bits
    if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1))
        h->ref_flag = get_bits1(&s->gb);
    skip_bits(&s->gb,4); //reserved bits
    h->skip_mode_flag = get_bits1(&s->gb);
    h->loop_filter_disable = get_bits1(&s->gb);
    if(!h->loop_filter_disable && get_bits1(&s->gb)) {
        h->alpha_offset = get_se_golomb(&s->gb);
        h->beta_offset  = get_se_golomb(&s->gb);
        h->alpha_offset = h->beta_offset = 0;
    if(h->pic_type == FF_I_TYPE) {
        } while(next_mb(h));
    } else if(h->pic_type == FF_P_TYPE) {
            if(h->skip_mode_flag) {
                skip_count = get_ue_golomb(&s->gb);
                for(i=0;i<skip_count;i++) {
                mb_type = get_ue_golomb(&s->gb) + P_16X16;
                mb_type = get_ue_golomb(&s->gb) + P_SKIP;
            if(mb_type > P_8X8) {
                h->cbp = cbp_tab[mb_type - P_8X8 - 1][0];
                decode_mb_p(h,mb_type);
        } while(next_mb(h));
    } else { //FF_B_TYPE
            if(h->skip_mode_flag) {
                skip_count = get_ue_golomb(&s->gb);
                for(i=0;i<skip_count;i++) {
                    filter_mb(h,B_SKIP);
                mb_type = get_ue_golomb(&s->gb) + B_DIRECT;
                mb_type = get_ue_golomb(&s->gb) + B_SKIP;
            if(mb_type > B_8X8) {
                h->cbp = cbp_tab[mb_type - B_8X8 - 1][0];
                decode_mb_b(h,mb_type);
        } while(next_mb(h));
    if(h->pic_type != FF_B_TYPE) {
        if(h->DPB[1].data[0])
            s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
        memcpy(&h->DPB[1], &h->DPB[0], sizeof(Picture));
        memcpy(&h->DPB[0], &h->picture, sizeof(Picture));
        memset(&h->picture,0,sizeof(Picture));
/*****************************************************************************
 *
 * headers and interface
 *
 ****************************************************************************/

static void init_top_lines(AVSContext *h) {
    /* alloc top line of predictors */
    h->top_qp       = av_malloc( h->mb_width);
    h->top_mv[0]    = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
    h->top_mv[1]    = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
    h->top_pred_Y   = av_malloc( h->mb_width*2*sizeof(*h->top_pred_Y));
    h->top_border_y = av_malloc((h->mb_width+1)*16);
    h->top_border_u = av_malloc( h->mb_width*10);
    h->top_border_v = av_malloc( h->mb_width*10);

    /* alloc space for co-located MVs and types */
    h->col_mv        = av_malloc(h->mb_width*h->mb_height*4*sizeof(vector_t));
    h->col_type_base = av_malloc(h->mb_width*h->mb_height);
static int decode_seq_header(AVSContext *h) {
    MpegEncContext *s = &h->s;
    extern const AVRational ff_frame_rate_tab[];
    int frame_rate_code;

    h->profile = get_bits(&s->gb,8);
    h->level   = get_bits(&s->gb,8);
    skip_bits1(&s->gb); //progressive sequence
    s->width  = get_bits(&s->gb,14);
    s->height = get_bits(&s->gb,14);
    skip_bits(&s->gb,2); //chroma format
    skip_bits(&s->gb,3); //sample_precision
    h->aspect_ratio = get_bits(&s->gb,4);
    frame_rate_code = get_bits(&s->gb,4);
    skip_bits(&s->gb,18);//bit_rate_lower
    skip_bits1(&s->gb); //marker_bit
    skip_bits(&s->gb,12);//bit_rate_upper
    s->low_delay = get_bits1(&s->gb);
    h->mb_width  = (s->width  + 15) >> 4;
    h->mb_height = (s->height + 15) >> 4;
    h->s.avctx->time_base.den = ff_frame_rate_tab[frame_rate_code].num;
    h->s.avctx->time_base.num = ff_frame_rate_tab[frame_rate_code].den;
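    /* ff_frame_rate_tab stores frame rates as frames per second (num/den),
     * while time_base is seconds per frame, hence the num/den swap above. */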
    h->s.avctx->width  = s->width;
    h->s.avctx->height = s->height;
/**
 * finds the end of the current frame in the bitstream.
 * @return the position of the first byte of the next frame, or -1
 */
int ff_cavs_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) {

    pic_found= pc->frame_start_found;

        for(i=0; i<buf_size; i++){
            state= (state<<8) | buf[i];
            if(state == PIC_I_START_CODE || state == PIC_PB_START_CODE){

    /* EOF considered as end of frame */
        for(; i<buf_size; i++){
            state= (state<<8) | buf[i];
            if((state&0xFFFFFF00) == 0x100){
                if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
                    pc->frame_start_found=0;

    pc->frame_start_found= pic_found;
    return END_NOT_FOUND;
void ff_cavs_flush(AVCodecContext * avctx) {
    AVSContext *h = avctx->priv_data;
    h->got_keyframe = 0;

static int cavs_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
                             uint8_t * buf, int buf_size) {
    AVSContext *h = avctx->priv_data;
    MpegEncContext *s = &h->s;
    const uint8_t *buf_end;
    const uint8_t *buf_ptr;
    AVFrame *picture = data;

    if (buf_size == 0) {
        if(!s->low_delay && h->DPB[0].data[0]) {
            *data_size = sizeof(AVPicture);
            *picture = *(AVFrame *) &h->DPB[0];

    buf_end = buf + buf_size;
        buf_ptr = ff_find_start_code(buf_ptr,buf_end, &stc);
        if(stc & 0xFFFFFE00)
            return FFMAX(0, buf_ptr - buf - s->parse_context.last_index);
        input_size = (buf_end - buf_ptr)*8;
        case SEQ_START_CODE:
            init_get_bits(&s->gb, buf_ptr, input_size);
            decode_seq_header(h);
        case PIC_I_START_CODE:
            if(!h->got_keyframe) {
                if(h->DPB[0].data[0])
                    avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
                if(h->DPB[1].data[0])
                    avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
                h->got_keyframe = 1;
        case PIC_PB_START_CODE:
            if(!h->got_keyframe)
            init_get_bits(&s->gb, buf_ptr, input_size);
            *data_size = sizeof(AVPicture);
            if(h->pic_type != FF_B_TYPE) {
                if(h->DPB[1].data[0]) {
                    *picture = *(AVFrame *) &h->DPB[1];
                *picture = *(AVFrame *) &h->picture;
        case EXT_START_CODE:
            //mpeg_decode_extension(avctx,buf_ptr, input_size);
        case USER_START_CODE:
            //mpeg_decode_user_data(avctx,buf_ptr, input_size);
            if (stc >= SLICE_MIN_START_CODE &&
                stc <= SLICE_MAX_START_CODE) {
                init_get_bits(&s->gb, buf_ptr, input_size);
                decode_slice_header(h, &s->gb);
static int cavs_decode_init(AVCodecContext * avctx) {
    AVSContext *h = avctx->priv_data;
    MpegEncContext * const s = &h->s;

    MPV_decode_defaults(s);
    avctx->pix_fmt= PIX_FMT_YUV420P;

    h->luma_scan[0] = 0;
    h->luma_scan[1] = 8;
    h->intra_pred_l[      INTRA_L_VERT] = intra_pred_vert;
    h->intra_pred_l[     INTRA_L_HORIZ] = intra_pred_horiz;
    h->intra_pred_l[        INTRA_L_LP] = intra_pred_lp;
    h->intra_pred_l[ INTRA_L_DOWN_LEFT] = intra_pred_down_left;
    h->intra_pred_l[INTRA_L_DOWN_RIGHT] = intra_pred_down_right;
    h->intra_pred_l[   INTRA_L_LP_LEFT] = intra_pred_lp_left;
    h->intra_pred_l[    INTRA_L_LP_TOP] = intra_pred_lp_top;
    h->intra_pred_l[    INTRA_L_DC_128] = intra_pred_dc_128;
    h->intra_pred_c[        INTRA_C_LP] = intra_pred_lp;
    h->intra_pred_c[     INTRA_C_HORIZ] = intra_pred_horiz;
    h->intra_pred_c[      INTRA_C_VERT] = intra_pred_vert;
    h->intra_pred_c[     INTRA_C_PLANE] = intra_pred_plane;
    h->intra_pred_c[   INTRA_C_LP_LEFT] = intra_pred_lp_left;
    h->intra_pred_c[    INTRA_C_LP_TOP] = intra_pred_lp_top;
    h->intra_pred_c[    INTRA_C_DC_128] = intra_pred_dc_128;

static int cavs_decode_end(AVCodecContext * avctx) {
    AVSContext *h = avctx->priv_data;

    av_free(h->top_mv[0]);
    av_free(h->top_mv[1]);
    av_free(h->top_pred_Y);
    av_free(h->top_border_y);
    av_free(h->top_border_u);
    av_free(h->top_border_v);
    av_free(h->col_type_base);

AVCodec cavs_decoder = {
    CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .flush= ff_cavs_flush,