 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder
 * @author Stefan Gehrer <stefan.gehrer@gmx.de>
 */

#include "bitstream.h"
#include "mpegvideo.h"
    Picture picture;   ///< currently decoded frame
    Picture DPB[2];    ///< reference frames
    int dist[2];       ///< temporal distances from current frame to ref frames
    int mb_width, mb_height;
    int skip_mode_flag; ///< select between skip_count and one skip_flag per MB
    int loop_filter_disable;
    int alpha_offset, beta_offset;
    int mbx, mby;      ///< macroblock coordinates
    int flags;         ///< availability flags of neighbouring macroblocks
    int stc;           ///< last start code
    uint8_t *cy, *cu, *cv; ///< current MB sample pointers

    /** mv motion vector cache
       X are the vectors in the current macroblock (5,6,9,10)
       A is the macroblock to the left (4,8)
       B is the macroblock to the top (1,2)
       C is the macroblock to the top-right (3)
       D is the macroblock to the top-left (0)

       the same is repeated for backward motion vectors */

    /** luma pred mode cache
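       addressed like a 3x3 array via scan3x3: for a block at index pos,
       pos-1 is its left neighbour and pos-3 its top neighbour
       (layout note inferred from decode_mb_i() below) */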
    int l_stride, c_stride;

    /** intra prediction is done with un-deblocked samples
        they are saved here before deblocking the MB */
    uint8_t *top_border_y, *top_border_u, *top_border_v;
    uint8_t left_border_y[16], left_border_u[10], left_border_v[10];
    uint8_t topleft_border_y, topleft_border_u, topleft_border_v;

    void (*intra_pred_l[8])(uint8_t *d,uint8_t *top,uint8_t *left,int stride);
    void (*intra_pred_c[7])(uint8_t *d,uint8_t *top,uint8_t *left,int stride);
    uint8_t *col_type_base;

    /* scaling factors for MV prediction */
    int sym_factor;    ///< for scaling in symmetrical B block
    int direct_den[2]; ///< for scaling in direct B block
    int scale_den[2];  ///< for scaling neighbouring MVs
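    /* note: scale_den[] and direct_den[] hold precomputed reciprocals
       (512/dist and 16384/dist, set up in decode_pic()), so the MV scaling
       routines below get by without per-block divisions */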
/*****************************************************************************
 *
 * in-loop deblocking filter
 *
 ****************************************************************************/

static inline int get_bs_p(vector_t *mvP, vector_t *mvQ) {
    if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA))
    if(mvP->ref != mvQ->ref)
    if( (abs(mvP->x - mvQ->x) >= 4) || (abs(mvP->y - mvQ->y) >= 4) )

static inline int get_bs_b(vector_t *mvP, vector_t *mvQ) {
    if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA)) {
        vector_t *mvPbw = mvP + MV_BWD_OFFS;
        vector_t *mvQbw = mvQ + MV_BWD_OFFS;
        if( (abs( mvP->x - mvQ->x) >= 4) ||
            (abs( mvP->y - mvQ->y) >= 4) ||
            (abs(mvPbw->x - mvQbw->x) >= 4) ||
            (abs(mvPbw->y - mvQbw->y) >= 4) )

    alpha = alpha_tab[clip(qp_avg + h->alpha_offset,0,63)]; \
    beta  =  beta_tab[clip(qp_avg + h->beta_offset, 0,63)]; \
    tc    =    tc_tab[clip(qp_avg + h->alpha_offset,0,63)];
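
/* the deblocking thresholds alpha, beta and tc are plain table lookups,
   indexed by the average QP of the two adjacent macroblocks plus the
   per-picture offsets, clipped to the valid QP range 0..63 */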

/**
 * in-loop deblocking filter for a single macroblock
 *
 * boundary strength (bs) mapping:
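 *   bs[0]/bs[1]: left MB edge, upper/lower half
 *   bs[2]/bs[3]: internal vertical edge, upper/lower half
 *   bs[4]/bs[5]: top MB edge, left/right half
 *   bs[6]/bs[7]: internal horizontal edge, left/right half
 */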
static void filter_mb(AVSContext *h, enum mb_t mb_type) {
    DECLARE_ALIGNED_8(uint8_t, bs[8]);
    int qp_avg, alpha, beta, tc;

    /* save un-deblocked lines */
    h->topleft_border_y = h->top_border_y[h->mbx*16+15];
    h->topleft_border_u = h->top_border_u[h->mbx*10+8];
    h->topleft_border_v = h->top_border_v[h->mbx*10+8];
    memcpy(&h->top_border_y[h->mbx*16],   h->cy + 15* h->l_stride,16);
    memcpy(&h->top_border_u[h->mbx*10+1], h->cu +  7* h->c_stride,8);
    memcpy(&h->top_border_v[h->mbx*10+1], h->cv +  7* h->c_stride,8);
        h->left_border_y[i*2+0] = *(h->cy + 15 + (i*2+0)*h->l_stride);
        h->left_border_y[i*2+1] = *(h->cy + 15 + (i*2+1)*h->l_stride);
        h->left_border_u[i+1] = *(h->cu + 7 + i*h->c_stride);
        h->left_border_v[i+1] = *(h->cv + 7 + i*h->c_stride);
    if(!h->loop_filter_disable) {
        *((uint64_t *)bs) = 0;
        *((uint64_t *)bs) = 0x0202020202020202ULL;
        bs[2] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
        bs[3] = get_bs_p(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]);
        bs[6] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
        bs[7] = get_bs_p(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]);
        bs[0] = get_bs_p(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
        bs[1] = get_bs_p(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
        bs[4] = get_bs_p(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
        bs[5] = get_bs_p(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
        bs[2] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
        bs[3] = get_bs_b(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]);
        bs[6] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
        bs[7] = get_bs_b(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]);
        bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
        bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
        bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
        bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
        if(mb_type & 1) { //16X8
            bs[6] = bs[7] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
            bs[2] = bs[3] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
        bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
        bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
        bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
        bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
        if( *((uint64_t *)bs) ) {
            if(h->flags & A_AVAIL) {
                qp_avg = (h->qp + h->left_qp + 1) >> 1;
                h->s.dsp.cavs_filter_lv(h->cy,h->l_stride,alpha,beta,tc,bs[0],bs[1]);
                h->s.dsp.cavs_filter_cv(h->cu,h->c_stride,alpha,beta,tc,bs[0],bs[1]);
                h->s.dsp.cavs_filter_cv(h->cv,h->c_stride,alpha,beta,tc,bs[0],bs[1]);
            h->s.dsp.cavs_filter_lv(h->cy + 8,h->l_stride,alpha,beta,tc,bs[2],bs[3]);
            h->s.dsp.cavs_filter_lh(h->cy + 8*h->l_stride,h->l_stride,alpha,beta,tc,
                                    bs[6],bs[7]);
            if(h->flags & B_AVAIL) {
                qp_avg = (h->qp + h->top_qp[h->mbx] + 1) >> 1;
                h->s.dsp.cavs_filter_lh(h->cy,h->l_stride,alpha,beta,tc,bs[4],bs[5]);
                h->s.dsp.cavs_filter_ch(h->cu,h->c_stride,alpha,beta,tc,bs[4],bs[5]);
                h->s.dsp.cavs_filter_ch(h->cv,h->c_stride,alpha,beta,tc,bs[4],bs[5]);
    h->top_qp[h->mbx] = h->qp;

/*****************************************************************************
 *
 * spatial intra prediction
 *
 ****************************************************************************/

static inline void load_intra_pred_luma(AVSContext *h, uint8_t *top,
                                        uint8_t *left, int block) {
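    /* samples from unavailable neighbours are padded by replicating the
       nearest available sample (the memsets below), so each predictor can
       always read a full row/column of reference samples */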
        memcpy(&left[1],h->left_border_y,16);
        memcpy(&top[1],&h->top_border_y[h->mbx*16],16);
        if((h->flags & A_AVAIL) && (h->flags & B_AVAIL))
            left[0] = top[0] = h->topleft_border_y;
            left[i+1] = *(h->cy + 7 + i*h->l_stride);
        memset(&left[9],left[8],9);
        memcpy(&top[1],&h->top_border_y[h->mbx*16+8],8);
        if(h->flags & C_AVAIL)
            memcpy(&top[9],&h->top_border_y[(h->mbx + 1)*16],8);
            memset(&top[9],top[8],9);
        if(h->flags & B_AVAIL)
            left[0] = top[0] = h->top_border_y[h->mbx*16+7];
        memcpy(&left[1],&h->left_border_y[8],8);
        memset(&left[9],left[8],9);
        memcpy(&top[1],h->cy + 7*h->l_stride,16);
        left[0] = h->left_border_y[7];
        if(h->flags & A_AVAIL)
            left[i] = *(h->cy + 7 + (i+7)*h->l_stride);
        memset(&left[9],left[8],9);
        memcpy(&top[0],h->cy + 7 + 7*h->l_stride,9);
        memset(&top[9],top[8],9);
static void intra_pred_vert(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
    uint64_t a = *((uint64_t *)(&top[1]));
        *((uint64_t *)(d+y*stride)) = a;

static void intra_pred_horiz(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
        a = left[y+1] * 0x0101010101010101ULL;
        *((uint64_t *)(d+y*stride)) = a;

static void intra_pred_dc_128(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
    uint64_t a = 0x8080808080808080ULL;
        *((uint64_t *)(d+y*stride)) = a;

static void intra_pred_plane(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
        ih += (x+1)*(top[5+x]-top[3-x]);
        iv += (x+1)*(left[5+x]-left[3-x]);
    ia = (top[8]+left[8])<<4;
            d[y*stride+x] = cm[(ia+(x-3)*ih+(y-3)*iv+16)>>5];

#define LOWPASS(ARRAY,INDEX) \
    (( ARRAY[(INDEX)-1] + 2*ARRAY[(INDEX)] + ARRAY[(INDEX)+1] + 2) >> 2)
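
/* LOWPASS is the (1,2,1)/4 smoothing filter (with rounding) that the
   low-pass and diagonal intra predictors below apply to the reference
   samples */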
static void intra_pred_lp(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
            d[y*stride+x] = (LOWPASS(top,x+1) + LOWPASS(left,y+1)) >> 1;

static void intra_pred_down_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
            d[y*stride+x] = (LOWPASS(top,x+y+2) + LOWPASS(left,x+y+2)) >> 1;

static void intra_pred_down_right(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
                d[y*stride+x] = (left[1]+2*top[0]+top[1]+2)>>2;
                d[y*stride+x] = LOWPASS(top,x-y);
                d[y*stride+x] = LOWPASS(left,y-x);

static void intra_pred_lp_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
            d[y*stride+x] = LOWPASS(left,y+1);

static void intra_pred_lp_top(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
            d[y*stride+x] = LOWPASS(top,x+1);

static inline void modify_pred(const int_fast8_t *mod_table, int *mode) {
    int newmode = mod_table[*mode];
        av_log(NULL, AV_LOG_ERROR, "Illegal intra prediction mode\n");

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/
static inline void mc_dir_part(AVSContext *h,Picture *pic,int square,
                               int chroma_height,int delta,int list,uint8_t *dest_y,
                               uint8_t *dest_cb,uint8_t *dest_cr,int src_x_offset,
                               int src_y_offset,qpel_mc_func *qpix_op,
                               h264_chroma_mc_func chroma_op,vector_t *mv){
    MpegEncContext * const s = &h->s;
    const int mx= mv->x + src_x_offset*8;
    const int my= mv->y + src_y_offset*8;
    const int luma_xy= (mx&3) + ((my&3)<<2);
    uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride;
    uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->c_stride;
    uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->c_stride;
    int extra_width= 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
    int extra_height= extra_width;
    const int full_mx= mx>>2;
    const int full_my= my>>2;
    const int pic_width = 16*h->mb_width;
    const int pic_height = 16*h->mb_height;

    if(mx&7) extra_width -= 3;
    if(my&7) extra_height -= 3;

    if(   full_mx < 0-extra_width
       || full_my < 0-extra_height
       || full_mx + 16/*FIXME*/ > pic_width + extra_width
       || full_my + 16/*FIXME*/ > pic_height + extra_height){
        ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*h->l_stride, h->l_stride,
                            16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
        src_y= s->edge_emu_buffer + 2 + 2*h->l_stride;
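        /* the sub-pel interpolation filters read a couple of samples beyond
           the 16x16 area, so when the MV points near or across the picture
           border the source block is first rebuilt with replicated edge
           samples in edge_emu_buffer (hence the 2-sample margin above) */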
    qpix_op[luma_xy](dest_y, src_y, h->l_stride); //FIXME try variable height perhaps?
        qpix_op[luma_xy](dest_y + delta, src_y + delta, h->l_stride);
        ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, h->c_stride,
                            9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
        src_cb= s->edge_emu_buffer;
    chroma_op(dest_cb, src_cb, h->c_stride, chroma_height, mx&7, my&7);
        ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, h->c_stride,
                            9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
        src_cr= s->edge_emu_buffer;
    chroma_op(dest_cr, src_cr, h->c_stride, chroma_height, mx&7, my&7);

static inline void mc_part_std(AVSContext *h,int square,int chroma_height,int delta,
                               uint8_t *dest_y,uint8_t *dest_cb,uint8_t *dest_cr,
                               int x_offset, int y_offset,qpel_mc_func *qpix_put,
                               h264_chroma_mc_func chroma_put,qpel_mc_func *qpix_avg,
                               h264_chroma_mc_func chroma_avg, vector_t *mv){
    qpel_mc_func *qpix_op= qpix_put;
    h264_chroma_mc_func chroma_op= chroma_put;

    dest_y  += 2*x_offset + 2*y_offset*h->l_stride;
    dest_cb += x_offset + y_offset*h->c_stride;
    dest_cr += x_offset + y_offset*h->c_stride;
    x_offset += 8*h->mbx;
    y_offset += 8*h->mby;
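    /* note: x_offset/y_offset are given in chroma samples (half the luma
       resolution); mc_dir_part() above turns them into luma quarter-pel
       units by multiplying with 8 */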
        Picture *ref= &h->DPB[mv->ref];
        mc_dir_part(h, ref, square, chroma_height, delta, 0,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_op, chroma_op, mv);

        chroma_op= chroma_avg;

    if((mv+MV_BWD_OFFS)->ref >= 0){
        Picture *ref= &h->DPB[0];
        mc_dir_part(h, ref, square, chroma_height, delta, 1,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_op, chroma_op, mv+MV_BWD_OFFS);

static void inter_pred(AVSContext *h) {
    /* always do 8x8 blocks TODO: are larger blocks worth it? */
    mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 0,
                h->s.dsp.put_cavs_qpel_pixels_tab[1],
                h->s.dsp.put_h264_chroma_pixels_tab[1],
                h->s.dsp.avg_cavs_qpel_pixels_tab[1],
                h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X0]);
    mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 0,
                h->s.dsp.put_cavs_qpel_pixels_tab[1],
                h->s.dsp.put_h264_chroma_pixels_tab[1],
                h->s.dsp.avg_cavs_qpel_pixels_tab[1],
                h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X1]);
    mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 4,
                h->s.dsp.put_cavs_qpel_pixels_tab[1],
                h->s.dsp.put_h264_chroma_pixels_tab[1],
                h->s.dsp.avg_cavs_qpel_pixels_tab[1],
                h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X2]);
    mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 4,
                h->s.dsp.put_cavs_qpel_pixels_tab[1],
                h->s.dsp.put_h264_chroma_pixels_tab[1],
                h->s.dsp.avg_cavs_qpel_pixels_tab[1],
                h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X3]);
    /* set intra prediction modes to default values */
    h->pred_mode_Y[3] = h->pred_mode_Y[6] = INTRA_L_LP;
    h->top_pred_Y[h->mbx*2+0] = h->top_pred_Y[h->mbx*2+1] = INTRA_L_LP;

/*****************************************************************************
 *
 * motion vector prediction
 *
 ****************************************************************************/

static inline void set_mvs(vector_t *mv, enum block_t size) {
        mv[MV_STRIDE  ] = mv[0];
        mv[MV_STRIDE+1] = mv[0];
        mv[MV_STRIDE] = mv[0];

static inline void store_mvs(AVSContext *h) {
    h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 0] = h->mv[MV_FWD_X0];
    h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 1] = h->mv[MV_FWD_X1];
    h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 2] = h->mv[MV_FWD_X2];
    h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 3] = h->mv[MV_FWD_X3];

static inline void scale_mv(AVSContext *h, int *d_x, int *d_y, vector_t *src, int distp) {
    int den = h->scale_den[src->ref];

    *d_x = (src->x*distp*den + 256 + (src->x>>31)) >> 9;
    *d_y = (src->y*distp*den + 256 + (src->y>>31)) >> 9;
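    /* with scale_den[ref] == 512/dist[ref] the two lines above give roughly
       src * distp / dist[ref]; the +256 rounds to nearest and the (src>>31)
       term adjusts the rounding for negative components */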

static inline void mv_pred_median(AVSContext *h, vector_t *mvP, vector_t *mvA, vector_t *mvB, vector_t *mvC) {
    int ax, ay, bx, by, cx, cy;
    int len_ab, len_bc, len_ca, len_mid;

    /* scale candidates according to their temporal span */
    scale_mv(h, &ax, &ay, mvA, mvP->dist);
    scale_mv(h, &bx, &by, mvB, mvP->dist);
    scale_mv(h, &cx, &cy, mvC, mvP->dist);
    /* find the geometrical median of the three candidates */
    len_ab = abs(ax - bx) + abs(ay - by);
    len_bc = abs(bx - cx) + abs(by - cy);
    len_ca = abs(cx - ax) + abs(cy - ay);
    len_mid = mid_pred(len_ab, len_bc, len_ca);
    if(len_mid == len_ab) {
    } else if(len_mid == len_bc) {

static inline void mv_pred_direct(AVSContext *h, vector_t *pmv_fw,
                                  vector_t *pmv_bw, vector_t *col_mv) {
    int den = h->direct_den[col_mv->ref];
    int m = col_mv->x >> 31;

    pmv_fw->dist = h->dist[1];
    pmv_bw->dist = h->dist[0];
    /* scale the co-located motion vector according to its temporal span */
    pmv_fw->x =   (((den+(den*col_mv->x*pmv_fw->dist^m)-m-1)>>14)^m)-m;
    pmv_bw->x = m-(((den+(den*col_mv->x*pmv_bw->dist^m)-m-1)>>14)^m);
    pmv_fw->y =   (((den+(den*col_mv->y*pmv_fw->dist^m)-m-1)>>14)^m)-m;
    pmv_bw->y = m-(((den+(den*col_mv->y*pmv_bw->dist^m)-m-1)>>14)^m);
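    /* the expressions above effectively rescale the co-located MV by the
       ratio of the current block's temporal distance to that of the
       co-located block (direct_den[] == 16384/dist, hence the >>14); m is a
       sign mask so the shift rounds symmetrically for negative components,
       and the backward vector comes out negated */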

static inline void mv_pred_sym(AVSContext *h, vector_t *src, enum block_t size) {
    vector_t *dst = src + MV_BWD_OFFS;

    /* backward mv is the scaled and negated forward mv */
    dst->x = -((src->x * h->sym_factor + 256) >> 9);
    dst->y = -((src->y * h->sym_factor + 256) >> 9);
    dst->dist = h->dist[0];
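    /* sym_factor == dist[0]*(512/dist[1]), so the backward MV is the forward
       MV rescaled to the other reference's temporal distance (division by
       512 with rounding) and negated */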

static void mv_pred(AVSContext *h, enum mv_loc_t nP, enum mv_loc_t nC,
                    enum mv_pred_t mode, enum block_t size, int ref) {
    vector_t *mvP = &h->mv[nP];
    vector_t *mvA = &h->mv[nP-1];
    vector_t *mvB = &h->mv[nP-4];
    vector_t *mvC = &h->mv[nC];
    int mvAref = mvA->ref;
    int mvBref = mvB->ref;

    mvP->dist = h->dist[mvP->ref];
    if(mvC->ref == NOT_AVAIL)
        mvC = &h->mv[nP-5]; // set to top-left (mvD)
    if(mode == MV_PRED_PSKIP) {
        if((mvAref == NOT_AVAIL) || (mvBref == NOT_AVAIL) ||
           ((mvA->x | mvA->y | mvA->ref) == 0) ||
           ((mvB->x | mvB->y | mvB->ref) == 0) ) {
    /* if there is only one suitable candidate, take it */
    if((mvAref >= 0) && (mvBref < 0) && (mvCref < 0)) {
    } else if((mvAref < 0) && (mvBref >= 0) && (mvCref < 0)) {
    } else if((mvAref < 0) && (mvBref < 0) && (mvCref >= 0)) {
        if(mvAref == mvP->ref) {
            mv_pred_median(h, mvP, mvA, mvB, mvC);
        if(mvBref == mvP->ref) {
            mv_pred_median(h, mvP, mvA, mvB, mvC);
    case MV_PRED_TOPRIGHT:
        if(mvCref == mvP->ref) {
            mv_pred_median(h, mvP, mvA, mvB, mvC);
            mv_pred_median(h, mvP, mvA, mvB, mvC);
    if(mode < MV_PRED_PSKIP) {
        mvP->x += get_se_golomb(&h->s.gb);
        mvP->y += get_se_golomb(&h->s.gb);

/*****************************************************************************
 *
 * residual data decoding
 *
 ****************************************************************************/

/** kth-order exponential golomb code */
static inline int get_ue_code(GetBitContext *gb, int order) {
        int ret = get_ue_golomb(gb) << order;
        return ret + get_bits(gb,order);
    return get_ue_golomb(gb);
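    /* example: with order==2 an Exp-Golomb prefix of 3 followed by the two
       raw bits '01' decodes to (3<<2)+1 == 13; order==0 falls back to a
       plain ue(v) code */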

/**
 * decode coefficients from one 8x8 block, dequantize, inverse transform
 * and add them to sample block
 * @param r pointer to 2D VLC table
 * @param esc_golomb_order escape codes are k-golomb with this order k
 * @param qp quantizer
 * @param dst location of sample block
 * @param stride line stride in frame buffer
 */
static int decode_residual_block(AVSContext *h, GetBitContext *gb,
                                 const residual_vlc_t *r, int esc_golomb_order,
                                 int qp, uint8_t *dst, int stride) {
    int level_code, esc_code, level, run, mask;
    int dqm = dequant_mul[qp];
    int dqs = dequant_shift[qp];
    int dqa = 1 << (dqs - 1);
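    /* dqm/dqs implement dequantization as a fixed-point multiply and shift;
       dqa is the rounding term (half the divisor) used below in
       (level*dqm + dqa) >> dqs */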
    const uint8_t *scantab = ff_zigzag_direct;

    memset(block,0,64*sizeof(DCTELEM));
        level_code = get_ue_code(gb,r->golomb_order);
        if(level_code >= ESCAPE_CODE) {
            run = (level_code - ESCAPE_CODE) >> 1;
            esc_code = get_ue_code(gb,esc_golomb_order);
            level = esc_code + (run > r->max_run ? 1 : r->level_add[run]);
            while(level > r->inc_limit)
            mask = -(level_code & 1);
            level = (level^mask) - mask;
            level = r->rltab[level_code][0];
            if(!level) //end of block signal
            run = r->rltab[level_code][1];
            r += r->rltab[level_code][2];
        level_buf[i] = level;
    /* inverse scan and dequantization */
        pos += 1 + run_buf[i];
            av_log(h->s.avctx, AV_LOG_ERROR,
                   "position out of block bounds at pic %d MB(%d,%d)\n",
                   h->picture.poc, h->mbx, h->mby);
        block[scantab[pos]] = (level_buf[i]*dqm + dqa) >> dqs;
    h->s.dsp.cavs_idct8_add(dst,block,stride);

static inline void decode_residual_chroma(AVSContext *h) {
        decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp],
        decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp],

static inline void decode_residual_inter(AVSContext *h) {
    /* get coded block pattern */
    h->cbp = cbp_tab[get_ue_golomb(&h->s.gb)][1];
    if(h->cbp && !h->qp_fixed)
        h->qp += get_se_golomb(&h->s.gb);
    for(block=0;block<4;block++)
        if(h->cbp & (1<<block))
            decode_residual_block(h,&h->s.gb,inter_2dvlc,0,h->qp,
                                  h->cy + h->luma_scan[block], h->l_stride);
    decode_residual_chroma(h);

/*****************************************************************************
 *
 ****************************************************************************/

/**
 * initialise predictors for motion vectors and intra prediction
 */
static inline void init_mb(AVSContext *h) {
    /* copy predictors from top line (MB B and C) into cache */
        h->mv[MV_FWD_B2+i] = h->top_mv[0][h->mbx*2+i];
        h->mv[MV_BWD_B2+i] = h->top_mv[1][h->mbx*2+i];
    h->pred_mode_Y[1] = h->top_pred_Y[h->mbx*2+0];
    h->pred_mode_Y[2] = h->top_pred_Y[h->mbx*2+1];
    /* clear top predictors if MB B is not available */
    if(!(h->flags & B_AVAIL)) {
        h->mv[MV_FWD_B2] = un_mv;
        h->mv[MV_FWD_B3] = un_mv;
        h->mv[MV_BWD_B2] = un_mv;
        h->mv[MV_BWD_B3] = un_mv;
        h->pred_mode_Y[1] = h->pred_mode_Y[2] = NOT_AVAIL;
        h->flags &= ~(C_AVAIL|D_AVAIL);
    if(h->mbx == h->mb_width-1) //MB C not available
        h->flags &= ~C_AVAIL;
    /* clear top-right predictors if MB C is not available */
    if(!(h->flags & C_AVAIL)) {
        h->mv[MV_FWD_C2] = un_mv;
        h->mv[MV_BWD_C2] = un_mv;
    /* clear top-left predictors if MB D is not available */
    if(!(h->flags & D_AVAIL)) {
        h->mv[MV_FWD_D3] = un_mv;
        h->mv[MV_BWD_D3] = un_mv;
    /* set pointer for co-located macroblock type */
    h->col_type = &h->col_type_base[h->mby*h->mb_width + h->mbx];

static inline void check_for_slice(AVSContext *h);

/**
 * save predictors for later macroblocks and increase the
 * macroblock address
 * @returns 0 if end of frame is reached, 1 otherwise
 */
static inline int next_mb(AVSContext *h) {
    /* copy mvs as predictors to the left */
        h->mv[i] = h->mv[i+2];
    /* copy bottom mvs from cache to top line */
    h->top_mv[0][h->mbx*2+0] = h->mv[MV_FWD_X2];
    h->top_mv[0][h->mbx*2+1] = h->mv[MV_FWD_X3];
    h->top_mv[1][h->mbx*2+0] = h->mv[MV_BWD_X2];
    h->top_mv[1][h->mbx*2+1] = h->mv[MV_BWD_X3];
    /* next MB address */
    if(h->mbx == h->mb_width) { //new mb line
        h->flags = B_AVAIL|C_AVAIL;
        /* clear left pred_modes */
        h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
        /* clear left mv predictors */
        /* re-calculate sample pointers */
        h->cy = h->picture.data[0] + h->mby*16*h->l_stride;
        h->cu = h->picture.data[1] + h->mby*8*h->c_stride;
        h->cv = h->picture.data[2] + h->mby*8*h->c_stride;
        if(h->mby == h->mb_height) { //frame end
            //check_for_slice(h);

static void decode_mb_i(AVSContext *h, int is_i_pic) {
    GetBitContext *gb = &h->s.gb;
    int block, pred_mode_uv;

    /* get intra prediction modes from stream */
    for(block=0;block<4;block++) {
        int pos = scan3x3[block];

        nA = h->pred_mode_Y[pos-1];
        nB = h->pred_mode_Y[pos-3];
        if((nA == NOT_AVAIL) || (nB == NOT_AVAIL))
        predpred = FFMIN(nA,nB);
            h->pred_mode_Y[pos] = predpred;
            h->pred_mode_Y[pos] = get_bits(gb,2);
            if(h->pred_mode_Y[pos] >= predpred)
                h->pred_mode_Y[pos]++;
    pred_mode_uv = get_ue_golomb(gb);
    if(pred_mode_uv > 6) {
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra chroma pred mode\n");

    /* save pred modes before they get modified */
    h->pred_mode_Y[3] = h->pred_mode_Y[5];
    h->pred_mode_Y[6] = h->pred_mode_Y[8];
    h->top_pred_Y[h->mbx*2+0] = h->pred_mode_Y[7];
    h->top_pred_Y[h->mbx*2+1] = h->pred_mode_Y[8];

    /* modify pred modes according to availability of neighbour samples */
    if(!(h->flags & A_AVAIL)) {
        modify_pred(left_modifier_l, &h->pred_mode_Y[4] );
        modify_pred(left_modifier_l, &h->pred_mode_Y[7] );
        modify_pred(left_modifier_c, &pred_mode_uv );
    if(!(h->flags & B_AVAIL)) {
        modify_pred(top_modifier_l, &h->pred_mode_Y[4] );
        modify_pred(top_modifier_l, &h->pred_mode_Y[5] );
        modify_pred(top_modifier_c, &pred_mode_uv );

    /* get coded block pattern */
    h->cbp = cbp_tab[get_ue_golomb(gb)][0];
    if(h->cbp && !h->qp_fixed)
        h->qp += get_se_golomb(gb); //qp_delta

    /* luma intra prediction interleaved with residual decode/transform/add */
    for(block=0;block<4;block++) {
        d = h->cy + h->luma_scan[block];
        load_intra_pred_luma(h, top, left, block);
        h->intra_pred_l[h->pred_mode_Y[scan3x3[block]]]
            (d, top, left, h->l_stride);
        if(h->cbp & (1<<block))
            decode_residual_block(h,gb,intra_2dvlc,1,h->qp,d,h->l_stride);

    /* chroma intra prediction */
    /* extend borders by one pixel */
    h->left_border_u[9] = h->left_border_u[8];
    h->left_border_v[9] = h->left_border_v[8];
    h->top_border_u[h->mbx*10+9] = h->top_border_u[h->mbx*10+8];
    h->top_border_v[h->mbx*10+9] = h->top_border_v[h->mbx*10+8];
    if(h->mbx && h->mby) {
        h->top_border_u[h->mbx*10] = h->left_border_u[0] = h->topleft_border_u;
        h->top_border_v[h->mbx*10] = h->left_border_v[0] = h->topleft_border_v;
        h->left_border_u[0] = h->left_border_u[1];
        h->left_border_v[0] = h->left_border_v[1];
        h->top_border_u[h->mbx*10] = h->top_border_u[h->mbx*10+1];
        h->top_border_v[h->mbx*10] = h->top_border_v[h->mbx*10+1];
    h->intra_pred_c[pred_mode_uv](h->cu, &h->top_border_u[h->mbx*10],
                                  h->left_border_u, h->c_stride);
    h->intra_pred_c[pred_mode_uv](h->cv, &h->top_border_v[h->mbx*10],
                                  h->left_border_v, h->c_stride);

    decode_residual_chroma(h);

    /* mark motion vectors as intra */
    h->mv[MV_FWD_X0] = intra_mv;
    set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
    h->mv[MV_BWD_X0] = intra_mv;
    set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
    if(h->pic_type != FF_B_TYPE)
        *h->col_type = I_8X8;

static void mb_skip_p(AVSContext *h) {
    mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_PSKIP, BLK_16X16, 0);
    *h->col_type = P_SKIP;

static void mb_skip_b(AVSContext *h) {
    if(!(*h->col_type)) {
        /* intra MB at co-location, do in-plane prediction */
        mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_BSKIP, BLK_16X16, 1);
        mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_BSKIP, BLK_16X16, 0);
        /* direct prediction from co-located P MB, block-wise */
            mv_pred_direct(h,&h->mv[mv_scan[i]],
                           &h->mv[mv_scan[i]+MV_BWD_OFFS],
                           &h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + i]);

static void decode_mb_p(AVSContext *h, enum mb_t mb_type) {
    GetBitContext *gb = &h->s.gb;

        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16,ref[0]);
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        ref[2] = h->ref_flag ? 0 : get_bits1(gb);
        mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, ref[0]);
        mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, ref[2]);
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        ref[1] = h->ref_flag ? 0 : get_bits1(gb);
        mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, ref[0]);
        mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT, BLK_8X16, ref[1]);
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        ref[1] = h->ref_flag ? 0 : get_bits1(gb);
        ref[2] = h->ref_flag ? 0 : get_bits1(gb);
        ref[3] = h->ref_flag ? 0 : get_bits1(gb);
        mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_MEDIAN, BLK_8X8, ref[0]);
        mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_MEDIAN, BLK_8X8, ref[1]);
        mv_pred(h, MV_FWD_X2, MV_FWD_X1, MV_PRED_MEDIAN, BLK_8X8, ref[2]);
        mv_pred(h, MV_FWD_X3, MV_FWD_X0, MV_PRED_MEDIAN, BLK_8X8, ref[3]);

    decode_residual_inter(h);
    filter_mb(h,mb_type);
    *h->col_type = mb_type;

static void decode_mb_b(AVSContext *h, enum mb_t mb_type) {
    enum sub_mb_t sub_type[4];

    h->mv[MV_FWD_X0] = dir_mv;
    set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
    h->mv[MV_BWD_X0] = dir_mv;
    set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
        filter_mb(h,B_SKIP);
        mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
        mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
        mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X16);
        mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_MEDIAN, BLK_16X16, 0);
        for(block=0;block<4;block++)
            sub_type[block] = get_bits(&h->s.gb,2);
        for(block=0;block<4;block++) {
            switch(sub_type[block]) {
                if(!(*h->col_type)) {
                    /* intra MB at co-location, do in-plane prediction */
                    mv_pred(h, mv_scan[block], mv_scan[block]-3,
                            MV_PRED_BSKIP, BLK_8X8, 1);
                    mv_pred(h, mv_scan[block]+MV_BWD_OFFS,
                            mv_scan[block]-3+MV_BWD_OFFS,
                            MV_PRED_BSKIP, BLK_8X8, 0);
                    mv_pred_direct(h,&h->mv[mv_scan[block]],
                                   &h->mv[mv_scan[block]+MV_BWD_OFFS],
                                   &h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + block]);
                mv_pred(h, mv_scan[block], mv_scan[block]-3,
                        MV_PRED_MEDIAN, BLK_8X8, 1);
                mv_pred(h, mv_scan[block], mv_scan[block]-3,
                        MV_PRED_MEDIAN, BLK_8X8, 1);
                mv_pred_sym(h, &h->mv[mv_scan[block]], BLK_8X8);
        for(block=0;block<4;block++) {
            if(sub_type[block] == B_SUB_BWD)
                mv_pred(h, mv_scan[block]+MV_BWD_OFFS,
                        mv_scan[block]+MV_BWD_OFFS-3,
                        MV_PRED_MEDIAN, BLK_8X8, 0);
        assert((mb_type > B_SYM_16X16) && (mb_type < B_8X8));
        flags = b_partition_flags[(mb_type-1)>>1];
        if(mb_type & 1) { /* 16x8 macroblock types */
                mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
                mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
                mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X8);
                mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
                mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
                mv_pred_sym(h, &h->mv[9], BLK_16X8);
                mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_TOP, BLK_16X8, 0);
                mv_pred(h, MV_BWD_X2, MV_BWD_A1, MV_PRED_LEFT, BLK_16X8, 0);
        } else { /* 8x16 macroblock types */
                mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
                mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
                mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_8X16);
                mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 1);
                mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 1);
                mv_pred_sym(h, &h->mv[6], BLK_8X16);
                mv_pred(h, MV_BWD_X0, MV_BWD_B3, MV_PRED_LEFT, BLK_8X16, 0);
                mv_pred(h, MV_BWD_X1, MV_BWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 0);
    decode_residual_inter(h);
    filter_mb(h,mb_type);

/*****************************************************************************
 *
 ****************************************************************************/

static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) {
        av_log(h->s.avctx, AV_LOG_ERROR, "unexpected start code 0x%02x\n", h->stc);
    if((h->mby == 0) && (!h->qp_fixed)){
        h->qp_fixed = get_bits1(gb);
        h->qp = get_bits(gb,6);
    /* inter frame or second slice can have weighting params */
    if((h->pic_type != FF_I_TYPE) || (!h->pic_structure && h->mby >= h->mb_width/2))
        if(get_bits1(gb)) { //slice_weighting_flag
            av_log(h->s.avctx, AV_LOG_ERROR,
                   "weighted prediction not yet supported\n");

static inline void check_for_slice(AVSContext *h) {
    GetBitContext *gb = &h->s.gb;
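    /* (-get_bits_count(gb)) & 7 below is the number of bits up to the next
       byte boundary; slice start codes are byte-aligned, so we peek 24 bits
       past the alignment for the 0x000001 prefix and, if present, consume
       it and parse the slice header */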
    align = (-get_bits_count(gb)) & 7;
    if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) {
        get_bits_long(gb,24+align);
        h->stc = get_bits(gb,8);
        decode_slice_header(h,gb);

/*****************************************************************************
 *
 ****************************************************************************/

static void init_pic(AVSContext *h) {
    /* clear some predictors */
    h->mv[MV_BWD_X0] = dir_mv;
    set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
    h->mv[MV_FWD_X0] = dir_mv;
    set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
    h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
    h->cy = h->picture.data[0];
    h->cu = h->picture.data[1];
    h->cv = h->picture.data[2];
    h->l_stride = h->picture.linesize[0];
    h->c_stride = h->picture.linesize[1];
    h->luma_scan[2] = 8*h->l_stride;
    h->luma_scan[3] = 8*h->l_stride+8;
    h->mbx = h->mby = 0;

static int decode_pic(AVSContext *h) {
    MpegEncContext *s = &h->s;

    if (!s->context_initialized) {
        if (MPV_common_init(s) < 0)
    get_bits(&s->gb,16);//bbv_delay
    if(h->stc == PIC_PB_START_CODE) {
        h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE;
        /* make sure we have the reference frames we need */
        if(!h->DPB[0].data[0] ||
          (!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE))
        h->pic_type = FF_I_TYPE;
        if(get_bits1(&s->gb))
            get_bits(&s->gb,16);//time_code
    /* release last B frame */
    if(h->picture.data[0])
        s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);
    s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
    h->picture.poc = get_bits(&s->gb,8)*2;

    /* get temporal distances and MV scaling factors */
    if(h->pic_type != FF_B_TYPE) {
        h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512;
        h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512;
    h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512;
    h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0;
    h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0;
    if(h->pic_type == FF_B_TYPE) {
        h->sym_factor = h->dist[0]*h->scale_den[1];
        h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0;
        h->direct_den[1] = h->dist[1] ? 16384/h->dist[1] : 0;
    get_ue_golomb(&s->gb); //bbv_check_times
    h->progressive = get_bits1(&s->gb);
        h->pic_structure = 1;
    else if(!(h->pic_structure = get_bits1(&s->gb)) && (h->stc == PIC_PB_START_CODE))
        get_bits1(&s->gb); //advanced_pred_mode_disable
    skip_bits1(&s->gb); //top_field_first
    skip_bits1(&s->gb); //repeat_first_field
    h->qp_fixed = get_bits1(&s->gb);
    h->qp = get_bits(&s->gb,6);
    if(h->pic_type == FF_I_TYPE) {
        if(!h->progressive && !h->pic_structure)
            skip_bits1(&s->gb);//what is this?
        skip_bits(&s->gb,4); //reserved bits
        if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1))
            h->ref_flag = get_bits1(&s->gb);
        skip_bits(&s->gb,4); //reserved bits
        h->skip_mode_flag = get_bits1(&s->gb);
    h->loop_filter_disable = get_bits1(&s->gb);
    if(!h->loop_filter_disable && get_bits1(&s->gb)) {
        h->alpha_offset = get_se_golomb(&s->gb);
        h->beta_offset  = get_se_golomb(&s->gb);
        h->alpha_offset = h->beta_offset = 0;
    if(h->pic_type == FF_I_TYPE) {
        } while(next_mb(h));
    } else if(h->pic_type == FF_P_TYPE) {
        if(h->skip_mode_flag) {
            skip_count = get_ue_golomb(&s->gb);
            for(i=0;i<skip_count;i++) {
            mb_type = get_ue_golomb(&s->gb) + P_16X16;
            mb_type = get_ue_golomb(&s->gb) + P_SKIP;
            if(mb_type > P_8X8) {
                h->cbp = cbp_tab[mb_type - P_8X8 - 1][0];
                decode_mb_p(h,mb_type);
        } while(next_mb(h));
    } else { /* FF_B_TYPE */
        if(h->skip_mode_flag) {
            skip_count = get_ue_golomb(&s->gb);
            for(i=0;i<skip_count;i++) {
                filter_mb(h,B_SKIP);
            mb_type = get_ue_golomb(&s->gb) + B_DIRECT;
            mb_type = get_ue_golomb(&s->gb) + B_SKIP;
            if(mb_type > B_8X8) {
                h->cbp = cbp_tab[mb_type - B_8X8 - 1][0];
                decode_mb_b(h,mb_type);
        } while(next_mb(h));
    if(h->pic_type != FF_B_TYPE) {
        if(h->DPB[1].data[0])
            s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
        memcpy(&h->DPB[1], &h->DPB[0], sizeof(Picture));
        memcpy(&h->DPB[0], &h->picture, sizeof(Picture));
        memset(&h->picture,0,sizeof(Picture));

/*****************************************************************************
 *
 * headers and interface
 *
 ****************************************************************************/

/**
 * some predictions require data from the top-neighbouring macroblock.
 * this data has to be stored for one complete row of macroblocks
 * and this storage space is allocated here
 */
static void init_top_lines(AVSContext *h) {
    /* alloc top line of predictors */
    h->top_qp = av_malloc( h->mb_width);
    h->top_mv[0] = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
    h->top_mv[1] = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
    h->top_pred_Y = av_malloc( h->mb_width*2*sizeof(*h->top_pred_Y));
    h->top_border_y = av_malloc((h->mb_width+1)*16);
    h->top_border_u = av_malloc((h->mb_width)*10);
    h->top_border_v = av_malloc((h->mb_width)*10);

    /* alloc space for co-located MVs and types */
    h->col_mv = av_malloc( h->mb_width*h->mb_height*4*sizeof(vector_t));
    h->col_type_base = av_malloc(h->mb_width*h->mb_height);

static int decode_seq_header(AVSContext *h) {
    MpegEncContext *s = &h->s;
    extern const AVRational ff_frame_rate_tab[];
    int frame_rate_code;

    h->profile = get_bits(&s->gb,8);
    h->level = get_bits(&s->gb,8);
    skip_bits1(&s->gb); //progressive sequence
    s->width = get_bits(&s->gb,14);
    s->height = get_bits(&s->gb,14);
    skip_bits(&s->gb,2); //chroma format
    skip_bits(&s->gb,3); //sample_precision
    h->aspect_ratio = get_bits(&s->gb,4);
    frame_rate_code = get_bits(&s->gb,4);
    skip_bits(&s->gb,18);//bit_rate_lower
    skip_bits1(&s->gb);  //marker_bit
    skip_bits(&s->gb,12);//bit_rate_upper
    s->low_delay = get_bits1(&s->gb);
    h->mb_width  = (s->width  + 15) >> 4;
    h->mb_height = (s->height + 15) >> 4;
    h->s.avctx->time_base.den = ff_frame_rate_tab[frame_rate_code].num;
    h->s.avctx->time_base.num = ff_frame_rate_tab[frame_rate_code].den;
    h->s.avctx->width = s->width;
    h->s.avctx->height = s->height;

/**
 * finds the end of the current frame in the bitstream.
 * @return the position of the first byte of the next frame, or -1
 */
int ff_cavs_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) {
    pic_found= pc->frame_start_found;

    for(i=0; i<buf_size; i++){
        state= (state<<8) | buf[i];
        if(state == PIC_I_START_CODE || state == PIC_PB_START_CODE){
    /* EOF considered as end of frame */
    for(; i<buf_size; i++){
        state= (state<<8) | buf[i];
        if((state&0xFFFFFF00) == 0x100){
            if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
                pc->frame_start_found=0;
    pc->frame_start_found= pic_found;
    return END_NOT_FOUND;

void ff_cavs_flush(AVCodecContext * avctx) {
    AVSContext *h = avctx->priv_data;
    h->got_keyframe = 0;

static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
                             uint8_t * buf, int buf_size) {
    AVSContext *h = avctx->priv_data;
    MpegEncContext *s = &h->s;
    const uint8_t *buf_end;
    const uint8_t *buf_ptr;
    AVFrame *picture = data;

    if (buf_size == 0) {
        if(!s->low_delay && h->DPB[0].data[0]) {
            *data_size = sizeof(AVPicture);
            *picture = *(AVFrame *) &h->DPB[0];
    buf_end = buf + buf_size;
        buf_ptr = ff_find_start_code(buf_ptr,buf_end, &stc);
        if(stc & 0xFFFFFE00)
            return FFMAX(0, buf_ptr - buf - s->parse_context.last_index);
        input_size = (buf_end - buf_ptr)*8;
        case SEQ_START_CODE:
            init_get_bits(&s->gb, buf_ptr, input_size);
            decode_seq_header(h);
        case PIC_I_START_CODE:
            if(!h->got_keyframe) {
                if(h->DPB[0].data[0])
                    avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
                if(h->DPB[1].data[0])
                    avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
                h->got_keyframe = 1;
        case PIC_PB_START_CODE:
            if(!h->got_keyframe)
            init_get_bits(&s->gb, buf_ptr, input_size);
            *data_size = sizeof(AVPicture);
            if(h->pic_type != FF_B_TYPE) {
                if(h->DPB[1].data[0]) {
                    *picture = *(AVFrame *) &h->DPB[1];
                *picture = *(AVFrame *) &h->picture;
        case EXT_START_CODE:
            //mpeg_decode_extension(avctx,buf_ptr, input_size);
        case USER_START_CODE:
            //mpeg_decode_user_data(avctx,buf_ptr, input_size);
            if (stc >= SLICE_MIN_START_CODE &&
                stc <= SLICE_MAX_START_CODE) {
                init_get_bits(&s->gb, buf_ptr, input_size);
                decode_slice_header(h, &s->gb);

static int cavs_decode_init(AVCodecContext * avctx) {
    AVSContext *h = avctx->priv_data;
    MpegEncContext * const s = &h->s;

    MPV_decode_defaults(s);
    avctx->pix_fmt= PIX_FMT_YUV420P;

    h->luma_scan[0] = 0;
    h->luma_scan[1] = 8;
    h->intra_pred_l[ INTRA_L_VERT] = intra_pred_vert;
    h->intra_pred_l[ INTRA_L_HORIZ] = intra_pred_horiz;
    h->intra_pred_l[ INTRA_L_LP] = intra_pred_lp;
    h->intra_pred_l[ INTRA_L_DOWN_LEFT] = intra_pred_down_left;
    h->intra_pred_l[INTRA_L_DOWN_RIGHT] = intra_pred_down_right;
    h->intra_pred_l[ INTRA_L_LP_LEFT] = intra_pred_lp_left;
    h->intra_pred_l[ INTRA_L_LP_TOP] = intra_pred_lp_top;
    h->intra_pred_l[ INTRA_L_DC_128] = intra_pred_dc_128;
    h->intra_pred_c[ INTRA_C_LP] = intra_pred_lp;
    h->intra_pred_c[ INTRA_C_HORIZ] = intra_pred_horiz;
    h->intra_pred_c[ INTRA_C_VERT] = intra_pred_vert;
    h->intra_pred_c[ INTRA_C_PLANE] = intra_pred_plane;
    h->intra_pred_c[ INTRA_C_LP_LEFT] = intra_pred_lp_left;
    h->intra_pred_c[ INTRA_C_LP_TOP] = intra_pred_lp_top;
    h->intra_pred_c[ INTRA_C_DC_128] = intra_pred_dc_128;

static int cavs_decode_end(AVCodecContext * avctx) {
    AVSContext *h = avctx->priv_data;

    av_free(h->top_mv[0]);
    av_free(h->top_mv[1]);
    av_free(h->top_pred_Y);
    av_free(h->top_border_y);
    av_free(h->top_border_u);
    av_free(h->top_border_v);
    av_free(h->col_type_base);

AVCodec cavs_decoder = {
    CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .flush= ff_cavs_flush,