mirror of https://git.videolan.org/git/ffmpeg.git

H.264: merge fill_rectangle into P-SKIP MV prediction, to match B-SKIP

Author: Jason Garrett-Glaser
Date:   2011-07-06 07:58:50 -07:00
Commit: ef0c594801
Parent: 5136ba7c69

2 changed files with 24 additions and 25 deletions
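In short: pred_pskip_motion() no longer returns the predicted motion vector through mx/my out-parameters with decode_mb_skip() writing the caches afterwards. The two fill_rectangle() calls move into the predictor itself, and the early "zero MV" exits become goto zeromv so every exit path fills the ref/MV caches in one place, matching how the B-SKIP path already works. The stand-alone sketch below illustrates that calling-convention change only; the types and helpers are simplified stand-ins, not FFmpeg's.

/* Simplified stand-ins for illustration only; not FFmpeg's real types or helpers. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t mv[16]; int8_t ref[16]; } Cache;

static void fill16_u32(uint32_t *p, uint32_t v) { for (int i = 0; i < 16; i++) p[i] = v; }
static void fill16_s8 (int8_t   *p, int8_t   v) { for (int i = 0; i < 16; i++) p[i] = v; }

/* Before: the predictor reports mx/my and the caller fills the caches. */
static void pred_pskip_before(int *mx, int *my, int neighbour_is_zero) {
    if (neighbour_is_zero) { *mx = *my = 0; return; }  /* early exit, caller still fills */
    *mx = 2; *my = -1;                                 /* pretend median prediction */
}
static void skip_before(Cache *c, int neighbour_is_zero) {
    int mx, my;
    pred_pskip_before(&mx, &my, neighbour_is_zero);
    fill16_s8(c->ref, 0);
    fill16_u32(c->mv, (uint16_t)mx | ((uint32_t)(uint16_t)my << 16));
}

/* After: the predictor fills the caches itself; zero-MV exits share one label. */
static void pred_pskip_after(Cache *c, int neighbour_is_zero) {
    int mx, my;
    fill16_s8(c->ref, 0);
    if (neighbour_is_zero) goto zeromv;
    mx = 2; my = -1;                                   /* pretend median prediction */
    fill16_u32(c->mv, (uint16_t)mx | ((uint32_t)(uint16_t)my << 16));
    return;
zeromv:
    fill16_u32(c->mv, 0);
}

int main(void) {
    Cache a, b;
    skip_before(&a, 1);
    pred_pskip_after(&b, 1);
    printf("before: mv=0x%08x  after: mv=0x%08x\n", (unsigned)a.mv[0], (unsigned)b.mv[0]);
    return 0;
}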


@@ -770,7 +770,7 @@ static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale){
     return h->pps.chroma_qp_table[t][qscale];
 }
 
-static av_always_inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my);
+static av_always_inline void pred_pskip_motion(H264Context * const h);
 
 static void fill_decode_neighbors(H264Context *h, int mb_type){
     MpegEncContext * const s = &h->s;
@@ -1327,13 +1327,10 @@ static void av_unused decode_mb_skip(H264Context *h){
     }
     else
     {
-        int mx, my;
         mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP;
 
         fill_decode_neighbors(h, mb_type);
-        pred_pskip_motion(h, &mx, &my);
-        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
-        fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
+        pred_pskip_motion(h);
     }
 
     write_back_motion(h, mb_type);
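For context on the two calls that just moved: fill_rectangle(ptr, w, h, stride, val, size) splats val over a w x h rectangle of size-byte elements in an array with the given stride, and pack16to32() packs the two 16-bit MV components into one 32-bit word, so a single call covers the whole 4x4 grid of 4x4 blocks for the macroblock in the 8-wide cache. The rough model below is only meant to show the semantics; it is little-endian only and nothing like the optimized implementations FFmpeg actually uses.

/* Rough behavioural model of fill_rectangle()/pack16to32(); little-endian only,
 * simplified, and not FFmpeg's optimized versions. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t pack16to32(int16_t mx, int16_t my) {
    return (uint16_t)mx | ((uint32_t)(uint16_t)my << 16);
}

/* Fill a w x h rectangle of 'size'-byte elements in an array with the given stride. */
static void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size) {
    uint8_t *p = vp;
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            memcpy(p + (y * stride + x) * size, &val, size);
}

int main(void) {
    uint32_t mv_cache[8 * 8];    /* stand-in for the 8-wide MV cache layout */
    int8_t   ref_cache[8 * 8];
    memset(mv_cache, 0xff, sizeof(mv_cache));
    memset(ref_cache, -1, sizeof(ref_cache));

    /* One call per cache covers the current macroblock's 4x4 grid of 4x4 blocks. */
    fill_rectangle(ref_cache, 4, 4, 8, 0, 1);
    fill_rectangle(mv_cache, 4, 4, 8, pack16to32(2, -1), 4);

    printf("ref=%d  packed mv=0x%08x\n", ref_cache[0], (unsigned)mv_cache[0]);
    return 0;
}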


@@ -232,16 +232,18 @@ static av_always_inline void pred_8x16_motion(H264Context * const h, int n, int
     }\
 }
 
-static av_always_inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
+static av_always_inline void pred_pskip_motion(H264Context * const h){
     DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = {0};
     DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
     MpegEncContext * const s = &h->s;
     int8_t *ref = s->current_picture.ref_index[0];
     int16_t (*mv)[2] = s->current_picture.motion_val[0];
-    int top_ref, left_ref, diagonal_ref, match_count;
+    int top_ref, left_ref, diagonal_ref, match_count, mx, my;
     const int16_t *A, *B, *C;
     int b_stride = h->b_stride;
 
+    fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
+
     /* To avoid doing an entire fill_decode_caches, we inline the relevant parts here.
      * FIXME: this is a partial duplicate of the logic in fill_decode_caches, but it's
      * faster this way. Is there a way to avoid this duplication?
@@ -251,15 +253,13 @@ static av_always_inline void pred_pskip_motion(H264Context * const h, int * cons
         A = mv[h->mb2b_xy[h->left_mb_xy[LTOP]] + 3 + b_stride*h->left_block[0]];
         FIX_MV_MBAFF(h->left_type[LTOP], left_ref, A, 0);
         if(!(left_ref | AV_RN32A(A))){
-            *mx = *my = 0;
-            return;
+            goto zeromv;
         }
     }else if(h->left_type[LTOP]){
         left_ref = LIST_NOT_USED;
         A = zeromv;
     }else{
-        *mx = *my = 0;
-        return;
+        goto zeromv;
     }
 
     if(USES_LIST(h->top_type, 0)){
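The replaced early exits hinge on the test !(left_ref | AV_RN32A(A)): AV_RN32A() reads the two aligned 16-bit MV components as one 32-bit word, so the OR is zero exactly when the neighbour uses reference index 0 and has a (0,0) motion vector, i.e. the case where P-SKIP must use a zero MV. A small demonstration of that combined test, with a plain-C stand-in for the read macro:

/* Demonstrates the "ref index 0 and zero MV" test in one branch; rd32() is a
 * portable stand-in for FFmpeg's aligned 32-bit read macro AV_RN32A(). */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t rd32(const int16_t mv[2]) {
    uint32_t v;
    memcpy(&v, mv, 4);          /* mx and my read back as one 32-bit word */
    return v;
}

static int takes_zeromv_path(int ref, const int16_t mv[2]) {
    return !(ref | rd32(mv));   /* zero iff ref == 0 and mv == (0,0) */
}

int main(void) {
    const int16_t zero[2] = {0, 0}, moving[2] = {2, -1};
    printf("%d %d %d\n",
           takes_zeromv_path(0, zero),    /* 1: jumps to zeromv           */
           takes_zeromv_path(0, moving),  /* 0: non-zero motion vector    */
           takes_zeromv_path(1, zero));   /* 0: different reference index */
    return 0;
}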
@@ -267,15 +267,13 @@ static av_always_inline void pred_pskip_motion(H264Context * const h, int * cons
         B = mv[h->mb2b_xy[h->top_mb_xy] + 3*b_stride];
         FIX_MV_MBAFF(h->top_type, top_ref, B, 1);
         if(!(top_ref | AV_RN32A(B))){
-            *mx = *my = 0;
-            return;
+            goto zeromv;
         }
     }else if(h->top_type){
         top_ref = LIST_NOT_USED;
         B = zeromv;
     }else{
-        *mx = *my = 0;
-        return;
+        goto zeromv;
     }
 
     tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);
@@ -304,24 +302,28 @@ static av_always_inline void pred_pskip_motion(H264Context * const h, int * cons
     match_count= !diagonal_ref + !top_ref + !left_ref;
     tprintf(h->s.avctx, "pred_pskip_motion match_count=%d\n", match_count);
     if(match_count > 1){
-        *mx= mid_pred(A[0], B[0], C[0]);
-        *my= mid_pred(A[1], B[1], C[1]);
+        mx = mid_pred(A[0], B[0], C[0]);
+        my = mid_pred(A[1], B[1], C[1]);
     }else if(match_count==1){
         if(!left_ref){
-            *mx= A[0];
-            *my= A[1];
+            mx = A[0];
+            my = A[1];
         }else if(!top_ref){
-            *mx= B[0];
-            *my= B[1];
+            mx = B[0];
+            my = B[1];
         }else{
-            *mx= C[0];
-            *my= C[1];
+            mx = C[0];
+            my = C[1];
         }
     }else{
-        *mx= mid_pred(A[0], B[0], C[0]);
-        *my= mid_pred(A[1], B[1], C[1]);
+        mx = mid_pred(A[0], B[0], C[0]);
+        my = mid_pred(A[1], B[1], C[1]);
     }
 
+    fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
+    return;
+zeromv:
+    fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
     return;
 }
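What survives at the bottom of the function is the usual H.264 skip/inter MV prediction rule: when exactly one of the neighbours A (left), B (top), C (diagonal) matches the reference, its motion vector is copied; with zero or several matches, the component-wise median of the three is taken. A compact worked example of that rule, with mid_pred() written out and made-up neighbour data:

/* Worked example of the median MV prediction retained above; mid_pred() is a
 * straightforward median-of-three and the neighbour data is invented. */
#include <stdio.h>

static int mid_pred(int a, int b, int c) {
    if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b              */
    if (b > c) b = c;                        /* b = min(b, c)              */
    return a > b ? a : b;                    /* median = max(a, min(b, c)) */
}

/* Predict (mx,my) from left (A), top (B) and diagonal (C) neighbours. */
static void pred_mv(const int A[2], const int B[2], const int C[2],
                    int refA, int refB, int refC, int *mx, int *my) {
    int match_count = !refA + !refB + !refC;     /* neighbours using ref index 0 */
    if (match_count == 1) {                      /* exactly one match: copy it   */
        const int *m = !refA ? A : !refB ? B : C;
        *mx = m[0];
        *my = m[1];
    } else {                                     /* 0 or 2+ matches: median      */
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    }
}

int main(void) {
    const int A[2] = {4, 0}, B[2] = {2, -2}, C[2] = {8, 2};
    int mx, my;
    pred_mv(A, B, C, 0, 0, 0, &mx, &my);     /* all match -> median (4, 0)  */
    printf("median: (%d,%d)\n", mx, my);
    pred_mv(A, B, C, 1, 0, 1, &mx, &my);     /* only B    -> copied (2, -2) */
    printf("single: (%d,%d)\n", mx, my);
    return 0;
}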