lavc/vp9: split into vp9{block,data,mvs,prob}

This is following Libav layout to ease merges.
This commit is contained in:
Clément Bœsch 2017-03-25 12:10:13 +01:00
parent 487ca38e8b
commit 1c9f4b5078
25 changed files with 5295 additions and 5166 deletions

View File

@ -610,8 +610,9 @@ OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp56rac.o
OBJS-$(CONFIG_VP8_CUVID_DECODER) += cuvid.o
OBJS-$(CONFIG_VP8_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_VP8_VAAPI_ENCODER) += vaapi_encode_vp8.o
OBJS-$(CONFIG_VP9_DECODER) += vp9.o vp9dsp.o vp56rac.o vp9dsp_8bpp.o \
vp9dsp_10bpp.o vp9dsp_12bpp.o
OBJS-$(CONFIG_VP9_DECODER) += vp9.o vp9data.o vp9dsp.o \
vp9block.o vp9prob.o vp9mvs.o vp56rac.o \
vp9dsp_8bpp.o vp9dsp_10bpp.o vp9dsp_12bpp.o
OBJS-$(CONFIG_VP9_CUVID_DECODER) += cuvid.o
OBJS-$(CONFIG_VP9_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_VPLAYER_DECODER) += textdec.o ass.o

View File

@ -21,7 +21,7 @@
#ifndef AVCODEC_AARCH64_VP9DSP_INIT_H
#define AVCODEC_AARCH64_VP9DSP_INIT_H
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
void ff_vp9dsp_init_10bpp_aarch64(VP9DSPContext *dsp);
void ff_vp9dsp_init_12bpp_aarch64(VP9DSPContext *dsp);

View File

@ -22,7 +22,7 @@
#include "libavutil/attributes.h"
#include "libavutil/aarch64/cpu.h"
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "vp9dsp_init.h"
#define declare_fpel(type, sz) \

View File

@ -21,7 +21,7 @@
#ifndef AVCODEC_ARM_VP9DSP_INIT_H
#define AVCODEC_ARM_VP9DSP_INIT_H
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
void ff_vp9dsp_init_10bpp_arm(VP9DSPContext *dsp);
void ff_vp9dsp_init_12bpp_arm(VP9DSPContext *dsp);

View File

@ -22,7 +22,7 @@
#include "libavutil/attributes.h"
#include "libavutil/arm/cpu.h"
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "vp9dsp_init.h"
#define declare_fpel(type, sz) \

View File

@ -19,7 +19,7 @@
*/
#include <string.h>
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "libavutil/mips/generic_macros_msa.h"
#include "vp9dsp_mips.h"

View File

@ -18,7 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "libavutil/mips/generic_macros_msa.h"
#include "vp9dsp_mips.h"

View File

@ -18,7 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "libavutil/mips/generic_macros_msa.h"
#include "vp9dsp_mips.h"

View File

@ -18,7 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "libavutil/mips/generic_macros_msa.h"
#include "vp9dsp_mips.h"

View File

@ -20,7 +20,7 @@
#include "config.h"
#include "libavutil/common.h"
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "vp9dsp_mips.h"
#if HAVE_MSA

File diff suppressed because it is too large Load Diff

View File

@ -24,8 +24,13 @@
#ifndef AVCODEC_VP9_H
#define AVCODEC_VP9_H
#include <stddef.h>
#include <stdint.h>
#include "libavutil/buffer.h"
#include "libavutil/internal.h"
#include "avcodec.h"
#include "thread.h"
#include "vp56.h"
@ -118,11 +123,143 @@ enum CompPredMode {
PRED_SWITCHABLE,
};
/*
 * MV "joint" class: which components of the MV residual are coded.
 * Bit 0 = horizontal (x) component present, bit 1 = vertical (y) component
 * present (callers test `j & 1` for x and `j >= MV_JOINT_V` for y).
 */
enum MVJoint {
    MV_JOINT_ZERO,
    MV_JOINT_H,
    MV_JOINT_V,
    MV_JOINT_HV,
};
/*
 * Entropy-coder probabilities that adapt from frame to frame
 * (updated by ff_vp9_adapt_probs() using the counters in VP9Context.counts).
 */
typedef struct ProbContext {
    uint8_t y_mode[4][9];       // luma intra-mode tree probabilities
    uint8_t uv_mode[10][9];     // chroma intra-mode tree probabilities
    uint8_t filter[4][2];       // interpolation filter tree probabilities
    uint8_t mv_mode[7][3];      // inter-mode tree probabilities
    uint8_t intra[4];           // intra vs. inter flag
    uint8_t comp[5];            // compound-prediction flag
    uint8_t single_ref[5][2];   // reference selection, single prediction
    uint8_t comp_ref[5];        // reference selection, compound prediction
    uint8_t tx32p[2][3];        // transform-size trees (32/16/8 max size)
    uint8_t tx16p[2][2];
    uint8_t tx8p[2];
    uint8_t skip[3];            // skip flag
    uint8_t mv_joint[3];        // MV joint-class tree (enum MVJoint)
    /* per-component MV probabilities; index 0 is the row (y) component,
     * 1 the column (x) component (see read_mv_component() callers) */
    struct {
        uint8_t sign;
        uint8_t classes[10];
        uint8_t class0;
        uint8_t bits[10];
        uint8_t class0_fp[2][3];
        uint8_t fp[3];
        uint8_t class0_hp;
        uint8_t hp;
    } mv_comp[2];
    uint8_t partition[4][4][3]; // block-partition tree probabilities
} ProbContext;
/* Motion-compensation function: subpel interpolation from ref into dst
 * (mx/my are the x/y subpel phases, h the block height). */
typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
                            const uint8_t *ref, ptrdiff_t ref_stride,
                            int h, int mx, int my);
/* Scaled motion compensation: as vp9_mc_func plus per-pixel step (dx, dy). */
typedef void (*vp9_scaled_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
                                   const uint8_t *ref, ptrdiff_t ref_stride,
                                   int h, int mx, int my, int dx, int dy);
/*
 * Table of per-bit-depth DSP routines; filled by ff_vp9dsp_init() and
 * the architecture-specific init functions declared below.
 */
typedef struct VP9DSPContext {
    /*
     * dimension 1: 0=4x4, 1=8x8, 2=16x16, 3=32x32
     * dimension 2: intra prediction modes
     *
     * dst/left/top is aligned by transform-size (i.e. 4, 8, 16 or 32 pixels)
     * stride is aligned by 16 pixels
     * top[-1] is top/left; top[4,7] is top-right for 4x4
     */
    // FIXME(rbultje) maybe replace left/top pointers with HAVE_TOP/
    // HAVE_LEFT/HAVE_TOPRIGHT flags instead, and then handle it in-place?
    // also needs to fit in with what H.264/VP8/etc do
    void (*intra_pred[N_TXFM_SIZES][N_INTRA_PRED_MODES])(uint8_t *dst,
                                                         ptrdiff_t stride,
                                                         const uint8_t *left,
                                                         const uint8_t *top);
    /*
     * dimension 1: 0=4x4, 1=8x8, 2=16x16, 3=32x32, 4=lossless (3-4=dct only)
     * dimension 2: 0=dct/dct, 1=dct/adst, 2=adst/dct, 3=adst/adst
     *
     * dst is aligned by transform-size (i.e. 4, 8, 16 or 32 pixels)
     * stride is aligned by 16 pixels
     * block is 16-byte aligned
     * eob indicates the position (+1) of the last non-zero coefficient,
     * in scan-order. This can be used to write faster versions, e.g. a
     * dc-only 4x4/8x8/16x16/32x32, or a 4x4-only (eob<10) 8x8/16x16/32x32,
     * etc.
     */
    // FIXME also write idct_add_block() versions for whole (inter) pred
    // blocks, so we can do 2 4x4s at once
    void (*itxfm_add[N_TXFM_SIZES + 1][N_TXFM_TYPES])(uint8_t *dst,
                                                      ptrdiff_t stride,
                                                      int16_t *block, int eob);
    /*
     * dimension 1: width of filter (0=4, 1=8, 2=16)
     * dimension 2: 0=col-edge filter (h), 1=row-edge filter (v)
     *
     * dst/stride are aligned by 8
     */
    void (*loop_filter_8[3][2])(uint8_t *dst, ptrdiff_t stride,
                                int mb_lim, int lim, int hev_thr);
    /*
     * dimension 1: 0=col-edge filter (h), 1=row-edge filter (v)
     *
     * The width of filter is assumed to be 16; dst/stride are aligned by 16
     */
    void (*loop_filter_16[2])(uint8_t *dst, ptrdiff_t stride,
                              int mb_lim, int lim, int hev_thr);
    /*
     * dimension 1/2: width of filter (0=4, 1=8) for each filter half
     * dimension 3: 0=col-edge filter (h), 1=row-edge filter (v)
     *
     * dst/stride are aligned by operation size
     * this basically calls loop_filter[d1][d3][0](), followed by
     * loop_filter[d2][d3][0]() on the next 8 pixels
     * mb_lim/lim/hev_thr contain two values in the lowest two bytes of the
     * integer.
     */
    // FIXME perhaps a mix4 that operates on 32px (for AVX2)
    void (*loop_filter_mix2[2][2][2])(uint8_t *dst, ptrdiff_t stride,
                                      int mb_lim, int lim, int hev_thr);
    /*
     * dimension 1: hsize (0: 64, 1: 32, 2: 16, 3: 8, 4: 4)
     * dimension 2: filter type (0: smooth, 1: regular, 2: sharp, 3: bilin)
     * dimension 3: averaging type (0: put, 1: avg)
     * dimension 4: x subpel interpolation (0: none, 1: 8tap/bilin)
     * dimension 5: y subpel interpolation (0: none, 1: 8tap/bilin)
     *
     * dst/stride are aligned by hsize
     */
    vp9_mc_func mc[5][4][2][2][2];
    /*
     * for scalable MC, first 3 dimensions identical to above, the other two
     * don't exist since it changes per stepsize.
     */
    vp9_scaled_mc_func smc[5][4][2];
} VP9DSPContext;
/*
 * Per-8x8-unit motion info kept for the whole frame (the frame mv buffer is
 * indexed as row * sb_cols * 8 + col): the up-to-two motion vectors and
 * their reference frame indices (ref[i] < 0 when that slot is unused,
 * see the `ref[i] >= 0` checks in find_ref_mvs()).
 */
struct VP9mvrefPair {
    VP56mv mv[2];
    int8_t ref[2];
};
/*
 * Loop-filter state for one 64x64 superblock: per-8x8-unit filter levels
 * plus edge masks, split by plane type, edge direction, row and
 * transform-size category (see inline index comments).
 */
struct VP9Filter {
    uint8_t level[8 * 8];
    uint8_t /* bit=col */ mask[2 /* 0=y, 1=uv */][2 /* 0=col, 1=row */]
                              [8 /* rows */][4 /* 0=16, 1=8, 2=4, 3=inner4 */];
};
typedef struct VP9Frame {
ThreadFrame tf;
AVBufferRef *extradata;
@ -209,4 +346,147 @@ typedef struct VP9SharedContext {
VP9Frame frames[3];
} VP9SharedContext;
/*
 * Mode/coding information for the block currently being decoded
 * (filled during block decode; see ff_vp9_decode_block()).
 */
typedef struct VP9Block {
    uint8_t seg_id, intra, comp, ref[2], mode[4], uvmode, skip;
    enum FilterMode filter;          // interpolation filter for inter blocks
    VP56mv mv[4 /* b_idx */][2 /* ref */]; // per-sub-block, per-reference MVs
    enum BlockSize bs;
    enum TxfmMode tx, uvtx;          // luma and chroma transform sizes
    enum BlockLevel bl;
    enum BlockPartition bp;
} VP9Block;
/* Full decoder state (per decoding thread). */
typedef struct VP9Context {
    VP9SharedContext s;         // state shared with hwaccels (header, frames)

    VP9DSPContext dsp;
    VideoDSPContext vdsp;
    GetBitContext gb;
    VP56RangeCoder c;           // range coder for the current tile
    VP56RangeCoder *c_b;        // per-tile-column range coders
    unsigned c_b_size;
    VP9Block *b_base, *b;       // block info array and current block
    int pass;                   // current decoding pass (2-pass mode)
    int row, row7, col, col7;   // current position in 8x8 units (row7/col7 = &7)
    uint8_t *dst[3];
    ptrdiff_t y_stride, uv_stride;

    uint8_t ss_h, ss_v;         // chroma subsampling shifts
    uint8_t last_bpp, bpp_index, bytesperpixel;
    uint8_t last_keyframe;
    // sb_cols/rows, rows/cols and last_fmt are used for allocating all internal
    // arrays, and are thus per-thread. w/h and gf_fmt are synced between threads
    // and are therefore per-stream. pix_fmt represents the value in the header
    // of the currently processed frame.
    int w, h;
    enum AVPixelFormat pix_fmt, last_fmt, gf_fmt;
    unsigned sb_cols, sb_rows, rows, cols; // frame size in superblocks / 8x8 units
    ThreadFrame next_refs[8];

    struct {
        uint8_t lim_lut[64];    // loop-filter limit lookup tables
        uint8_t mblim_lut[64];
    } filter_lut;
    unsigned tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    /* the four saved probability contexts, selected by framectxid
     * (see ff_vp9_adapt_probs()) */
    struct {
        ProbContext p;
        uint8_t coef[4][2][2][6][6][3];
    } prob_ctx[4];
    /* probabilities in effect for the current frame */
    struct {
        ProbContext p;
        uint8_t coef[4][2][2][6][6][11];
    } prob;
    /* symbol counts accumulated while decoding the frame, consumed by
     * ff_vp9_adapt_probs() for backward probability adaptation */
    struct {
        unsigned y_mode[4][10];
        unsigned uv_mode[10][10];
        unsigned filter[4][3];
        unsigned mv_mode[7][4];
        unsigned intra[4][2];
        unsigned comp[5][2];
        unsigned single_ref[5][2][2];
        unsigned comp_ref[5][2];
        unsigned tx32p[2][4];
        unsigned tx16p[2][3];
        unsigned tx8p[2][2];
        unsigned skip[3][2];
        unsigned mv_joint[4];
        struct {
            unsigned sign[2];
            unsigned classes[11];
            unsigned class0[2];
            unsigned bits[10][2];
            unsigned class0_fp[2][4];
            unsigned fp[4];
            unsigned class0_hp[2];
            unsigned hp[2];
        } mv_comp[2];
        unsigned partition[4][4][4];
        unsigned coef[4][2][2][6][6][3];
        unsigned eob[4][2][2][6][6][2];
    } counts;

    // contextual (left/above) cache
    DECLARE_ALIGNED(16, uint8_t, left_y_nnz_ctx)[16];
    DECLARE_ALIGNED(16, uint8_t, left_mode_ctx)[16];
    DECLARE_ALIGNED(16, VP56mv, left_mv_ctx)[16][2];
    DECLARE_ALIGNED(16, uint8_t, left_uv_nnz_ctx)[2][16];
    DECLARE_ALIGNED(8, uint8_t, left_partition_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_skip_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_txfm_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_segpred_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_intra_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_comp_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_ref_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_filter_ctx)[8];
    uint8_t *above_partition_ctx;
    uint8_t *above_mode_ctx;
    // FIXME maybe merge some of the below in a flags field?
    uint8_t *above_y_nnz_ctx;
    uint8_t *above_uv_nnz_ctx[2];
    uint8_t *above_skip_ctx; // 1bit
    uint8_t *above_txfm_ctx; // 2bit
    uint8_t *above_segpred_ctx; // 1bit
    uint8_t *above_intra_ctx; // 1bit
    uint8_t *above_comp_ctx; // 1bit
    uint8_t *above_ref_ctx; // 2bit
    uint8_t *above_filter_ctx;
    VP56mv (*above_mv_ctx)[2];

    // whole-frame cache
    uint8_t *intra_pred_data[3];
    struct VP9Filter *lflvl;
    DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer)[135 * 144 * 2];

    // block reconstruction intermediates
    int block_alloc_using_2pass;
    int16_t *block_base, *block, *uvblock_base[2], *uvblock[2];
    uint8_t *eob_base, *uveob_base[2], *eob, *uveob[2];
    struct { int x, y; } min_mv, max_mv; // legal MV range for the current block
    DECLARE_ALIGNED(32, uint8_t, tmp_y)[64 * 64 * 2];
    DECLARE_ALIGNED(32, uint8_t, tmp_uv)[2][64 * 64 * 2];
    uint16_t mvscale[3][2]; // per-reference MV scaling for scaled references
    uint8_t mvstep[3][2];
} VP9Context;
extern const int16_t ff_vp9_subpel_filters[3][16][8];
void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact);
void ff_vp9dsp_init_8(VP9DSPContext *dsp);
void ff_vp9dsp_init_10(VP9DSPContext *dsp);
void ff_vp9dsp_init_12(VP9DSPContext *dsp);
void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int bpp);
void ff_vp9dsp_init_arm(VP9DSPContext *dsp, int bpp);
void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact);
void ff_vp9dsp_init_mips(VP9DSPContext *dsp, int bpp);
void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb);
void ff_vp9_adapt_probs(VP9Context *s);
void ff_vp9_decode_block(AVCodecContext *ctx, int row, int col,
struct VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
enum BlockLevel bl, enum BlockPartition bp);
#endif /* AVCODEC_VP9_H */

2059
libavcodec/vp9block.c Normal file

File diff suppressed because it is too large Load Diff

2237
libavcodec/vp9data.c Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -23,7 +23,7 @@
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "vp9dsp.h"
#include "vp9.h"
const DECLARE_ALIGNED(16, int16_t, ff_vp9_subpel_filters)[3][16][8] = {
[FILTER_8TAP_REGULAR] = {

View File

@ -1,137 +0,0 @@
/*
* VP9 compatible video decoder
*
* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
* Copyright (C) 2013 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VP9DSP_H
#define AVCODEC_VP9DSP_H
#include <stddef.h>
#include <stdint.h>
#include "vp9.h"
typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
const uint8_t *ref, ptrdiff_t ref_stride,
int h, int mx, int my);
typedef void (*vp9_scaled_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
const uint8_t *ref, ptrdiff_t ref_stride,
int h, int mx, int my, int dx, int dy);
typedef struct VP9DSPContext {
/*
* dimension 1: 0=4x4, 1=8x8, 2=16x16, 3=32x32
* dimension 2: intra prediction modes
*
* dst/left/top is aligned by transform-size (i.e. 4, 8, 16 or 32 pixels)
* stride is aligned by 16 pixels
* top[-1] is top/left; top[4,7] is top-right for 4x4
*/
// FIXME(rbultje) maybe replace left/top pointers with HAVE_TOP/
// HAVE_LEFT/HAVE_TOPRIGHT flags instead, and then handle it in-place?
// also needs to fit in with what H.264/VP8/etc do
void (*intra_pred[N_TXFM_SIZES][N_INTRA_PRED_MODES])(uint8_t *dst,
ptrdiff_t stride,
const uint8_t *left,
const uint8_t *top);
/*
* dimension 1: 0=4x4, 1=8x8, 2=16x16, 3=32x32, 4=lossless (3-4=dct only)
* dimension 2: 0=dct/dct, 1=dct/adst, 2=adst/dct, 3=adst/adst
*
* dst is aligned by transform-size (i.e. 4, 8, 16 or 32 pixels)
* stride is aligned by 16 pixels
* block is 16-byte aligned
* eob indicates the position (+1) of the last non-zero coefficient,
* in scan-order. This can be used to write faster versions, e.g. a
* dc-only 4x4/8x8/16x16/32x32, or a 4x4-only (eob<10) 8x8/16x16/32x32,
* etc.
*/
// FIXME also write idct_add_block() versions for whole (inter) pred
// blocks, so we can do 2 4x4s at once
void (*itxfm_add[N_TXFM_SIZES + 1][N_TXFM_TYPES])(uint8_t *dst,
ptrdiff_t stride,
int16_t *block, int eob);
/*
* dimension 1: width of filter (0=4, 1=8, 2=16)
* dimension 2: 0=col-edge filter (h), 1=row-edge filter (v)
*
* dst/stride are aligned by 8
*/
void (*loop_filter_8[3][2])(uint8_t *dst, ptrdiff_t stride,
int mb_lim, int lim, int hev_thr);
/*
* dimension 1: 0=col-edge filter (h), 1=row-edge filter (v)
*
* The width of filter is assumed to be 16; dst/stride are aligned by 16
*/
void (*loop_filter_16[2])(uint8_t *dst, ptrdiff_t stride,
int mb_lim, int lim, int hev_thr);
/*
* dimension 1/2: width of filter (0=4, 1=8) for each filter half
* dimension 3: 0=col-edge filter (h), 1=row-edge filter (v)
*
* dst/stride are aligned by operation size
* this basically calls loop_filter[d1][d3][0](), followed by
* loop_filter[d2][d3][0]() on the next 8 pixels
* mb_lim/lim/hev_thr contain two values in the lowest two bytes of the
* integer.
*/
// FIXME perhaps a mix4 that operates on 32px (for AVX2)
void (*loop_filter_mix2[2][2][2])(uint8_t *dst, ptrdiff_t stride,
int mb_lim, int lim, int hev_thr);
/*
* dimension 1: hsize (0: 64, 1: 32, 2: 16, 3: 8, 4: 4)
* dimension 2: filter type (0: smooth, 1: regular, 2: sharp, 3: bilin)
* dimension 3: averaging type (0: put, 1: avg)
* dimension 4: x subpel interpolation (0: none, 1: 8tap/bilin)
* dimension 5: y subpel interpolation (0: none, 1: 8tap/bilin)
*
* dst/stride are aligned by hsize
*/
vp9_mc_func mc[5][4][2][2][2];
/*
* for scalable MC, first 3 dimensions identical to above, the other two
* don't exist since it changes per stepsize.
*/
vp9_scaled_mc_func smc[5][4][2];
} VP9DSPContext;
extern const int16_t ff_vp9_subpel_filters[3][16][8];
void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact);
void ff_vp9dsp_init_8(VP9DSPContext *dsp);
void ff_vp9dsp_init_10(VP9DSPContext *dsp);
void ff_vp9dsp_init_12(VP9DSPContext *dsp);
void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int bpp);
void ff_vp9dsp_init_arm(VP9DSPContext *dsp, int bpp);
void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact);
void ff_vp9dsp_init_mips(VP9DSPContext *dsp, int bpp);
#endif /* AVCODEC_VP9DSP_H */

View File

@ -23,7 +23,7 @@
#include "libavutil/common.h"
#include "bit_depth_template.c"
#include "vp9dsp.h"
#include "vp9.h"
#if BIT_DEPTH != 12

361
libavcodec/vp9mvs.c Normal file
View File

@ -0,0 +1,361 @@
/*
* VP9 compatible video decoder
*
* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
* Copyright (C) 2013 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "internal.h"
#include "vp56.h"
#include "vp9.h"
#include "vp9data.h"
/**
 * Clamp a motion vector into the legal range for the current block.
 * The valid window is precomputed in s->min_mv / s->max_mv.
 * dst and src may alias.
 */
static av_always_inline void clamp_mv(VP56mv *out, const VP56mv *in,
                                      VP9Context *s)
{
    out->y = av_clip(in->y, s->min_mv.y, s->max_mv.y);
    out->x = av_clip(in->x, s->min_mv.x, s->max_mv.x);
}
/**
 * Build the motion-vector prediction for one block (or sub-8x8 sub-block).
 *
 * Candidates are scanned in priority order: spatial neighbours (offsets
 * depend on block size), the co-located position in the previous frame,
 * then the same sources again accepting MVs that used a *different*
 * reference frame (sign-flipped when the sign biases of the two references
 * differ). The first candidate (idx == 0) or the second distinct candidate
 * (idx == 1) found is written to *pmv; if none is found, *pmv is zeroed.
 *
 * @param s    decoder context
 * @param pmv  receives the predicted MV (clamped to the legal range,
 *             except on the direct sub-block paths)
 * @param ref  reference frame index this block predicts from
 * @param z    which of the block's two references (0 or 1) is predicted
 * @param idx  0 = return the first candidate, 1 = the second distinct one
 * @param sb   sub-block index for sub-8x8 blocks, or -1 for NEWMV /
 *             whole-block prediction
 */
static void find_ref_mvs(VP9Context *s,
                         VP56mv *pmv, int ref, int z, int idx, int sb)
{
    /* per-block-size neighbour scan offsets, stored as { col, row } deltas
     * in 8x8 units relative to the current position */
    static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
        [BS_64x64] = {{  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
                      { -1, -1 }, {  0, -1 }, { -1,  0 }, {  6, -1 }},
        [BS_64x32] = {{  0, -1 }, { -1,  0 }, {  4, -1 }, { -1,  2 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, {  2, -1 }},
        [BS_32x64] = {{ -1,  0 }, {  0, -1 }, { -1,  4 }, {  2, -1 },
                      { -1, -1 }, { -3,  0 }, {  0, -3 }, { -1,  2 }},
        [BS_32x32] = {{  1, -1 }, { -1,  1 }, {  2, -1 }, { -1,  2 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_32x16] = {{  0, -1 }, { -1,  0 }, {  2, -1 }, { -1, -1 },
                      { -1,  1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_16x32] = {{ -1,  0 }, {  0, -1 }, { -1,  2 }, { -1, -1 },
                      {  1, -1 }, { -3,  0 }, {  0, -3 }, { -3, -3 }},
        [BS_16x16] = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1,  1 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_16x8]  = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1, -1 },
                      {  0, -2 }, { -2,  0 }, { -2, -1 }, { -1, -2 }},
        [BS_8x16]  = {{ -1,  0 }, {  0, -1 }, { -1,  1 }, { -1, -1 },
                      { -2,  0 }, {  0, -2 }, { -1, -2 }, { -2, -1 }},
        [BS_8x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_8x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_4x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_4x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
    };
    VP9Block *b = s->b;
    int row = s->row, col = s->col, row7 = s->row7;
    const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
#define INVALID_MV 0x80008000U
    uint32_t mem = INVALID_MV, mem_sub8x8 = INVALID_MV;
    int i;

/* Accept an already-decoded sub-block MV without clamping: return it when
 * idx == 0, otherwise remember the first and return the second distinct one. */
#define RETURN_DIRECT_MV(mv)                    \
    do {                                        \
        uint32_t m = AV_RN32A(&mv);             \
        if (!idx) {                             \
            AV_WN32A(pmv, m);                   \
            return;                             \
        } else if (mem == INVALID_MV) {         \
            mem = m;                            \
        } else if (m != mem) {                  \
            AV_WN32A(pmv, m);                   \
            return;                             \
        }                                       \
    } while (0)

    if (sb >= 0) {
        /* for sub-8x8 sub-blocks, earlier sub-block MVs take priority */
        if (sb == 2 || sb == 1) {
            RETURN_DIRECT_MV(b->mv[0][z]);
        } else if (sb == 3) {
            RETURN_DIRECT_MV(b->mv[2][z]);
            RETURN_DIRECT_MV(b->mv[1][z]);
            RETURN_DIRECT_MV(b->mv[0][z]);
        }

/* Accept a neighbour MV candidate. For the sub-8x8 second-candidate case
 * (sb > 0) the comparison is done on the clamped value but the remembered
 * one is unclamped; otherwise behaves like RETURN_DIRECT_MV with clamping. */
#define RETURN_MV(mv)                                                  \
    do {                                                               \
        if (sb > 0) {                                                  \
            VP56mv tmp;                                                \
            uint32_t m;                                                \
            av_assert2(idx == 1);                                      \
            av_assert2(mem != INVALID_MV);                             \
            if (mem_sub8x8 == INVALID_MV) {                            \
                clamp_mv(&tmp, &mv, s);                                \
                m = AV_RN32A(&tmp);                                    \
                if (m != mem) {                                        \
                    AV_WN32A(pmv, m);                                  \
                    return;                                            \
                }                                                      \
                mem_sub8x8 = AV_RN32A(&mv);                            \
            } else if (mem_sub8x8 != AV_RN32A(&mv)) {                  \
                clamp_mv(&tmp, &mv, s);                                \
                m = AV_RN32A(&tmp);                                    \
                if (m != mem) {                                        \
                    AV_WN32A(pmv, m);                                  \
                } else {                                               \
                    /* BUG I'm pretty sure this isn't the intention */ \
                    AV_WN32A(pmv, 0);                                  \
                }                                                      \
                return;                                                \
            }                                                          \
        } else {                                                       \
            uint32_t m = AV_RN32A(&mv);                                \
            if (!idx) {                                                \
                clamp_mv(pmv, &mv, s);                                 \
                return;                                                \
            } else if (mem == INVALID_MV) {                            \
                mem = m;                                               \
            } else if (m != mem) {                                     \
                clamp_mv(pmv, &mv, s);                                 \
                return;                                                \
            }                                                          \
        }                                                              \
    } while (0)

        /* immediate above neighbour (uses the cached above-MV context) */
        if (row > 0) {
            struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col];
            if (mv->ref[0] == ref) {
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
            }
        }
        /* immediate left neighbour, within the current tile column */
        if (col > s->tile_col_start) {
            struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1];
            if (mv->ref[0] == ref) {
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
            }
        }
        i = 2; // first two table entries already covered above
    } else {
        i = 0;
    }

    // previously coded MVs in this neighbourhood, using same reference frame
    for (; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];

            if (mv->ref[0] == ref) {
                RETURN_MV(mv->mv[0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(mv->mv[1]);
            }
        }
    }

    // MV at this position in previous frame, using same reference frame
    if (s->s.h.use_last_frame_mvs) {
        struct VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];

        /* single-pass decoding needs the reference row to be finished first */
        if (!s->s.frames[REF_FRAME_MVPAIR].uses_2pass)
            ff_thread_await_progress(&s->s.frames[REF_FRAME_MVPAIR].tf, row >> 3, 0);
        if (mv->ref[0] == ref) {
            RETURN_MV(mv->mv[0]);
        } else if (mv->ref[1] == ref) {
            RETURN_MV(mv->mv[1]);
        }
    }

/* Accept an MV that used a different reference, negating it when the two
 * references have opposite temporal sign bias. */
#define RETURN_SCALE_MV(mv, scale)              \
    do {                                        \
        if (scale) {                            \
            VP56mv mv_temp = { -mv.x, -mv.y };  \
            RETURN_MV(mv_temp);                 \
        } else {                                \
            RETURN_MV(mv);                      \
        }                                       \
    } while (0)

    // previously coded MVs in this neighbourhood, using different reference frame
    for (i = 0; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];

            if (mv->ref[0] != ref && mv->ref[0] >= 0) {
                RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
            }
            if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
                // BUG - libvpx has this condition regardless of whether
                // we used the first ref MV and pre-scaling
                AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
                RETURN_SCALE_MV(mv->mv[1], s->s.h.signbias[mv->ref[1]] != s->s.h.signbias[ref]);
            }
        }
    }

    // MV at this position in previous frame, using different reference frame
    if (s->s.h.use_last_frame_mvs) {
        struct VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];

        // no need to await_progress, because we already did that above
        if (mv->ref[0] != ref && mv->ref[0] >= 0) {
            RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
        }
        if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
            // BUG - libvpx has this condition regardless of whether
            // we used the first ref MV and pre-scaling
            AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
            RETURN_SCALE_MV(mv->mv[1], s->s.h.signbias[mv->ref[1]] != s->s.h.signbias[ref]);
        }
    }

    /* no candidate found: predict a zero MV */
    AV_ZERO32(pmv);
    clamp_mv(pmv, pmv, s);
#undef INVALID_MV
#undef RETURN_MV
#undef RETURN_SCALE_MV
}
/**
 * Decode one motion-vector component residual from the range coder.
 *
 * The magnitude is coded as a class (roughly the log2 of the integer part),
 * per-class offset bits, a 2-bit fractional (1/4-pel) part and, when hp is
 * set, a high-precision (1/8-pel) bit; when hp is 0 the low bit is forced
 * to 1. All decoded symbols are also tallied into s->counts for backward
 * probability adaptation.
 *
 * @param idx selects the probability/count set: 0 is used for the row (y)
 *            component, 1 for the column (x) component (see ff_vp9_fill_mv())
 * @param hp  whether the 1/8-pel precision bit is coded
 * @return the signed component residual in 1/8-pel units (never 0)
 */
static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
{
    int bit, sign = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].sign);
    int n, c = vp8_rac_get_tree(&s->c, ff_vp9_mv_class_tree,
                                s->prob.p.mv_comp[idx].classes);

    s->counts.mv_comp[idx].sign[sign]++;
    s->counts.mv_comp[idx].classes[c]++;
    if (c) {
        int m;

        /* classes >= 1: read c raw offset bits, LSB first */
        for (n = 0, m = 0; m < c; m++) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].bits[m]);
            n |= bit << m;
            s->counts.mv_comp[idx].bits[m][bit]++;
        }
        n <<= 3; /* make room for fp (bits 2-1) and hp (bit 0) */
        bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree, s->prob.p.mv_comp[idx].fp);
        n |= bit << 1;
        s->counts.mv_comp[idx].fp[bit]++;
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].hp);
            s->counts.mv_comp[idx].hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].hp[1]++;
        }
        n += 8 << c; /* add the class base offset (in 1/8-pel units) */
    } else {
        /* class 0: a single integer bit with its own fp/hp probabilities */
        n = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0);
        s->counts.mv_comp[idx].class0[n]++;
        bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree,
                               s->prob.p.mv_comp[idx].class0_fp[n]);
        s->counts.mv_comp[idx].class0_fp[n][bit]++;
        n = (n << 3) | (bit << 1);
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0_hp);
            s->counts.mv_comp[idx].class0_hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].class0_hp[1]++;
        }
    }

    return sign ? -(n + 1) : (n + 1);
}
void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb)
{
VP9Block *b = s->b;
if (mode == ZEROMV) {
AV_ZERO64(mv);
} else {
int hp;
// FIXME cache this value and reuse for other subblocks
find_ref_mvs(s, &mv[0], b->ref[0], 0, mode == NEARMV,
mode == NEWMV ? -1 : sb);
// FIXME maybe move this code into find_ref_mvs()
if ((mode == NEWMV || sb == -1) &&
!(hp = s->s.h.highprecisionmvs && abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
if (mv[0].y & 1) {
if (mv[0].y < 0)
mv[0].y++;
else
mv[0].y--;
}
if (mv[0].x & 1) {
if (mv[0].x < 0)
mv[0].x++;
else
mv[0].x--;
}
}
if (mode == NEWMV) {
enum MVJoint j = vp8_rac_get_tree(&s->c, ff_vp9_mv_joint_tree,
s->prob.p.mv_joint);
s->counts.mv_joint[j]++;
if (j >= MV_JOINT_V)
mv[0].y += read_mv_component(s, 0, hp);
if (j & 1)
mv[0].x += read_mv_component(s, 1, hp);
}
if (b->comp) {
// FIXME cache this value and reuse for other subblocks
find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
mode == NEWMV ? -1 : sb);
if ((mode == NEWMV || sb == -1) &&
!(hp = s->s.h.highprecisionmvs && abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
if (mv[1].y & 1) {
if (mv[1].y < 0)
mv[1].y++;
else
mv[1].y--;
}
if (mv[1].x & 1) {
if (mv[1].x < 0)
mv[1].x++;
else
mv[1].x--;
}
}
if (mode == NEWMV) {
enum MVJoint j = vp8_rac_get_tree(&s->c, ff_vp9_mv_joint_tree,
s->prob.p.mv_joint);
s->counts.mv_joint[j]++;
if (j >= MV_JOINT_V)
mv[1].y += read_mv_component(s, 0, hp);
if (j & 1)
mv[1].x += read_mv_component(s, 1, hp);
}
}
}
}

265
libavcodec/vp9prob.c Normal file
View File

@ -0,0 +1,265 @@
/*
* VP9 compatible video decoder
*
* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
* Copyright (C) 2013 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "vp56.h"
#include "vp9.h"
#include "vp9data.h"
/**
 * Blend a stored probability towards the maximum-likelihood estimate
 * derived from the observed (ct0, ct1) symbol counts. The blend weight is
 * update_factor/256, scaled down further when fewer than max_count symbols
 * were seen. No-op when nothing was observed.
 */
static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
                                        int max_count, int update_factor)
{
    unsigned total = ct0 + ct1;
    unsigned old_p, ml_p;

    if (total == 0)
        return; /* no evidence this frame; keep the probability unchanged */

    /* weight the update by the amount of evidence, capped at max_count */
    update_factor = FASTDIV(update_factor * FFMIN(total, max_count), max_count);

    old_p = *p;
    /* rounded ML estimate of P(symbol == 0) in 8-bit precision, kept in 1..255 */
    ml_p = ((((int64_t) ct0) << 8) + (total >> 1)) / total;
    ml_p = av_clip(ml_p, 1, 255);

    /* equivalent to (old_p * (256 - update_factor) + ml_p * update_factor + 128) >> 8 */
    *p = old_p + (((ml_p - old_p) * update_factor + 128) >> 8);
}
void ff_vp9_adapt_probs(VP9Context *s)
{
int i, j, k, l, m;
ProbContext *p = &s->prob_ctx[s->s.h.framectxid].p;
int uf = (s->s.h.keyframe || s->s.h.intraonly || !s->last_keyframe) ? 112 : 128;
// coefficients
for (i = 0; i < 4; i++)
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
for (l = 0; l < 6; l++)
for (m = 0; m < 6; m++) {
uint8_t *pp = s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m];
unsigned *e = s->counts.eob[i][j][k][l][m];
unsigned *c = s->counts.coef[i][j][k][l][m];
if (l == 0 && m >= 3) // dc only has 3 pt
break;
adapt_prob(&pp[0], e[0], e[1], 24, uf);
adapt_prob(&pp[1], c[0], c[1] + c[2], 24, uf);
adapt_prob(&pp[2], c[1], c[2], 24, uf);
}
if (s->s.h.keyframe || s->s.h.intraonly) {
memcpy(p->skip, s->prob.p.skip, sizeof(p->skip));
memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p));
memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p));
memcpy(p->tx8p, s->prob.p.tx8p, sizeof(p->tx8p));
return;
}
// skip flag
for (i = 0; i < 3; i++)
adapt_prob(&p->skip[i], s->counts.skip[i][0], s->counts.skip[i][1], 20, 128);
// intra/inter flag
for (i = 0; i < 4; i++)
adapt_prob(&p->intra[i], s->counts.intra[i][0], s->counts.intra[i][1], 20, 128);
// comppred flag
if (s->s.h.comppredmode == PRED_SWITCHABLE) {
for (i = 0; i < 5; i++)
adapt_prob(&p->comp[i], s->counts.comp[i][0], s->counts.comp[i][1], 20, 128);
}
// reference frames
    /* NOTE(review): this is the tail of the backward probability-adaptation
     * pass (the function header and the first half are above this excerpt).
     * Each adapt_prob() call blends the symbol counts collected while
     * decoding the previous frame into the corresponding coding probability;
     * every call here passes the same two trailing constants (20, 128).
     * For multi-symbol trees, node k is adapted against the sum of counts of
     * all leaves in its "right" subtree, i.e. p(symbol k | not symbols 0..k-1). */

    /* Reference-frame selection: only adapt the probability sets that could
     * actually have been used under the frame's compound-prediction mode. */
    if (s->s.h.comppredmode != PRED_SINGLEREF) {
        for (i = 0; i < 5; i++)
            adapt_prob(&p->comp_ref[i], s->counts.comp_ref[i][0],
                       s->counts.comp_ref[i][1], 20, 128);
    }
    if (s->s.h.comppredmode != PRED_COMPREF) {
        for (i = 0; i < 5; i++) {
            uint8_t *pp = p->single_ref[i];
            unsigned (*c)[2] = s->counts.single_ref[i];
            adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
            adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
        }
    }

    // block partitioning
    /* 4 block sizes x 4 above/left contexts; 4-leaf tree => 3 node probs. */
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++) {
            uint8_t *pp = p->partition[i][j];
            unsigned *c = s->counts.partition[i][j];
            adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
            adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
            adapt_prob(&pp[2], c[2], c[3], 20, 128);
        }

    // tx size
    /* Only adapted when the frame signals a per-block (switchable) transform
     * size; 2 contexts, with 2-/3-/4-leaf trees for 8x8/16x16/32x32 caps. */
    if (s->s.h.txfmmode == TX_SWITCHABLE) {
        for (i = 0; i < 2; i++) {
            unsigned *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i];

            adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0], s->counts.tx8p[i][1], 20, 128);
            adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
            adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
            adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
            adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
            adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
        }
    }

    // interpolation filter
    /* Per-block filter choice only exists in switchable-filter frames;
     * 4 contexts, 3-leaf tree. */
    if (s->s.h.filtermode == FILTER_SWITCHABLE) {
        for (i = 0; i < 4; i++) {
            uint8_t *pp = p->filter[i];
            unsigned *c = s->counts.filter[i];

            adapt_prob(&pp[0], c[0], c[1] + c[2], 20, 128);
            adapt_prob(&pp[1], c[1], c[2], 20, 128);
        }
    }

    // inter modes
    /* 7 contexts; note the tree order: the head node uses count index 2,
     * i.e. the counts array is not stored in tree-symbol order here. */
    for (i = 0; i < 7; i++) {
        uint8_t *pp = p->mv_mode[i];
        unsigned *c = s->counts.mv_mode[i];

        adapt_prob(&pp[0], c[2], c[1] + c[0] + c[3], 20, 128);
        adapt_prob(&pp[1], c[0], c[1] + c[3], 20, 128);
        adapt_prob(&pp[2], c[1], c[3], 20, 128);
    }

    // mv joints
    /* 4-leaf tree selecting which of the two MV components are nonzero. */
    {
        uint8_t *pp = p->mv_joint;
        unsigned *c = s->counts.mv_joint;

        adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
        adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
        adapt_prob(&pp[2], c[2], c[3], 20, 128);
    }

    // mv components
    /* Adapted independently for the two components (i = 0/1). */
    for (i = 0; i < 2; i++) {
        uint8_t *pp;
        unsigned *c, (*c2)[2], sum;

        adapt_prob(&p->mv_comp[i].sign, s->counts.mv_comp[i].sign[0],
                   s->counts.mv_comp[i].sign[1], 20, 128);

        /* 11-leaf magnitude-class tree: 'sum' tracks the count mass of the
         * as-yet-undecided right subtree and is reduced as the walk descends.
         * Statement order is load-bearing — each subtraction must precede the
         * adapt_prob() that consumes the reduced sum. */
        pp = p->mv_comp[i].classes;
        c = s->counts.mv_comp[i].classes;
        sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
        adapt_prob(&pp[0], c[0], sum, 20, 128);
        sum -= c[1];
        adapt_prob(&pp[1], c[1], sum, 20, 128);
        sum -= c[2] + c[3];
        adapt_prob(&pp[2], c[2] + c[3], sum, 20, 128);
        adapt_prob(&pp[3], c[2], c[3], 20, 128);
        sum -= c[4] + c[5];
        adapt_prob(&pp[4], c[4] + c[5], sum, 20, 128);
        adapt_prob(&pp[5], c[4], c[5], 20, 128);
        sum -= c[6];
        adapt_prob(&pp[6], c[6], sum, 20, 128);
        adapt_prob(&pp[7], c[7] + c[8], c[9] + c[10], 20, 128);
        adapt_prob(&pp[8], c[7], c[8], 20, 128);
        adapt_prob(&pp[9], c[9], c[10], 20, 128);

        adapt_prob(&p->mv_comp[i].class0, s->counts.mv_comp[i].class0[0],
                   s->counts.mv_comp[i].class0[1], 20, 128);

        /* 10 independent binary magnitude bits. */
        pp = p->mv_comp[i].bits;
        c2 = s->counts.mv_comp[i].bits;
        for (j = 0; j < 10; j++)
            adapt_prob(&pp[j], c2[j][0], c2[j][1], 20, 128);

        /* Fractional-pel trees: one per class0 bit, plus the generic one. */
        for (j = 0; j < 2; j++) {
            pp = p->mv_comp[i].class0_fp[j];
            c = s->counts.mv_comp[i].class0_fp[j];
            adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
            adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
            adapt_prob(&pp[2], c[2], c[3], 20, 128);
        }
        pp = p->mv_comp[i].fp;
        c = s->counts.mv_comp[i].fp;
        adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
        adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
        adapt_prob(&pp[2], c[2], c[3], 20, 128);

        /* Half-pel bits only carry information when the frame allows
         * high-precision MVs, so skip adaptation otherwise. */
        if (s->s.h.highprecisionmvs) {
            adapt_prob(&p->mv_comp[i].class0_hp, s->counts.mv_comp[i].class0_hp[0],
                       s->counts.mv_comp[i].class0_hp[1], 20, 128);
            adapt_prob(&p->mv_comp[i].hp, s->counts.mv_comp[i].hp[0],
                       s->counts.mv_comp[i].hp[1], 20, 128);
        }
    }

    // y intra modes
    /* 10-leaf intra-mode tree, one per block-size group.  The initial 'sum'
     * deliberately omits count index 2 — presumably c[DC_PRED], the head of
     * the tree, since the first node pits c[DC_PRED] against everything
     * else — TODO(review): confirm DC_PRED == 2 against the mode enum. */
    for (i = 0; i < 4; i++) {
        uint8_t *pp = p->y_mode[i];
        unsigned *c = s->counts.y_mode[i], sum, s2;

        sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
        adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
        sum -= c[TM_VP8_PRED];
        adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
        sum -= c[VERT_PRED];
        adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
        /* s2 groups the three modes sharing the next subtree. */
        s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
        sum -= s2;
        adapt_prob(&pp[3], s2, sum, 20, 128);
        s2 -= c[HOR_PRED];
        adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
        sum -= c[DIAG_DOWN_LEFT_PRED];
        adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
        sum -= c[VERT_LEFT_PRED];
        adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
        adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
    }

    // uv intra modes
    /* Same 10-leaf tree walk as y_mode above, but conditioned on the y mode
     * (10 contexts) rather than the block-size group. */
    for (i = 0; i < 10; i++) {
        uint8_t *pp = p->uv_mode[i];
        unsigned *c = s->counts.uv_mode[i], sum, s2;

        sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
        adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
        sum -= c[TM_VP8_PRED];
        adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
        sum -= c[VERT_PRED];
        adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
        s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
        sum -= s2;
        adapt_prob(&pp[3], s2, sum, 20, 128);
        s2 -= c[HOR_PRED];
        adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
        sum -= c[DIAG_DOWN_LEFT_PRED];
        adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
        sum -= c[VERT_LEFT_PRED];
        adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
        adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
    }
}

View File

@@ -24,7 +24,7 @@
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "libavcodec/x86/vp9dsp_init.h"
#if HAVE_YASM

View File

@@ -23,7 +23,7 @@
#ifndef AVCODEC_X86_VP9DSP_INIT_H
#define AVCODEC_X86_VP9DSP_INIT_H
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
// hack to force-expand BPC
#define cat(a, bpp, b) a##bpp##b

View File

@@ -24,7 +24,7 @@
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "libavcodec/x86/vp9dsp_init.h"
#if HAVE_YASM

View File

@@ -24,7 +24,7 @@
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "libavcodec/x86/vp9dsp_init.h"
#if HAVE_YASM

View File

@@ -22,7 +22,7 @@
#include <string.h>
#include "checkasm.h"
#include "libavcodec/vp9data.h"
#include "libavcodec/vp9dsp.h"
#include "libavcodec/vp9.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
@@ -259,7 +259,7 @@ static int copy_subcoefs(int16_t *out, const int16_t *in, enum TxfmMode tx,
// test
int n;
const int16_t *scan = vp9_scans[tx][txtp];
const int16_t *scan = ff_vp9_scans[tx][txtp];
int eob;
for (n = 0; n < sz * sz; n++) {