lossless_videodsp: move shared functions from huffyuvdsp

Several codecs other than huffyuv use them.

Signed-off-by: James Almer <jamrial@gmail.com>
Author: James Almer <jamrial@gmail.com>
Date:   2017-01-07 19:04:39 -03:00
commit 5ac1dd8e23
parent 3222786c5a
17 changed files with 406 additions and 401 deletions
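The moved helpers keep their behaviour and signatures; only the hfyu infix is dropped as they land in LLVidDSPContext (add_hfyu_left_pred becomes add_left_pred, add_hfyu_median_pred becomes add_median_pred, add_bytes moves unchanged), and callers switch from ff_huffyuvdsp_init() to ff_llviddsp_init(), which also receives the AVCodecContext. A minimal sketch of the resulting decoder-side pattern; ExampleContext and both functions are illustrative, not part of the commit:

#include "avcodec.h"
#include "lossless_videodsp.h"

/* Hypothetical decoder state; only the llviddsp member mirrors the commit. */
typedef struct ExampleContext {
    LLVidDSPContext llviddsp;
} ExampleContext;

static int example_init(AVCodecContext *avctx)
{
    ExampleContext *s = avctx->priv_data;
    /* The init now receives avctx so the x86 code can inspect the pixel
     * format (see lossless_videodsp_init.c at the end of this diff). */
    ff_llviddsp_init(&s->llviddsp, avctx);
    return 0;
}

static void example_undo_left_pred(ExampleContext *s, uint8_t *dst,
                                   const uint8_t *src, int w)
{
    /* Formerly s->hdsp.add_hfyu_left_pred(); the return value is the
     * running accumulator, usable as the next call's starting value. */
    int acc = s->llviddsp.add_left_pred(dst, src, w, 0);
    (void)acc;
}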

configure

@@ -2440,10 +2440,10 @@ interplay_video_decoder_select="hpeldsp"
jpegls_decoder_select="golomb mjpeg_decoder"
jpegls_encoder_select="golomb"
jv_decoder_select="blockdsp"
-lagarith_decoder_select="huffyuvdsp"
+lagarith_decoder_select="llviddsp"
ljpeg_encoder_select="aandcttables idctdsp jpegtables mpegvideoenc"
loco_decoder_select="golomb"
-magicyuv_decoder_select="huffyuvdsp llviddsp"
+magicyuv_decoder_select="llviddsp"
mdec_decoder_select="blockdsp idctdsp mpegvideo"
metasound_decoder_select="lsp mdct sinewin"
mimic_decoder_select="blockdsp bswapdsp hpeldsp idctdsp"
@@ -2533,9 +2533,9 @@ truespeech_decoder_select="bswapdsp"
tscc_decoder_select="zlib"
twinvq_decoder_select="mdct lsp sinewin"
txd_decoder_select="texturedsp"
-utvideo_decoder_select="bswapdsp huffyuvdsp"
+utvideo_decoder_select="bswapdsp llviddsp"
utvideo_encoder_select="bswapdsp huffman huffyuvencdsp"
-vble_decoder_select="huffyuvdsp"
+vble_decoder_select="llviddsp"
vc1_decoder_select="blockdsp h263_decoder h264qpel intrax8 mpegvideo vc1dsp"
vc1_qsv_decoder_deps="libmfx"
vc1_qsv_decoder_select="qsvdec vc1_qsv_hwaccel"

libavcodec/huffyuvdec.c

@@ -36,6 +36,7 @@
#include "get_bits.h"
#include "huffyuv.h"
#include "huffyuvdsp.h"
+#include "lossless_videodsp.h"
#include "thread.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
@@ -878,7 +879,7 @@ static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
{
if (s->bps <= 8) {
-return s->hdsp.add_hfyu_left_pred(dst, src, w, acc);
+return s->llviddsp.add_left_pred(dst, src, w, acc);
} else {
return s->llviddsp.add_hfyu_left_pred_int16(( uint16_t *)dst, (const uint16_t *)src, s->n-1, w, acc);
}
@@ -887,7 +888,7 @@ static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int
static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
{
if (s->bps <= 8) {
-s->hdsp.add_bytes(dst, src, w);
+s->llviddsp.add_bytes(dst, src, w);
} else {
s->llviddsp.add_int16((uint16_t*)dst, (const uint16_t*)src, s->n - 1, w);
}
@@ -896,7 +897,7 @@ static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
{
if (s->bps <= 8) {
-s->hdsp.add_hfyu_median_pred(dst, src, diff, w, left, left_top);
+s->llviddsp.add_median_pred(dst, src, diff, w, left, left_top);
} else {
s->llviddsp.add_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src, (const uint16_t *)diff, s->n-1, w, left, left_top);
}
@@ -1038,11 +1039,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
case LEFT:
case PLANE:
decode_422_bitstream(s, width - 2);
-lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + 2, s->temp[0],
+lefty = s->llviddsp.add_left_pred(p->data[0] + 2, s->temp[0],
width - 2, lefty);
if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
-leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
-leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
+leftu = s->llviddsp.add_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
+leftv = s->llviddsp.add_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
}
for (cy = y = 1; y < s->height; y++, cy++) {
@@ -1053,11 +1054,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
ydst = p->data[0] + p->linesize[0] * y;
-lefty = s->hdsp.add_hfyu_left_pred(ydst, s->temp[0],
+lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
width, lefty);
if (s->predictor == PLANE) {
if (y > s->interlaced)
-s->hdsp.add_bytes(ydst, ydst - fake_ystride, width);
+s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
}
y++;
if (y >= s->height)
@@ -1071,18 +1072,18 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
vdst = p->data[2] + p->linesize[2] * cy;
decode_422_bitstream(s, width);
-lefty = s->hdsp.add_hfyu_left_pred(ydst, s->temp[0],
+lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
width, lefty);
if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
-leftu = s->hdsp.add_hfyu_left_pred(udst, s->temp[1], width2, leftu);
-leftv = s->hdsp.add_hfyu_left_pred(vdst, s->temp[2], width2, leftv);
+leftu = s->llviddsp.add_left_pred(udst, s->temp[1], width2, leftu);
+leftv = s->llviddsp.add_left_pred(vdst, s->temp[2], width2, leftv);
}
if (s->predictor == PLANE) {
if (cy > s->interlaced) {
-s->hdsp.add_bytes(ydst, ydst - fake_ystride, width);
+s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
-s->hdsp.add_bytes(udst, udst - fake_ustride, width2);
-s->hdsp.add_bytes(vdst, vdst - fake_vstride, width2);
+s->llviddsp.add_bytes(udst, udst - fake_ustride, width2);
+s->llviddsp.add_bytes(vdst, vdst - fake_vstride, width2);
}
}
}
@@ -1093,11 +1094,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
case MEDIAN:
/* first line except first 2 pixels is left predicted */
decode_422_bitstream(s, width - 2);
-lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + 2, s->temp[0],
+lefty = s->llviddsp.add_left_pred(p->data[0] + 2, s->temp[0],
width - 2, lefty);
if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
-leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
-leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
+leftu = s->llviddsp.add_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
+leftv = s->llviddsp.add_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
}
cy = y = 1;
@@ -1105,11 +1106,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
/* second line is left predicted for interlaced case */
if (s->interlaced) {
decode_422_bitstream(s, width);
-lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + p->linesize[0],
+lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0],
s->temp[0], width, lefty);
if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
-leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
-leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
+leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
+leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
}
y++;
cy++;
@@ -1117,24 +1118,24 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
/* next 4 pixels are left predicted too */
decode_422_bitstream(s, 4);
-lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + fake_ystride,
+lefty = s->llviddsp.add_left_pred(p->data[0] + fake_ystride,
s->temp[0], 4, lefty);
if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
-leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
-leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
+leftu = s->llviddsp.add_left_pred(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
+leftv = s->llviddsp.add_left_pred(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
}
/* next line except the first 4 pixels is median predicted */
lefttopy = p->data[0][3];
decode_422_bitstream(s, width - 4);
-s->hdsp.add_hfyu_median_pred(p->data[0] + fake_ystride + 4,
+s->llviddsp.add_median_pred(p->data[0] + fake_ystride + 4,
p->data[0] + 4, s->temp[0],
width - 4, &lefty, &lefttopy);
if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
lefttopu = p->data[1][1];
lefttopv = p->data[2][1];
-s->hdsp.add_hfyu_median_pred(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
-s->hdsp.add_hfyu_median_pred(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
+s->llviddsp.add_median_pred(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
+s->llviddsp.add_median_pred(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
}
y++;
cy++;
@@ -1146,7 +1147,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
while (2 * cy > y) {
decode_gray_bitstream(s, width);
ydst = p->data[0] + p->linesize[0] * y;
-s->hdsp.add_hfyu_median_pred(ydst, ydst - fake_ystride,
+s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
s->temp[0], width,
&lefty, &lefttopy);
y++;
@@ -1162,12 +1163,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
udst = p->data[1] + p->linesize[1] * cy;
vdst = p->data[2] + p->linesize[2] * cy;
-s->hdsp.add_hfyu_median_pred(ydst, ydst - fake_ystride,
+s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
s->temp[0], width,
&lefty, &lefttopy);
if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
-s->hdsp.add_hfyu_median_pred(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
-s->hdsp.add_hfyu_median_pred(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
+s->llviddsp.add_median_pred(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
+s->llviddsp.add_median_pred(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
}
}
@@ -1210,7 +1211,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
if (s->bitstream_bpp != 32)
left[A] = 0;
if (y < s->height - 1 - s->interlaced) {
-s->hdsp.add_bytes(p->data[0] + p->linesize[0] * y,
+s->llviddsp.add_bytes(p->data[0] + p->linesize[0] * y,
p->data[0] + p->linesize[0] * y +
fake_ystride, 4 * width);
}

libavcodec/huffyuvdsp.c

@@ -23,64 +23,6 @@
#include "mathops.h"
#include "huffyuvdsp.h"
-// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
-#define pb_7f (~0UL / 255 * 0x7f)
-#define pb_80 (~0UL / 255 * 0x80)
-static void add_bytes_c(uint8_t *dst, uint8_t *src, intptr_t w)
-{
-long i;
-for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
-long a = *(long *) (src + i);
-long b = *(long *) (dst + i);
-*(long *) (dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
-}
-for (; i < w; i++)
-dst[i + 0] += src[i + 0];
-}
-static void add_hfyu_median_pred_c(uint8_t *dst, const uint8_t *src1,
-const uint8_t *diff, intptr_t w,
-int *left, int *left_top)
-{
-int i;
-uint8_t l, lt;
-l = *left;
-lt = *left_top;
-for (i = 0; i < w; i++) {
-l = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF) + diff[i];
-lt = src1[i];
-dst[i] = l;
-}
-*left = l;
-*left_top = lt;
-}
-static int add_hfyu_left_pred_c(uint8_t *dst, const uint8_t *src, intptr_t w,
-int acc)
-{
-int i;
-for (i = 0; i < w - 1; i++) {
-acc += src[i];
-dst[i] = acc;
-i++;
-acc += src[i];
-dst[i] = acc;
-}
-for (; i < w; i++) {
-acc += src[i];
-dst[i] = acc;
-}
-return acc;
-}
static void add_hfyu_left_pred_bgr32_c(uint8_t *dst, const uint8_t *src,
intptr_t w, uint8_t *left)
{
@@ -107,9 +49,6 @@ static void add_hfyu_left_pred_bgr32_c(uint8_t *dst, const uint8_t *src,
av_cold void ff_huffyuvdsp_init(HuffYUVDSPContext *c)
{
-c->add_bytes = add_bytes_c;
-c->add_hfyu_median_pred = add_hfyu_median_pred_c;
-c->add_hfyu_left_pred = add_hfyu_left_pred_c;
c->add_hfyu_left_pred_bgr32 = add_hfyu_left_pred_bgr32_c;
if (ARCH_X86)

libavcodec/huffyuvdsp.h

@@ -35,13 +35,6 @@
#endif
typedef struct HuffYUVDSPContext {
-void (*add_bytes)(uint8_t *dst /* align 16 */, uint8_t *src /* align 16 */,
-intptr_t w);
-void (*add_hfyu_median_pred)(uint8_t *dst, const uint8_t *top,
-const uint8_t *diff, intptr_t w,
-int *left, int *left_top);
-int (*add_hfyu_left_pred)(uint8_t *dst, const uint8_t *src,
-intptr_t w, int left);
void (*add_hfyu_left_pred_bgr32)(uint8_t *dst, const uint8_t *src,
intptr_t w, uint8_t *left);
} HuffYUVDSPContext;

libavcodec/lagarith.c

@@ -30,8 +30,8 @@
#include "avcodec.h"
#include "get_bits.h"
#include "mathops.h"
-#include "huffyuvdsp.h"
#include "lagarithrac.h"
+#include "lossless_videodsp.h"
#include "thread.h"
enum LagarithFrameType {
@@ -50,7 +50,7 @@ enum LagarithFrameType {
typedef struct LagarithContext {
AVCodecContext *avctx;
-HuffYUVDSPContext hdsp;
+LLVidDSPContext llviddsp;
int zeros; /**< number of consecutive zero bytes encountered */
int zeros_rem; /**< number of zero bytes remaining to output */
uint8_t *rgb_planes;
@@ -260,7 +260,7 @@ static void lag_pred_line(LagarithContext *l, uint8_t *buf,
if (!line) {
/* Left prediction only for first line */
-L = l->hdsp.add_hfyu_left_pred(buf, buf, width, 0);
+L = l->llviddsp.add_left_pred(buf, buf, width, 0);
} else {
/* Left pixel is actually prev_row[width] */
L = buf[width - stride - 1];
@@ -289,7 +289,7 @@ static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
L= buf[0];
if (is_luma)
buf[0] = 0;
-l->hdsp.add_hfyu_left_pred(buf, buf, width, 0);
+l->llviddsp.add_left_pred(buf, buf, width, 0);
if (is_luma)
buf[0] = L;
return;
@@ -312,7 +312,7 @@
} else {
TL = buf[width - (2 * stride) - 1];
L = buf[width - stride - 1];
-l->hdsp.add_hfyu_median_pred(buf, buf - stride, buf, width, &L, &TL);
+l->llviddsp.add_median_pred(buf, buf - stride, buf, width, &L, &TL);
}
}
@@ -725,7 +725,7 @@ static av_cold int lag_decode_init(AVCodecContext *avctx)
LagarithContext *l = avctx->priv_data;
l->avctx = avctx;
-ff_huffyuvdsp_init(&l->hdsp);
+ff_llviddsp_init(&l->llviddsp, avctx);
return 0;
}

libavcodec/lossless_videodsp.c

@@ -21,6 +21,64 @@
#include "lossless_videodsp.h"
#include "libavcodec/mathops.h"
+// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
+#define pb_7f (~0UL / 255 * 0x7f)
+#define pb_80 (~0UL / 255 * 0x80)
+static void add_bytes_c(uint8_t *dst, uint8_t *src, intptr_t w)
+{
+long i;
+for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
+long a = *(long *) (src + i);
+long b = *(long *) (dst + i);
+*(long *) (dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
+}
+for (; i < w; i++)
+dst[i + 0] += src[i + 0];
+}
+static void add_median_pred_c(uint8_t *dst, const uint8_t *src1,
+const uint8_t *diff, intptr_t w,
+int *left, int *left_top)
+{
+int i;
+uint8_t l, lt;
+l = *left;
+lt = *left_top;
+for (i = 0; i < w; i++) {
+l = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF) + diff[i];
+lt = src1[i];
+dst[i] = l;
+}
+*left = l;
+*left_top = lt;
+}
+static int add_left_pred_c(uint8_t *dst, const uint8_t *src, intptr_t w,
+int acc)
+{
+int i;
+for (i = 0; i < w - 1; i++) {
+acc += src[i];
+dst[i] = acc;
+i++;
+acc += src[i];
+dst[i] = acc;
+}
+for (; i < w; i++) {
+acc += src[i];
+dst[i] = acc;
+}
+return acc;
+}
static void add_int16_c(uint16_t *dst, const uint16_t *src, unsigned mask, int w){
long i;
unsigned long pw_lsb = (mask >> 1) * 0x0001000100010001ULL;
@@ -117,6 +175,10 @@ static int add_hfyu_left_pred_int16_c(uint16_t *dst, const uint16_t *src, unsign
void ff_llviddsp_init(LLVidDSPContext *c, AVCodecContext *avctx)
{
+c->add_bytes = add_bytes_c;
+c->add_median_pred = add_median_pred_c;
+c->add_left_pred = add_left_pred_c;
c->add_int16 = add_int16_c;
c->diff_int16= diff_int16_c;
c->add_hfyu_left_pred_int16 = add_hfyu_left_pred_int16_c;

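The add_bytes_c() moved in above performs sizeof(long) byte additions per iteration inside one machine word: masking both operands with pb_7f keeps every lane's carry confined below bit 7, and XORing with (a ^ b) & pb_80 restores each lane's top bit. A standalone sketch of that SWAR identity (illustrative, not part of the commit), cross-checked against a plain per-byte loop:

#include <assert.h>
#include <stdint.h>

#define PB_7F (~0UL / 255 * 0x7f)   /* 0x7f replicated into every byte */
#define PB_80 (~0UL / 255 * 0x80)   /* 0x80 replicated into every byte */

/* Lane-wise byte addition, each byte wrapping mod 256 independently. */
static unsigned long add_bytes_word(unsigned long a, unsigned long b)
{
    return ((a & PB_7F) + (b & PB_7F)) ^ ((a ^ b) & PB_80);
}

int main(void)
{
    unsigned long a = 0x80FF017FUL, b = 0x81017F02UL;
    unsigned long r = add_bytes_word(a, b);
    for (unsigned i = 0; i < sizeof(unsigned long); i++) {
        uint8_t ab = (uint8_t)(a >> (8 * i)), bb = (uint8_t)(b >> (8 * i));
        /* each result byte equals the plain per-byte sum */
        assert((uint8_t)(r >> (8 * i)) == (uint8_t)(ab + bb));
    }
    return 0;
}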
libavcodec/lossless_videodsp.h

@@ -26,6 +26,14 @@
#include "libavutil/cpu.h"
typedef struct LLVidDSPContext {
+void (*add_bytes)(uint8_t *dst /* align 16 */, uint8_t *src /* align 16 */,
+intptr_t w);
+void (*add_median_pred)(uint8_t *dst, const uint8_t *top,
+const uint8_t *diff, intptr_t w,
+int *left, int *left_top);
+int (*add_left_pred)(uint8_t *dst, const uint8_t *src,
+intptr_t w, int left);
void (*add_int16)(uint16_t *dst/*align 16*/, const uint16_t *src/*align 16*/, unsigned mask, int w);
void (*diff_int16)(uint16_t *dst/*align 16*/, const uint16_t *src1/*align 16*/, const uint16_t *src2/*align 1*/, unsigned mask, int w);
@@ -36,5 +44,6 @@ typedef struct LLVidDSPContext {
void ff_llviddsp_init(LLVidDSPContext *llviddsp, AVCodecContext *avctx);
void ff_llviddsp_init_x86(LLVidDSPContext *llviddsp, AVCodecContext *avctx);
+void ff_llviddsp_init_ppc(LLVidDSPContext *llviddsp, AVCodecContext *avctx);
#endif //AVCODEC_LOSSLESS_VIDEODSP_H

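add_median_pred(), declared above, reverses HuffYUV's median predictor: every output byte is the coded residual plus the median of the left pixel, the top pixel, and the gradient left + top - topleft (libavcodec's mid_pred() from mathops.h). A small self-contained model of one decoding step; mid_pred3() is an illustrative stand-in for mid_pred():

#include <stdio.h>
#include <stdint.h>

/* Middle value of three, equivalent to lavc's mid_pred(). */
static int mid_pred3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b      */
    if (b > c) b = c;                        /* b = min(b, c)      */
    return a > b ? a : b;                    /* max(a, min(b, c))  */
}

/* One pixel of the predictor: mirrors add_median_pred_c() in
 * lossless_videodsp.c above, including the & 0xFF on the gradient. */
static uint8_t decode_pixel(uint8_t left, uint8_t top, uint8_t tl,
                            uint8_t residual)
{
    int pred = mid_pred3(left, top, (left + top - tl) & 0xFF);
    return (uint8_t)(pred + residual);
}

int main(void)
{
    /* left=100, top=104, topleft=98 -> gradient 106, median 104, +3 = 107 */
    printf("%d\n", decode_pixel(100, 104, 98, 3));
    return 0;
}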
libavcodec/magicyuv.c

@@ -70,7 +70,6 @@ typedef struct MagicYUVContext {
int (*huff_build)(VLC *vlc, uint8_t *len);
int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
int j, int threadnr);
-HuffYUVDSPContext hdsp;
LLVidDSPContext llviddsp;
} MagicYUVContext;
@@ -353,24 +352,24 @@ static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
switch (pred) {
case LEFT:
dst = p->data[i] + j * sheight * stride;
-s->hdsp.add_hfyu_left_pred(dst, dst, width, 0);
+s->llviddsp.add_left_pred(dst, dst, width, 0);
dst += stride;
if (interlaced) {
-s->hdsp.add_hfyu_left_pred(dst, dst, width, 0);
+s->llviddsp.add_left_pred(dst, dst, width, 0);
dst += stride;
}
for (k = 1 + interlaced; k < height; k++) {
-s->hdsp.add_hfyu_left_pred(dst, dst, width, dst[-fake_stride]);
+s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
dst += stride;
}
break;
case GRADIENT:
dst = p->data[i] + j * sheight * stride;
-s->hdsp.add_hfyu_left_pred(dst, dst, width, 0);
+s->llviddsp.add_left_pred(dst, dst, width, 0);
left = lefttop = 0;
dst += stride;
if (interlaced) {
-s->hdsp.add_hfyu_left_pred(dst, dst, width, 0);
+s->llviddsp.add_left_pred(dst, dst, width, 0);
left = lefttop = 0;
dst += stride;
}
@@ -390,15 +389,15 @@ static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
case MEDIAN:
dst = p->data[i] + j * sheight * stride;
lefttop = left = dst[0];
-s->hdsp.add_hfyu_left_pred(dst, dst, width, 0);
+s->llviddsp.add_left_pred(dst, dst, width, 0);
dst += stride;
if (interlaced) {
lefttop = left = dst[0];
-s->hdsp.add_hfyu_left_pred(dst, dst, width, 0);
+s->llviddsp.add_left_pred(dst, dst, width, 0);
dst += stride;
}
for (k = 1 + interlaced; k < height; k++) {
-s->hdsp.add_hfyu_median_pred(dst, dst - fake_stride,
+s->llviddsp.add_median_pred(dst, dst - fake_stride,
dst, width, &left, &lefttop);
lefttop = left = dst[0];
dst += stride;
@@ -417,8 +416,8 @@ static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];
for (i = 0; i < height; i++) {
-s->hdsp.add_bytes(b, g, width);
-s->hdsp.add_bytes(r, g, width);
+s->llviddsp.add_bytes(b, g, width);
+s->llviddsp.add_bytes(r, g, width);
b += p->linesize[0];
g += p->linesize[1];
r += p->linesize[2];
@@ -698,7 +697,6 @@ static int magy_init_thread_copy(AVCodecContext *avctx)
static av_cold int magy_decode_init(AVCodecContext *avctx)
{
MagicYUVContext *s = avctx->priv_data;
-ff_huffyuvdsp_init(&s->hdsp);
ff_llviddsp_init(&s->llviddsp, avctx);
return 0;
}

libavcodec/ppc/Makefile

@@ -10,8 +10,8 @@ OBJS-$(CONFIG_H264CHROMA) += ppc/h264chroma_init.o
OBJS-$(CONFIG_H264DSP) += ppc/h264dsp.o ppc/hpeldsp_altivec.o
OBJS-$(CONFIG_H264QPEL) += ppc/h264qpel.o
OBJS-$(CONFIG_HPELDSP) += ppc/hpeldsp_altivec.o
-OBJS-$(CONFIG_HUFFYUVDSP) += ppc/huffyuvdsp_altivec.o
OBJS-$(CONFIG_IDCTDSP) += ppc/idctdsp.o
+OBJS-$(CONFIG_LLVIDDSP) += ppc/lossless_videodsp_altivec.o
OBJS-$(CONFIG_ME_CMP) += ppc/me_cmp.o
OBJS-$(CONFIG_MPEGAUDIODSP) += ppc/mpegaudiodsp_altivec.o
OBJS-$(CONFIG_MPEGVIDEO) += ppc/mpegvideo_altivec.o \

libavcodec/ppc/lossless_videodsp_altivec.c

@@ -30,7 +30,7 @@
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
-#include "libavcodec/huffyuvdsp.h"
+#include "libavcodec/lossless_videodsp.h"
#if HAVE_ALTIVEC
static void add_bytes_altivec(uint8_t *dst, uint8_t *src, intptr_t w)
@@ -51,7 +51,7 @@ static void add_bytes_altivec(uint8_t *dst, uint8_t *src, intptr_t w)
}
#endif /* HAVE_ALTIVEC */
-av_cold void ff_huffyuvdsp_init_ppc(HuffYUVDSPContext *c)
+av_cold void ff_llviddsp_init_ppc(LLVidDSPContext *c, AVCodecContext *avctx)
{
#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))

libavcodec/utvideo.h

@@ -30,8 +30,8 @@
#include "libavutil/common.h"
#include "avcodec.h"
#include "bswapdsp.h"
-#include "huffyuvdsp.h"
#include "huffyuvencdsp.h"
+#include "lossless_videodsp.h"
enum {
PRED_NONE = 0,
@@ -70,8 +70,8 @@ typedef struct UtvideoContext {
const AVClass *class;
AVCodecContext *avctx;
BswapDSPContext bdsp;
-HuffYUVDSPContext hdspdec;
HuffYUVEncDSPContext hdsp;
+LLVidDSPContext llviddsp;
uint32_t frame_info_size, flags, frame_info;
int planes;

libavcodec/utvideodec.c

@@ -396,7 +396,7 @@ static void restore_median_planar(UtvideoContext *c, uint8_t *src, int stride,
// first line - left neighbour prediction
bsrc[0] += 0x80;
-c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
+c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
bsrc += stride;
if (slice_height <= 1)
continue;
@@ -413,7 +413,7 @@ static void restore_median_planar(UtvideoContext *c, uint8_t *src, int stride,
bsrc += stride;
// the rest of lines use continuous median prediction
for (j = 2; j < slice_height; j++) {
-c->hdspdec.add_hfyu_median_pred(bsrc, bsrc - stride,
+c->llviddsp.add_median_pred(bsrc, bsrc - stride,
bsrc, width, &A, &B);
bsrc += stride;
}
@@ -446,8 +446,8 @@ static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, int stride
// first line - left neighbour prediction
bsrc[0] += 0x80;
-A = c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
-c->hdspdec.add_hfyu_left_pred(bsrc + stride, bsrc + stride, width, A);
+A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
+c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
bsrc += stride2;
if (slice_height <= 1)
continue;
@@ -461,14 +461,14 @@ static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, int stride
C = B;
A = bsrc[i];
}
-c->hdspdec.add_hfyu_median_pred(bsrc + stride, bsrc - stride,
+c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
bsrc + stride, width, &A, &B);
bsrc += stride2;
// the rest of lines use continuous median prediction
for (j = 2; j < slice_height; j++) {
-c->hdspdec.add_hfyu_median_pred(bsrc, bsrc - stride2,
+c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
bsrc, width, &A, &B);
-c->hdspdec.add_hfyu_median_pred(bsrc + stride, bsrc - stride,
+c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
bsrc + stride, width, &A, &B);
bsrc += stride2;
}
@@ -827,7 +827,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->avctx = avctx;
ff_bswapdsp_init(&c->bdsp);
-ff_huffyuvdsp_init(&c->hdspdec);
+ff_llviddsp_init(&c->llviddsp, avctx);
if (avctx->extradata_size >= 16) {
av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",

libavcodec/vble.c

@@ -29,14 +29,14 @@
#define BITSTREAM_READER_LE
#include "avcodec.h"
#include "get_bits.h"
-#include "huffyuvdsp.h"
#include "internal.h"
+#include "lossless_videodsp.h"
#include "mathops.h"
#include "thread.h"
typedef struct VBLEContext {
AVCodecContext *avctx;
-HuffYUVDSPContext hdsp;
+LLVidDSPContext llviddsp;
int size;
uint8_t *val; ///< This array first holds the lengths of vlc symbols and then their value.
@@ -102,8 +102,8 @@ static void vble_restore_plane(VBLEContext *ctx, AVFrame *pic,
if (i) {
left = 0;
left_top = dst[-stride];
-ctx->hdsp.add_hfyu_median_pred(dst, dst - stride, val,
-width, &left, &left_top);
+ctx->llviddsp.add_median_pred(dst, dst - stride, val,
+width, &left, &left_top);
} else {
dst[0] = val[0];
for (j = 1; j < width; j++)
@@ -185,7 +185,7 @@ static av_cold int vble_decode_init(AVCodecContext *avctx)
/* Stash for later use */
ctx->avctx = avctx;
-ff_huffyuvdsp_init(&ctx->hdsp);
+ff_llviddsp_init(&ctx->llviddsp, avctx);
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
avctx->bits_per_raw_sample = 8;

libavcodec/x86/huffyuvdsp.asm

@@ -22,196 +22,8 @@
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
-cextern pb_15
-pb_zzzzzzzz77777777: times 8 db -1
-pb_7: times 8 db 7
-pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
-pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
SECTION .text
-; void ff_add_hfyu_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
-; const uint8_t *diff, int w,
-; int *left, int *left_top)
-%macro HFYU_MEDIAN 0
-cglobal add_hfyu_median_pred, 6,6,8, dst, top, diff, w, left, left_top
-movu m0, [topq]
-mova m2, m0
-movd m4, [left_topq]
-LSHIFT m2, 1
-mova m1, m0
-por m4, m2
-movd m3, [leftq]
-psubb m0, m4 ; t-tl
-add dstq, wq
-add topq, wq
-add diffq, wq
-neg wq
-jmp .skip
-.loop:
-movu m4, [topq+wq]
-mova m0, m4
-LSHIFT m4, 1
-por m4, m1
-mova m1, m0 ; t
-psubb m0, m4 ; t-tl
-.skip:
-movu m2, [diffq+wq]
-%assign i 0
-%rep mmsize
-mova m4, m0
-paddb m4, m3 ; t-tl+l
-mova m5, m3
-pmaxub m3, m1
-pminub m5, m1
-pminub m3, m4
-pmaxub m3, m5 ; median
-paddb m3, m2 ; +residual
-%if i==0
-mova m7, m3
-LSHIFT m7, mmsize-1
-%else
-mova m6, m3
-RSHIFT m7, 1
-LSHIFT m6, mmsize-1
-por m7, m6
-%endif
-%if i<mmsize-1
-RSHIFT m0, 1
-RSHIFT m1, 1
-RSHIFT m2, 1
-%endif
-%assign i i+1
-%endrep
-movu [dstq+wq], m7
-add wq, mmsize
-jl .loop
-movzx r2d, byte [dstq-1]
-mov [leftq], r2d
-movzx r2d, byte [topq-1]
-mov [left_topq], r2d
-RET
-%endmacro
-%if ARCH_X86_32
-INIT_MMX mmxext
-HFYU_MEDIAN
-%endif
-INIT_XMM sse2
-HFYU_MEDIAN
-%macro ADD_HFYU_LEFT_LOOP 2 ; %1 = dst_is_aligned, %2 = src_is_aligned
-add srcq, wq
-add dstq, wq
-neg wq
-%%.loop:
-%if %2
-mova m1, [srcq+wq]
-%else
-movu m1, [srcq+wq]
-%endif
-mova m2, m1
-psllw m1, 8
-paddb m1, m2
-mova m2, m1
-pshufb m1, m3
-paddb m1, m2
-pshufb m0, m5
-mova m2, m1
-pshufb m1, m4
-paddb m1, m2
-%if mmsize == 16
-mova m2, m1
-pshufb m1, m6
-paddb m1, m2
-%endif
-paddb m0, m1
-%if %1
-mova [dstq+wq], m0
-%else
-movq [dstq+wq], m0
-movhps [dstq+wq+8], m0
-%endif
-add wq, mmsize
-jl %%.loop
-mov eax, mmsize-1
-sub eax, wd
-movd m1, eax
-pshufb m0, m1
-movd eax, m0
-RET
-%endmacro
-; int ff_add_hfyu_left_pred(uint8_t *dst, const uint8_t *src, int w, int left)
-INIT_MMX ssse3
-cglobal add_hfyu_left_pred, 3,3,7, dst, src, w, left
-.skip_prologue:
-mova m5, [pb_7]
-mova m4, [pb_zzzz3333zzzzbbbb]
-mova m3, [pb_zz11zz55zz99zzdd]
-movd m0, leftm
-psllq m0, 56
-ADD_HFYU_LEFT_LOOP 1, 1
-INIT_XMM sse4
-cglobal add_hfyu_left_pred, 3,3,7, dst, src, w, left
-mova m5, [pb_15]
-mova m6, [pb_zzzzzzzz77777777]
-mova m4, [pb_zzzz3333zzzzbbbb]
-mova m3, [pb_zz11zz55zz99zzdd]
-movd m0, leftm
-pslldq m0, 15
-test srcq, 15
-jnz .src_unaligned
-test dstq, 15
-jnz .dst_unaligned
-ADD_HFYU_LEFT_LOOP 1, 1
-.dst_unaligned:
-ADD_HFYU_LEFT_LOOP 0, 1
-.src_unaligned:
-ADD_HFYU_LEFT_LOOP 0, 0
-%macro ADD_BYTES 0
-cglobal add_bytes, 3,4,2, dst, src, w, size
-mov sizeq, wq
-and sizeq, -2*mmsize
-jz .2
-add dstq, sizeq
-add srcq, sizeq
-neg sizeq
-.1:
-mova m0, [srcq + sizeq]
-mova m1, [srcq + sizeq + mmsize]
-paddb m0, [dstq + sizeq]
-paddb m1, [dstq + sizeq + mmsize]
-mova [dstq + sizeq], m0
-mova [dstq + sizeq + mmsize], m1
-add sizeq, 2*mmsize
-jl .1
-.2:
-and wq, 2*mmsize-1
-jz .end
-add dstq, wq
-add srcq, wq
-neg wq
-.3:
-mov sizeb, [srcq + wq]
-add [dstq + wq], sizeb
-inc wq
-jl .3
-.end:
-REP_RET
-%endmacro
-%if ARCH_X86_32
-INIT_MMX mmx
-ADD_BYTES
-%endif
-INIT_XMM sse2
-ADD_BYTES
; void add_hfyu_left_pred_bgr32(uint8_t *dst, const uint8_t *src,
; intptr_t w, uint8_t *left)
%macro LEFT_BGR32 0

libavcodec/x86/huffyuvdsp_init.c

@@ -25,93 +25,20 @@
#include "libavutil/x86/cpu.h"
#include "libavcodec/huffyuvdsp.h"
-void ff_add_bytes_mmx(uint8_t *dst, uint8_t *src, intptr_t w);
-void ff_add_bytes_sse2(uint8_t *dst, uint8_t *src, intptr_t w);
-void ff_add_hfyu_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
-const uint8_t *diff, intptr_t w,
-int *left, int *left_top);
-void ff_add_hfyu_median_pred_sse2(uint8_t *dst, const uint8_t *top,
-const uint8_t *diff, intptr_t w,
-int *left, int *left_top);
-int ff_add_hfyu_left_pred_ssse3(uint8_t *dst, const uint8_t *src,
-intptr_t w, int left);
-int ff_add_hfyu_left_pred_sse4(uint8_t *dst, const uint8_t *src,
-intptr_t w, int left);
void ff_add_hfyu_left_pred_bgr32_mmx(uint8_t *dst, const uint8_t *src,
intptr_t w, uint8_t *left);
void ff_add_hfyu_left_pred_bgr32_sse2(uint8_t *dst, const uint8_t *src,
intptr_t w, uint8_t *left);
-#if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32
-static void add_hfyu_median_pred_cmov(uint8_t *dst, const uint8_t *top,
-const uint8_t *diff, intptr_t w,
-int *left, int *left_top)
-{
-x86_reg w2 = -w;
-x86_reg x;
-int l = *left & 0xff;
-int tl = *left_top & 0xff;
-int t;
-__asm__ volatile (
-"mov %7, %3 \n"
-"1: \n"
-"movzbl (%3, %4), %2 \n"
-"mov %2, %k3 \n"
-"sub %b1, %b3 \n"
-"add %b0, %b3 \n"
-"mov %2, %1 \n"
-"cmp %0, %2 \n"
-"cmovg %0, %2 \n"
-"cmovg %1, %0 \n"
-"cmp %k3, %0 \n"
-"cmovg %k3, %0 \n"
-"mov %7, %3 \n"
-"cmp %2, %0 \n"
-"cmovl %2, %0 \n"
-"add (%6, %4), %b0 \n"
-"mov %b0, (%5, %4) \n"
-"inc %4 \n"
-"jl 1b \n"
-: "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
-: "r"(dst + w), "r"(diff + w), "rm"(top + w)
-);
-*left = l;
-*left_top = tl;
-}
-#endif
av_cold void ff_huffyuvdsp_init_x86(HuffYUVDSPContext *c)
{
int cpu_flags = av_get_cpu_flags();
-#if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32
-if (cpu_flags & AV_CPU_FLAG_CMOV)
-c->add_hfyu_median_pred = add_hfyu_median_pred_cmov;
-#endif
if (ARCH_X86_32 && EXTERNAL_MMX(cpu_flags)) {
-c->add_bytes = ff_add_bytes_mmx;
c->add_hfyu_left_pred_bgr32 = ff_add_hfyu_left_pred_bgr32_mmx;
}
-if (ARCH_X86_32 && EXTERNAL_MMXEXT(cpu_flags)) {
-/* slower than cmov version on AMD */
-if (!(cpu_flags & AV_CPU_FLAG_3DNOW))
-c->add_hfyu_median_pred = ff_add_hfyu_median_pred_mmxext;
-}
if (EXTERNAL_SSE2(cpu_flags)) {
-c->add_bytes = ff_add_bytes_sse2;
-c->add_hfyu_median_pred = ff_add_hfyu_median_pred_sse2;
c->add_hfyu_left_pred_bgr32 = ff_add_hfyu_left_pred_bgr32_sse2;
}
-if (EXTERNAL_SSSE3(cpu_flags)) {
-c->add_hfyu_left_pred = ff_add_hfyu_left_pred_ssse3;
-if (cpu_flags & AV_CPU_FLAG_SSE4) // not really SSE4, just slow on Conroe
-c->add_hfyu_left_pred = ff_add_hfyu_left_pred_sse4;
-}
}

libavcodec/x86/lossless_videodsp.asm

@@ -24,13 +24,199 @@
SECTION_RODATA
+cextern pb_15
+pb_zzzzzzzz77777777: times 8 db -1
+pb_7: times 8 db 7
pb_ef: times 8 db 14,15
pb_67: times 8 db 6, 7
+pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
+pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
pb_zzzz2323zzzzabab: db -1,-1,-1,-1, 2, 3, 2, 3,-1,-1,-1,-1,10,11,10,11
pb_zzzzzzzz67676767: db -1,-1,-1,-1,-1,-1,-1,-1, 6, 7, 6, 7, 6, 7, 6, 7
SECTION .text
+; void ff_add_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
+; const uint8_t *diff, int w,
+; int *left, int *left_top)
+%macro MEDIAN_PRED 0
+cglobal add_median_pred, 6,6,8, dst, top, diff, w, left, left_top
+movu m0, [topq]
+mova m2, m0
+movd m4, [left_topq]
+LSHIFT m2, 1
+mova m1, m0
+por m4, m2
+movd m3, [leftq]
+psubb m0, m4 ; t-tl
+add dstq, wq
+add topq, wq
+add diffq, wq
+neg wq
+jmp .skip
+.loop:
+movu m4, [topq+wq]
+mova m0, m4
+LSHIFT m4, 1
+por m4, m1
+mova m1, m0 ; t
+psubb m0, m4 ; t-tl
+.skip:
+movu m2, [diffq+wq]
+%assign i 0
+%rep mmsize
+mova m4, m0
+paddb m4, m3 ; t-tl+l
+mova m5, m3
+pmaxub m3, m1
+pminub m5, m1
+pminub m3, m4
+pmaxub m3, m5 ; median
+paddb m3, m2 ; +residual
+%if i==0
+mova m7, m3
+LSHIFT m7, mmsize-1
+%else
+mova m6, m3
+RSHIFT m7, 1
+LSHIFT m6, mmsize-1
+por m7, m6
+%endif
+%if i<mmsize-1
+RSHIFT m0, 1
+RSHIFT m1, 1
+RSHIFT m2, 1
+%endif
+%assign i i+1
+%endrep
+movu [dstq+wq], m7
+add wq, mmsize
+jl .loop
+movzx r2d, byte [dstq-1]
+mov [leftq], r2d
+movzx r2d, byte [topq-1]
+mov [left_topq], r2d
+RET
+%endmacro
+%if ARCH_X86_32
+INIT_MMX mmxext
+MEDIAN_PRED
+%endif
+INIT_XMM sse2
+MEDIAN_PRED
+%macro ADD_LEFT_LOOP 2 ; %1 = dst_is_aligned, %2 = src_is_aligned
+add srcq, wq
+add dstq, wq
+neg wq
+%%.loop:
+%if %2
+mova m1, [srcq+wq]
+%else
+movu m1, [srcq+wq]
+%endif
+mova m2, m1
+psllw m1, 8
+paddb m1, m2
+mova m2, m1
+pshufb m1, m3
+paddb m1, m2
+pshufb m0, m5
+mova m2, m1
+pshufb m1, m4
+paddb m1, m2
+%if mmsize == 16
+mova m2, m1
+pshufb m1, m6
+paddb m1, m2
+%endif
+paddb m0, m1
+%if %1
+mova [dstq+wq], m0
+%else
+movq [dstq+wq], m0
+movhps [dstq+wq+8], m0
+%endif
+add wq, mmsize
+jl %%.loop
+mov eax, mmsize-1
+sub eax, wd
+movd m1, eax
+pshufb m0, m1
+movd eax, m0
+RET
+%endmacro
+; int ff_add_left_pred(uint8_t *dst, const uint8_t *src, int w, int left)
+INIT_MMX ssse3
+cglobal add_left_pred, 3,3,7, dst, src, w, left
+.skip_prologue:
+mova m5, [pb_7]
+mova m4, [pb_zzzz3333zzzzbbbb]
+mova m3, [pb_zz11zz55zz99zzdd]
+movd m0, leftm
+psllq m0, 56
+ADD_LEFT_LOOP 1, 1
+INIT_XMM sse4
+cglobal add_left_pred, 3,3,7, dst, src, w, left
+mova m5, [pb_15]
+mova m6, [pb_zzzzzzzz77777777]
+mova m4, [pb_zzzz3333zzzzbbbb]
+mova m3, [pb_zz11zz55zz99zzdd]
+movd m0, leftm
+pslldq m0, 15
+test srcq, 15
+jnz .src_unaligned
+test dstq, 15
+jnz .dst_unaligned
+ADD_LEFT_LOOP 1, 1
+.dst_unaligned:
+ADD_LEFT_LOOP 0, 1
+.src_unaligned:
+ADD_LEFT_LOOP 0, 0
+%macro ADD_BYTES 0
+cglobal add_bytes, 3,4,2, dst, src, w, size
+mov sizeq, wq
+and sizeq, -2*mmsize
+jz .2
+add dstq, sizeq
+add srcq, sizeq
+neg sizeq
+.1:
+mova m0, [srcq + sizeq]
+mova m1, [srcq + sizeq + mmsize]
+paddb m0, [dstq + sizeq]
+paddb m1, [dstq + sizeq + mmsize]
+mova [dstq + sizeq], m0
+mova [dstq + sizeq + mmsize], m1
+add sizeq, 2*mmsize
+jl .1
+.2:
+and wq, 2*mmsize-1
+jz .end
+add dstq, wq
+add srcq, wq
+neg wq
+.3:
+mov sizeb, [srcq + wq]
+add [dstq + wq], sizeb
+inc wq
+jl .3
+.end:
+REP_RET
+%endmacro
+%if ARCH_X86_32
+INIT_MMX mmx
+ADD_BYTES
+%endif
+INIT_XMM sse2
+ADD_BYTES
%macro INT16_LOOP 2 ; %1 = a/u (aligned/unaligned), %2 = add/sub
movd m4, maskd
SPLATW m4, m4
@@ -86,9 +272,11 @@ SECTION .text
RET
%endmacro
+%if ARCH_X86_32
INIT_MMX mmx
cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
INT16_LOOP a, add
+%endif
INIT_XMM sse2
cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
@@ -100,9 +288,11 @@ cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
.unaligned:
INT16_LOOP u, add
+%if ARCH_X86_32
INIT_MMX mmx
cglobal diff_int16, 5,5,5, dst, src1, src2, mask, w, tmp
INT16_LOOP a, sub
+%endif
INIT_XMM sse2
cglobal diff_int16, 5,5,5, dst, src1, src2, mask, w, tmp

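Worth noting in the ADD_LEFT_LOOP macro moved in above: the psllw/pshufb + paddb rounds form a Hillis-Steele style inclusive scan, adding lanes shifted by 1, 2, 4 and then 8 bytes, so a 16-byte XMM register holds all running sums after four rounds, and the shuffle of m0 carries the previous block's total into the next iteration. A scalar C model of the doubling scheme (illustrative only; bytes wrap mod 256 exactly as paddb does):

#include <assert.h>
#include <stdint.h>

/* Inclusive prefix sum of a 16-byte block in log2(16) = 4 rounds.
 * Walking i downwards makes each round read pre-round values, the
 * scalar equivalent of the shifted-register adds in ADD_LEFT_LOOP. */
static void prefix_sum_block(uint8_t v[16])
{
    for (int shift = 1; shift < 16; shift *= 2)
        for (int i = 15; i >= shift; i--)
            v[i] = (uint8_t)(v[i] + v[i - shift]);
}

int main(void)
{
    uint8_t v[16], orig[16], ref = 0;
    for (int i = 0; i < 16; i++)
        orig[i] = v[i] = (uint8_t)(3 * i + 1);
    prefix_sum_block(v);
    for (int i = 0; i < 16; i++) {  /* compare with the serial loop */
        ref = (uint8_t)(ref + orig[i]);
        assert(v[i] == ref);
    }
    return 0;
}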
libavcodec/x86/lossless_videodsp_init.c

@@ -18,10 +18,27 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
+#include "config.h"
+#include "libavutil/x86/asm.h"
#include "../lossless_videodsp.h"
#include "libavutil/pixdesc.h"
#include "libavutil/x86/cpu.h"
+void ff_add_bytes_mmx(uint8_t *dst, uint8_t *src, intptr_t w);
+void ff_add_bytes_sse2(uint8_t *dst, uint8_t *src, intptr_t w);
+void ff_add_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
+const uint8_t *diff, intptr_t w,
+int *left, int *left_top);
+void ff_add_median_pred_sse2(uint8_t *dst, const uint8_t *top,
+const uint8_t *diff, intptr_t w,
+int *left, int *left_top);
+int ff_add_left_pred_ssse3(uint8_t *dst, const uint8_t *src,
+intptr_t w, int left);
+int ff_add_left_pred_sse4(uint8_t *dst, const uint8_t *src,
+intptr_t w, int left);
void ff_add_int16_mmx(uint16_t *dst, const uint16_t *src, unsigned mask, int w);
void ff_add_int16_sse2(uint16_t *dst, const uint16_t *src, unsigned mask, int w);
void ff_diff_int16_mmx (uint16_t *dst, const uint16_t *src1, const uint16_t *src2, unsigned mask, int w);
@@ -31,28 +48,85 @@ int ff_add_hfyu_left_pred_int16_sse4(uint16_t *dst, const uint16_t *src, unsigne
void ff_add_hfyu_median_pred_int16_mmxext(uint16_t *dst, const uint16_t *top, const uint16_t *diff, unsigned mask, int w, int *left, int *left_top);
void ff_sub_hfyu_median_pred_int16_mmxext(uint16_t *dst, const uint16_t *src1, const uint16_t *src2, unsigned mask, int w, int *left, int *left_top);
+#if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32
+static void add_median_pred_cmov(uint8_t *dst, const uint8_t *top,
+const uint8_t *diff, intptr_t w,
+int *left, int *left_top)
+{
+x86_reg w2 = -w;
+x86_reg x;
+int l = *left & 0xff;
+int tl = *left_top & 0xff;
+int t;
+__asm__ volatile (
+"mov %7, %3 \n"
+"1: \n"
+"movzbl (%3, %4), %2 \n"
+"mov %2, %k3 \n"
+"sub %b1, %b3 \n"
+"add %b0, %b3 \n"
+"mov %2, %1 \n"
+"cmp %0, %2 \n"
+"cmovg %0, %2 \n"
+"cmovg %1, %0 \n"
+"cmp %k3, %0 \n"
+"cmovg %k3, %0 \n"
+"mov %7, %3 \n"
+"cmp %2, %0 \n"
+"cmovl %2, %0 \n"
+"add (%6, %4), %b0 \n"
+"mov %b0, (%5, %4) \n"
+"inc %4 \n"
+"jl 1b \n"
+: "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
+: "r"(dst + w), "r"(diff + w), "rm"(top + w)
+);
+*left = l;
+*left_top = tl;
+}
+#endif
void ff_llviddsp_init_x86(LLVidDSPContext *c, AVCodecContext *avctx)
{
int cpu_flags = av_get_cpu_flags();
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(avctx->pix_fmt);
-if (EXTERNAL_MMX(cpu_flags)) {
+#if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32
+if (cpu_flags & AV_CPU_FLAG_CMOV)
+c->add_median_pred = add_median_pred_cmov;
+#endif
+if (ARCH_X86_32 && EXTERNAL_MMX(cpu_flags)) {
+c->add_bytes = ff_add_bytes_mmx;
c->add_int16 = ff_add_int16_mmx;
c->diff_int16 = ff_diff_int16_mmx;
}
+if (ARCH_X86_32 && EXTERNAL_MMXEXT(cpu_flags)) {
+/* slower than cmov version on AMD */
+if (!(cpu_flags & AV_CPU_FLAG_3DNOW))
+c->add_median_pred = ff_add_median_pred_mmxext;
+}
if (EXTERNAL_MMXEXT(cpu_flags) && pix_desc && pix_desc->comp[0].depth<16) {
c->add_hfyu_median_pred_int16 = ff_add_hfyu_median_pred_int16_mmxext;
c->sub_hfyu_median_pred_int16 = ff_sub_hfyu_median_pred_int16_mmxext;
}
if (EXTERNAL_SSE2(cpu_flags)) {
+c->add_bytes = ff_add_bytes_sse2;
+c->add_median_pred = ff_add_median_pred_sse2;
c->add_int16 = ff_add_int16_sse2;
c->diff_int16 = ff_diff_int16_sse2;
}
if (EXTERNAL_SSSE3(cpu_flags)) {
+c->add_left_pred = ff_add_left_pred_ssse3;
+if (cpu_flags & AV_CPU_FLAG_SSE4) // not really SSE4, just slow on Conroe
+c->add_left_pred = ff_add_left_pred_sse4;
c->add_hfyu_left_pred_int16 = ff_add_hfyu_left_pred_int16_ssse3;
}