Revert "i420_yuy2/i422_yuy2/i420_rgb: purge MMX"

This reverts commit e04412a31b.
Rémi Denis-Courmont 2019-03-12 19:07:13 +02:00
parent 10131fcdd6
commit 0620a2600a
10 changed files with 1214 additions and 3 deletions

View File

@ -188,14 +188,17 @@ $Id$
* https: HTTP/TLS access module for HTTP 2.0 support
* i420_nv12: planar YUV to semi-planar YUV conversion functions
* i420_rgb: planar YUV to packed RGB conversion functions
* i420_rgb_mmx: MMX accelerated version of i420_rgb
* i420_rgb_sse2: sse2 accelerated version of i420_rgb
* i420_rgb_avx2: avx2 accelerated version of i420_rgb
* i420_yuy2: planar 4:2:0 YUV to packed YUV conversion functions
* i420_yuy2_altivec: AltiVec accelerated version of i420_yuy2
* i420_yuy2_mmx: MMX accelerated version of i420_yuy2
* i420_yuy2_sse2: sse2 accelerated version of i420_yuy2
* i420_yuy2_avx2: avx2 accelerated version of i420_yuy2
* i422_i420: 4:2:2 to 4:2:0 conversion functions
* i422_yuy2: planar 4:2:2 YUV to packed YUV conversion functions
* i422_yuy2_mmx: MMX accelerated version of i422_yuy2
* i422_yuy2_sse2: sse2 accelerated version of i422_yuy2
* i422_yuy2_avx2: avx2 accelerated version of i422_yuy2
* idummy: dummy input

View File

@ -70,6 +70,26 @@ chroma_LTLIBRARIES += \
libi420_yuy2_altivec_plugin.la
endif
# MMX
libi420_rgb_mmx_plugin_la_SOURCES = video_chroma/i420_rgb.c video_chroma/i420_rgb.h \
video_chroma/i420_rgb16_x86.c video_chroma/i420_rgb_mmx.h
libi420_rgb_mmx_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) -DMMX
libi420_yuy2_mmx_plugin_la_SOURCES = video_chroma/i420_yuy2.c video_chroma/i420_yuy2.h
libi420_yuy2_mmx_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) \
-DMODULE_NAME_IS_i420_yuy2_mmx
libi422_yuy2_mmx_plugin_la_SOURCES = video_chroma/i422_yuy2.c video_chroma/i422_yuy2.h
libi422_yuy2_mmx_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) \
-DMODULE_NAME_IS_i422_yuy2_mmx
if HAVE_MMX
chroma_LTLIBRARIES += \
libi420_rgb_mmx_plugin.la \
libi420_yuy2_mmx_plugin.la \
libi422_yuy2_mmx_plugin.la
endif
# SSE2
libi420_rgb_sse2_plugin_la_SOURCES = video_chroma/i420_rgb.c video_chroma/i420_rgb.h \
video_chroma/i420_rgb16_x86.c video_chroma/i420_rgb_sse2.h

View File

@ -78,6 +78,11 @@ vlc_module_begin ()
"RV15,RV16,RV24,RV32 conversions") )
set_capability( "video converter", 120 )
# define vlc_CPU_capable() vlc_CPU_SSE2()
#elif defined (MMX)
set_description( N_( "MMX I420,IYUV,YV12 to "
"RV15,RV16,RV24,RV32 conversions") )
set_capability( "video converter", 100 )
# define vlc_CPU_capable() vlc_CPU_MMX()
#else
set_description( N_("I420,IYUV,YV12 to "
"RGB8,RV15,RV16,RV24,RV32 conversions") )

View File

@ -21,7 +21,7 @@
*****************************************************************************/
#include <limits.h>
#if !defined (AVX2) && !defined (SSE2)
#if !defined (AVX2) && !defined (SSE2) && !defined (MMX)
# define PLAIN
#endif

View File

@ -39,6 +39,10 @@
# include "i420_rgb_sse2.h"
# define VLC_TARGET VLC_SSE
#endif
#ifdef MMX
# include "i420_rgb_mmx.h"
# define VLC_TARGET VLC_MMX
#endif
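
For context on VLC_TARGET (an assumption based on vlc_cpu.h, not something this diff defines): VLC_MMX is expected to expand to a per-function target attribute on compilers that are not already building with MMX enabled, so only the tagged conversion routines get MMX code generation. A rough sketch:

/* Sketch of the assumed VLC_MMX definition (see vlc_cpu.h); illustration only,
 * GCC/Clang syntax. */
#ifndef __MMX__
# define VLC_MMX __attribute__ ((__target__ ("mmx")))
#else
# define VLC_MMX
#endif
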
/*****************************************************************************
* SetOffset: build offset array for conversion functions
@ -393,6 +397,60 @@ void I420_R5G5B5( filter_t *p_filter, picture_t *p_src, picture_t *p_dest )
/* make sure all SSE2 stores are visible thereafter */
SSE2_END;
#elif defined (MMX)
i_rewind = (-(p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width)) & 7;
for( i_y = 0; i_y < (p_filter->fmt_in.video.i_y_offset + p_filter->fmt_in.video.i_visible_height); i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_16
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_15
);
p_y += 8;
p_u += 4;
p_v += 4;
p_buffer += 8;
}
/* Here we do some unaligned reads and duplicate conversions, but
* at least we have all the pixels */
if( i_rewind )
{
p_y -= i_rewind;
p_u -= i_rewind >> 1;
p_v -= i_rewind >> 1;
p_buffer -= i_rewind;
MMX_CALL (
MMX_INIT_16
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_15
);
p_y += 8;
p_u += 4;
p_v += 4;
}
SCALE_WIDTH;
SCALE_HEIGHT( 420, 2 );
p_y += i_source_margin;
if( i_y % 2 )
{
p_u += i_source_margin_c;
p_v += i_source_margin_c;
}
}
/* re-enable FPU registers */
MMX_END;
#endif
}
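
The i_rewind logic above is how the MMX path copes with visible widths that are not a multiple of 8: after the main loop, the Y/U/V and output pointers back up by i_rewind samples so that one extra 8-pixel pass covers the tail, re-converting a few already-written pixels (the "duplicate conversions" the comment mentions). A standalone illustration of the arithmetic, folding i_x_offset into the width (sketch, not part of the patch):

#include <stdio.h>

int main(void)
{
    for (int width = 8; width <= 16; width++) {
        int i_rewind = (-width) & 7;   /* same formula as above: modulo-8 remainder */
        int groups   = width / 8;      /* full 8-pixel MMX iterations */
        printf("width %2d: %d full pass(es), rewind %d for the tail pass\n",
               width, groups, i_rewind);
    }
    return 0;
}
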
@ -689,6 +747,60 @@ void I420_R5G6B5( filter_t *p_filter, picture_t *p_src, picture_t *p_dest )
/* make sure all SSE2 stores are visible thereafter */
SSE2_END;
#elif defined (MMX)
i_rewind = (-(p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width)) & 7;
for( i_y = 0; i_y < (p_filter->fmt_in.video.i_y_offset + p_filter->fmt_in.video.i_visible_height); i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_16
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_16
);
p_y += 8;
p_u += 4;
p_v += 4;
p_buffer += 8;
}
/* Here we do some unaligned reads and duplicate conversions, but
* at least we have all the pixels */
if( i_rewind )
{
p_y -= i_rewind;
p_u -= i_rewind >> 1;
p_v -= i_rewind >> 1;
p_buffer -= i_rewind;
MMX_CALL (
MMX_INIT_16
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_16
);
p_y += 8;
p_u += 4;
p_v += 4;
}
SCALE_WIDTH;
SCALE_HEIGHT( 420, 2 );
p_y += i_source_margin;
if( i_y % 2 )
{
p_u += i_source_margin_c;
p_v += i_source_margin_c;
}
}
/* re-enable FPU registers */
MMX_END;
#endif
}
@ -985,6 +1097,60 @@ void I420_A8R8G8B8( filter_t *p_filter, picture_t *p_src,
/* make sure all SSE2 stores are visible thereafter */
SSE2_END;
#elif defined (MMX)
i_rewind = (-(p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width)) & 7;
for( i_y = 0; i_y < (p_filter->fmt_in.video.i_y_offset + p_filter->fmt_in.video.i_visible_height); i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_32_ARGB
);
p_y += 8;
p_u += 4;
p_v += 4;
p_buffer += 8;
}
/* Here we do some unaligned reads and duplicate conversions, but
* at least we have all the pixels */
if( i_rewind )
{
p_y -= i_rewind;
p_u -= i_rewind >> 1;
p_v -= i_rewind >> 1;
p_buffer -= i_rewind;
MMX_CALL (
MMX_INIT_32
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_32_ARGB
);
p_y += 8;
p_u += 4;
p_v += 4;
}
SCALE_WIDTH;
SCALE_HEIGHT( 420, 4 );
p_y += i_source_margin;
if( i_y % 2 )
{
p_u += i_source_margin_c;
p_v += i_source_margin_c;
}
}
/* re-enable FPU registers */
MMX_END;
#endif
}
@ -1280,6 +1446,60 @@ void I420_R8G8B8A8( filter_t *p_filter, picture_t *p_src, picture_t *p_dest )
/* make sure all SSE2 stores are visible thereafter */
SSE2_END;
#elif defined (MMX)
i_rewind = (-(p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width)) & 7;
for( i_y = 0; i_y < (p_filter->fmt_in.video.i_y_offset + p_filter->fmt_in.video.i_visible_height); i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_32_RGBA
);
p_y += 8;
p_u += 4;
p_v += 4;
p_buffer += 8;
}
/* Here we do some unaligned reads and duplicate conversions, but
* at least we have all the pixels */
if( i_rewind )
{
p_y -= i_rewind;
p_u -= i_rewind >> 1;
p_v -= i_rewind >> 1;
p_buffer -= i_rewind;
MMX_CALL (
MMX_INIT_32
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_32_RGBA
);
p_y += 8;
p_u += 4;
p_v += 4;
}
SCALE_WIDTH;
SCALE_HEIGHT( 420, 4 );
p_y += i_source_margin;
if( i_y % 2 )
{
p_u += i_source_margin_c;
p_v += i_source_margin_c;
}
}
/* re-enable FPU registers */
MMX_END;
#endif
}
@ -1575,6 +1795,60 @@ void I420_B8G8R8A8( filter_t *p_filter, picture_t *p_src, picture_t *p_dest )
/* make sure all SSE2 stores are visible thereafter */
SSE2_END;
#elif defined (MMX)
i_rewind = (-(p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width)) & 7;
for( i_y = 0; i_y < (p_filter->fmt_in.video.i_y_offset + p_filter->fmt_in.video.i_visible_height); i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_32_BGRA
);
p_y += 8;
p_u += 4;
p_v += 4;
p_buffer += 8;
}
/* Here we do some unaligned reads and duplicate conversions, but
* at least we have all the pixels */
if( i_rewind )
{
p_y -= i_rewind;
p_u -= i_rewind >> 1;
p_v -= i_rewind >> 1;
p_buffer -= i_rewind;
MMX_CALL (
MMX_INIT_32
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_32_BGRA
);
p_y += 8;
p_u += 4;
p_v += 4;
}
SCALE_WIDTH;
SCALE_HEIGHT( 420, 4 );
p_y += i_source_margin;
if( i_y % 2 )
{
p_u += i_source_margin_c;
p_v += i_source_margin_c;
}
}
/* re-enable FPU registers */
MMX_END;
#endif
}
@ -1870,5 +2144,59 @@ void I420_A8B8G8R8( filter_t *p_filter, picture_t *p_src, picture_t *p_dest )
/* make sure all SSE2 stores are visible thereafter */
SSE2_END;
#elif defined (MMX)
i_rewind = (-(p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width)) & 7;
for( i_y = 0; i_y < (p_filter->fmt_in.video.i_y_offset + p_filter->fmt_in.video.i_visible_height); i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_32_ABGR
);
p_y += 8;
p_u += 4;
p_v += 4;
p_buffer += 8;
}
/* Here we do some unaligned reads and duplicate conversions, but
* at least we have all the pixels */
if( i_rewind )
{
p_y -= i_rewind;
p_u -= i_rewind >> 1;
p_v -= i_rewind >> 1;
p_buffer -= i_rewind;
MMX_CALL (
MMX_INIT_32
MMX_YUV_MUL
MMX_YUV_ADD
MMX_UNPACK_32_ABGR
);
p_y += 8;
p_u += 4;
p_v += 4;
}
SCALE_WIDTH;
SCALE_HEIGHT( 420, 4 );
p_y += i_source_margin;
if( i_y % 2 )
{
p_u += i_source_margin_c;
p_v += i_source_margin_c;
}
}
/* re-enable FPU registers */
MMX_END;
#endif
}

View File

@ -0,0 +1,540 @@
/*****************************************************************************
* i420_rgb_mmx.h: MMX YUV transformation assembly
*****************************************************************************
* Copyright (C) 1999-2007 the VideoLAN team
*
* Authors: Olie Lho <ollie@sis.com.tw>
* Gaël Hendryckx <jimmy@via.ecp.fr>
* Samuel Hocevar <sam@zoy.org>
* Damien Fouilleul <damienf@videolan.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
/* hope these constant values are cache line aligned */
static const uint64_t mmx_80w = 0x0080008000800080ULL; /* Will be referenced as %4 in inline asm */
static const uint64_t mmx_10w = 0x1010101010101010ULL; /* -- as %5 */
static const uint64_t mmx_00ffw = 0x00ff00ff00ff00ffULL; /* -- as %6 */
static const uint64_t mmx_Y_coeff = 0x253f253f253f253fULL; /* -- as %7 */
static const uint64_t mmx_U_green = 0xf37df37df37df37dULL; /* -- as %8 */
static const uint64_t mmx_U_blue = 0x4093409340934093ULL; /* -- as %9 */
static const uint64_t mmx_V_red = 0x3312331233123312ULL; /* -- as %10 */
static const uint64_t mmx_V_green = 0xe5fce5fce5fce5fcULL; /* -- as %11 */
static const uint64_t mmx_mask_f8 = 0xf8f8f8f8f8f8f8f8ULL; /* -- as %12 */
static const uint64_t mmx_mask_fc = 0xfcfcfcfcfcfcfcfcULL; /* -- as %13 */
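
These constants are BT.601 limited-range YCbCr-to-RGB coefficients in 16-bit fixed point: the samples are shifted left by 3 before pmulhw (which keeps the high 16 bits of the product), so each constant is effectively divided by 2^13. A quick standalone check of that reading (illustration only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* constant name, 16-bit value, expected BT.601 factor */
    struct { const char *name; int16_t c; double expect; } k[] = {
        { "mmx_Y_coeff", (int16_t)0x253f,  1.164 },
        { "mmx_U_blue",  (int16_t)0x4093,  2.018 },
        { "mmx_U_green", (int16_t)0xf37d, -0.391 },
        { "mmx_V_red",   (int16_t)0x3312,  1.596 },
        { "mmx_V_green", (int16_t)0xe5fc, -0.813 },
    };
    for (size_t i = 0; i < sizeof k / sizeof k[0]; i++)
        printf("%-12s %8.4f (expected %6.3f)\n",
               k[i].name, k[i].c / 8192.0, k[i].expect);
    return 0;
}
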
#if defined(CAN_COMPILE_MMX)
/* MMX assembly */
#define MMX_CALL(MMX_INSTRUCTIONS) \
do { \
__asm__ __volatile__( \
".p2align 3 \n\t" \
MMX_INSTRUCTIONS \
: \
: "r" (p_y), "r" (p_u), \
"r" (p_v), "r" (p_buffer), \
"m" (mmx_80w), "m" (mmx_10w), \
"m" (mmx_00ffw), "m" (mmx_Y_coeff), \
"m" (mmx_U_green), "m" (mmx_U_blue), \
"m" (mmx_V_red), "m" (mmx_V_green), \
"m" (mmx_mask_f8), "m" (mmx_mask_fc) \
: "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" ); \
} while(0)
#define MMX_END __asm__ __volatile__ ( "emms" )
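
MMX_END matters because the MMX registers alias the x87 floating-point register stack, so emms (or _mm_empty() when using intrinsics) has to run before any FPU code executes again; that is what the "re-enable FPU registers" comments at the call sites refer to. Minimal intrinsics illustration:

#include <mmintrin.h>

static void FinishMmxSection(void)   /* hypothetical helper, not in the patch */
{
    _mm_empty();                     /* same effect as the "emms" instruction */
}
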
#define MMX_INIT_16 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"
#define MMX_INIT_16_GRAY " \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
#movl $0, (%3) # cache preload for image \n\
"
#define MMX_INIT_32 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movl $0, (%3) # cache preload for image \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"
/*
* Do the multiply part of the conversion for even and odd pixels,
* register usage:
* mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
* mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
* mm6 -> Y even, mm7 -> Y odd
*/
#define MMX_YUV_MUL " \n\
# convert the chroma part \n\
punpcklbw %%mm4, %%mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
punpcklbw %%mm4, %%mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
psubsw %4, %%mm0 # Cb -= 128 \n\
psubsw %4, %%mm1 # Cr -= 128 \n\
psllw $3, %%mm0 # Promote precision \n\
psllw $3, %%mm1 # Promote precision \n\
movq %%mm0, %%mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
movq %%mm1, %%mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
pmulhw %8, %%mm2 # Mul Cb with green coeff -> Cb green \n\
pmulhw %11, %%mm3 # Mul Cr with green coeff -> Cr green \n\
pmulhw %9, %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
pmulhw %10, %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
paddsw %%mm3, %%mm2 # Cb green + Cr green -> Cgreen \n\
\n\
# convert the luma part \n\
psubusb %5, %%mm6 # Y -= 16 \n\
movq %%mm6, %%mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
pand %6, %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
psrlw $8, %%mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
psllw $3, %%mm6 # Promote precision \n\
psllw $3, %%mm7 # Promote precision \n\
pmulhw %7, %%mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 \n\
pmulhw %7, %%mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
"
/*
* Do the addition part of the conversion for even and odd pixels,
* register usage:
* mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
* mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
* mm6 -> Y even, mm7 -> Y odd
*/
#define MMX_YUV_ADD " \n\
# Do horizontal and vertical scaling \n\
movq %%mm0, %%mm3 # Copy Cblue \n\
movq %%mm1, %%mm4 # Copy Cred \n\
movq %%mm2, %%mm5 # Copy Cgreen \n\
paddsw %%mm6, %%mm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0 \n\
paddsw %%mm7, %%mm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 \n\
paddsw %%mm6, %%mm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0 \n\
paddsw %%mm7, %%mm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1 \n\
paddsw %%mm6, %%mm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 \n\
paddsw %%mm7, %%mm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 \n\
\n\
# Limit RGB even to 0..255 \n\
packuswb %%mm0, %%mm0 # B6 B4 B2 B0 B6 B4 B2 B0 \n\
packuswb %%mm1, %%mm1 # R6 R4 R2 R0 R6 R4 R2 R0 \n\
packuswb %%mm2, %%mm2 # G6 G4 G2 G0 G6 G4 G2 G0 \n\
\n\
# Limit RGB odd to 0..255 \n\
packuswb %%mm3, %%mm3 # B7 B5 B3 B1 B7 B5 B3 B1 \n\
packuswb %%mm4, %%mm4 # R7 R5 R3 R1 R7 R5 R3 R1 \n\
packuswb %%mm5, %%mm5 # G7 G5 G3 G1 G7 G5 G3 G1 \n\
\n\
# Interleave RGB even and odd \n\
punpcklbw %%mm3, %%mm0 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm4, %%mm1 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm5, %%mm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
"
/*
* Grayscale case, only use Y
*/
#define MMX_YUV_GRAY " \n\
# convert the luma part \n\
psubusb %5, %%mm6 \n\
movq %%mm6, %%mm7 \n\
pand %6, %%mm6 \n\
psrlw $8, %%mm7 \n\
psllw $3, %%mm6 \n\
psllw $3, %%mm7 \n\
pmulhw %7, %%mm6 \n\
pmulhw %7, %%mm7 \n\
packuswb %%mm6, %%mm6 \n\
packuswb %%mm7, %%mm7 \n\
punpcklbw %%mm7, %%mm6 \n\
"
#define MMX_UNPACK_16_GRAY " \n\
movq %%mm6, %%mm5 \n\
pand %12, %%mm6 \n\
pand %13, %%mm5 \n\
movq %%mm6, %%mm7 \n\
psrlw $3, %%mm7 \n\
pxor %%mm3, %%mm3 \n\
movq %%mm7, %%mm2 \n\
movq %%mm5, %%mm0 \n\
punpcklbw %%mm3, %%mm5 \n\
punpcklbw %%mm6, %%mm7 \n\
psllw $3, %%mm5 \n\
por %%mm5, %%mm7 \n\
movq %%mm7, (%3) \n\
punpckhbw %%mm3, %%mm0 \n\
punpckhbw %%mm6, %%mm2 \n\
psllw $3, %%mm0 \n\
movq 8(%0), %%mm6 \n\
por %%mm0, %%mm2 \n\
movq %%mm2, 8(%3) \n\
"
/*
* convert RGB plane to RGB 15 bits,
* mm0 -> B, mm1 -> R, mm2 -> G,
* mm4 -> GB, mm5 -> AR pixel 4-7,
* mm6 -> GB, mm7 -> AR pixel 0-3
*/
#define MMX_UNPACK_15 " # Note, much of this shows bit patterns (of a pair of bytes) \n\
# mask unneeded bits off \n\
pand %12, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand %12, %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand %12, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%mm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm2, %%mm7 # Copy G7-G0 \n\
\n\
# pack the 3 separate RGB bytes into 2 for pixels 0-3 \n\
punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%mm1, %%mm0 # __r7r6r5 r4r3____ ______b7 b6b5b4b3 \n\
psllw $2,%%mm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%mm2, %%mm0 # __r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 \n\
movq %%mm0, (%3) # store pixel 0-3 \n\
\n\
# pack the 3 separate RGB bytes into 2 for pixels 4-7 \n\
punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%mm1, %%mm5 # __r7r6r5 r4r3____ ______b7 b6b5b4b3 \n\
psllw $2,%%mm7 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%mm7, %%mm5 # __r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"
/*
* convert RGB plane to RGB 16 bits,
* mm0 -> B, mm1 -> R, mm2 -> G,
* mm4 -> GB, mm5 -> AR pixel 4-7,
* mm6 -> GB, mm7 -> AR pixel 0-3
*/
#define MMX_UNPACK_16 " # Note, much of this shows bit patterns (of a pair of bytes) \n\
# mask unneeded bits off \n\
pand %12, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand %13, %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
pand %12, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm2, %%mm7 # Copy G7-G0 \n\
\n\
# pack the 3 separate RGB bytes into 2 for pixels 0-3 \n\
punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%mm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%mm2, %%mm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movq %%mm0, (%3) # store pixel 0-3 \n\
\n\
# pack the 3 separate RGB bytes into 2 for pixels 4-7 \n\
punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%mm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%mm7, %%mm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"
/*
* convert RGB plane to RGB packed format,
* mm0 -> B, mm1 -> R, mm2 -> G
*/
#define MMX_UNPACK_32_ARGB " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm0, %%mm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm2, %%mm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
movq %%mm1, %%mm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm3, %%mm5 # 00 R3 00 R2 00 R1 00 R0 \n\
movq %%mm4, %%mm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklwd %%mm5, %%mm4 # 00 R1 G1 B1 00 R0 G0 B0 \n\
movq %%mm4, (%3) # Store ARGB1 ARGB0 \n\
punpckhwd %%mm5, %%mm6 # 00 R3 G3 B3 00 R2 G2 B2 \n\
movq %%mm6, 8(%3) # Store ARGB3 ARGB2 \n\
punpckhbw %%mm2, %%mm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%mm3, %%mm1 # 00 R7 00 R6 00 R5 00 R4 \n\
movq %%mm0, %%mm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpcklwd %%mm1, %%mm5 # 00 R5 G5 B5 00 R4 G4 B4 \n\
movq %%mm5, 16(%3) # Store ARGB5 ARGB4 \n\
punpckhwd %%mm1, %%mm0 # 00 R7 G7 B7 00 R6 G6 B6 \n\
movq %%mm0, 24(%3) # Store ARGB7 ARGB6 \n\
"
#define MMX_UNPACK_32_RGBA " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm2, %%mm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%mm1, %%mm4 # R3 G3 R2 G2 R1 G1 R0 G0 \n\
punpcklbw %%mm0, %%mm3 # B3 00 B2 00 B1 00 B0 00 \n\
movq %%mm3, %%mm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%mm4, %%mm3 # R1 G1 B1 00 R0 G0 B0 00 \n\
movq %%mm3, (%3) # Store RGBA1 RGBA0 \n\
punpckhwd %%mm4, %%mm5 # R3 G3 B3 00 R2 G2 B2 00 \n\
movq %%mm5, 8(%3) # Store RGBA3 RGBA2 \n\
pxor %%mm6, %%mm6 # zero mm6 \n\
punpckhbw %%mm1, %%mm2 # R7 G7 R6 G6 R5 G5 R4 G4 \n\
punpckhbw %%mm0, %%mm6 # B7 00 B6 00 B5 00 B4 00 \n\
movq %%mm6, %%mm0 # B7 00 B6 00 B5 00 B4 00 \n\
punpcklwd %%mm2, %%mm6 # R5 G5 B5 00 R4 G4 B4 00 \n\
movq %%mm6, 16(%3) # Store RGBA5 RGBA4 \n\
punpckhwd %%mm2, %%mm0 # R7 G7 B7 00 R6 G6 B6 00 \n\
movq %%mm0, 24(%3) # Store RGBA7 RGBA6 \n\
"
#define MMX_UNPACK_32_BGRA " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm2, %%mm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%mm0, %%mm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
punpcklbw %%mm1, %%mm3 # R3 00 R2 00 R1 00 R0 00 \n\
movq %%mm3, %%mm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%mm4, %%mm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
movq %%mm3, (%3) # Store BGRA1 BGRA0 \n\
punpckhwd %%mm4, %%mm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
movq %%mm5, 8(%3) # Store BGRA3 BGRA2 \n\
pxor %%mm6, %%mm6 # zero mm6 \n\
punpckhbw %%mm0, %%mm2 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
punpckhbw %%mm1, %%mm6 # R7 00 R6 00 R5 00 R4 00 \n\
movq %%mm6, %%mm0 # R7 00 R6 00 R5 00 R4 00 \n\
punpcklwd %%mm2, %%mm6 # B5 G5 R5 00 B4 G4 R4 00 \n\
movq %%mm6, 16(%3) # Store BGRA5 BGRA4 \n\
punpckhwd %%mm2, %%mm0 # B7 G7 R7 00 B6 G6 R6 00 \n\
movq %%mm0, 24(%3) # Store BGRA7 BGRA6 \n\
"
#define MMX_UNPACK_32_ABGR " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm1, %%mm4 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm2, %%mm4 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
movq %%mm0, %%mm5 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm3, %%mm5 # 00 B3 00 B2 00 B1 00 B0 \n\
movq %%mm4, %%mm6 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
punpcklwd %%mm5, %%mm4 # 00 B1 G1 R1 00 B0 G0 R0 \n\
movq %%mm4, (%3) # Store ABGR1 ABGR0 \n\
punpckhwd %%mm5, %%mm6 # 00 B3 G3 R3 00 B2 G2 R2 \n\
movq %%mm6, 8(%3) # Store ABGR3 ABGR2 \n\
punpckhbw %%mm2, %%mm1 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpckhbw %%mm3, %%mm0 # 00 B7 00 B6 00 B5 00 B4 \n\
movq %%mm1, %%mm2 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpcklwd %%mm0, %%mm1 # 00 B5 G5 R5 00 B4 G4 R4 \n\
movq %%mm1, 16(%3) # Store ABGR5 ABGR4 \n\
punpckhwd %%mm0, %%mm2 # 00 B7 G7 R7 00 B6 G6 R6 \n\
movq %%mm2, 24(%3) # Store ABGR7 ABGR6 \n\
"
#elif defined(HAVE_MMX_INTRINSICS)
/* MMX intrinsics */
#include <mmintrin.h>
#define MMX_CALL(MMX_INSTRUCTIONS) \
do { \
__m64 mm0, mm1, mm2, mm3, \
mm4, mm5, mm6, mm7; \
MMX_INSTRUCTIONS \
} while(0)
#define MMX_END _mm_empty()
#define MMX_INIT_16 \
mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
mm4 = _mm_setzero_si64(); \
mm6 = (__m64)*(uint64_t *)p_y;
#define MMX_INIT_32 \
mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
*(uint16_t *)p_buffer = 0; \
mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
mm4 = _mm_setzero_si64(); \
mm6 = (__m64)*(uint64_t *)p_y;
#define MMX_YUV_MUL \
mm0 = _mm_unpacklo_pi8(mm0, mm4); \
mm1 = _mm_unpacklo_pi8(mm1, mm4); \
mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w); \
mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w); \
mm0 = _mm_slli_pi16(mm0, 3); \
mm1 = _mm_slli_pi16(mm1, 3); \
mm2 = mm0; \
mm3 = mm1; \
mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green); \
mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green); \
mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue); \
mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red); \
mm2 = _mm_adds_pi16(mm2, mm3); \
\
mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w); \
mm7 = mm6; \
mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw); \
mm7 = _mm_srli_pi16(mm7, 8); \
mm6 = _mm_slli_pi16(mm6, 3); \
mm7 = _mm_slli_pi16(mm7, 3); \
mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff); \
mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);
#define MMX_YUV_ADD \
mm3 = mm0; \
mm4 = mm1; \
mm5 = mm2; \
mm0 = _mm_adds_pi16(mm0, mm6); \
mm3 = _mm_adds_pi16(mm3, mm7); \
mm1 = _mm_adds_pi16(mm1, mm6); \
mm4 = _mm_adds_pi16(mm4, mm7); \
mm2 = _mm_adds_pi16(mm2, mm6); \
mm5 = _mm_adds_pi16(mm5, mm7); \
\
mm0 = _mm_packs_pu16(mm0, mm0); \
mm1 = _mm_packs_pu16(mm1, mm1); \
mm2 = _mm_packs_pu16(mm2, mm2); \
\
mm3 = _mm_packs_pu16(mm3, mm3); \
mm4 = _mm_packs_pu16(mm4, mm4); \
mm5 = _mm_packs_pu16(mm5, mm5); \
\
mm0 = _mm_unpacklo_pi8(mm0, mm3); \
mm1 = _mm_unpacklo_pi8(mm1, mm4); \
mm2 = _mm_unpacklo_pi8(mm2, mm5);
#define MMX_UNPACK_15 \
mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
mm0 = _mm_srli_pi16(mm0, 3); \
mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8); \
mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
mm1 = _mm_srli_pi16(mm1, 1); \
mm4 = _mm_setzero_si64(); \
mm5 = mm0; \
mm7 = mm2; \
\
mm2 = _mm_unpacklo_pi8(mm2, mm4); \
mm0 = _mm_unpacklo_pi8(mm0, mm1); \
mm2 = _mm_slli_pi16(mm2, 2); \
mm0 = _mm_or_si64(mm0, mm2); \
*(uint64_t *)p_buffer = (uint64_t)mm0; \
\
mm7 = _mm_unpackhi_pi8(mm7, mm4); \
mm5 = _mm_unpackhi_pi8(mm5, mm1); \
mm7 = _mm_slli_pi16(mm7, 2); \
mm5 = _mm_or_si64(mm5, mm7); \
*(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
#define MMX_UNPACK_16 \
mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc); \
mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
mm0 = _mm_srli_pi16(mm0, 3); \
mm4 = _mm_setzero_si64(); \
mm5 = mm0; \
mm7 = mm2; \
\
mm2 = _mm_unpacklo_pi8(mm2, mm4); \
mm0 = _mm_unpacklo_pi8(mm0, mm1); \
mm2 = _mm_slli_pi16(mm2, 3); \
mm0 = _mm_or_si64(mm0, mm2); \
*(uint64_t *)p_buffer = (uint64_t)mm0; \
\
mm7 = _mm_unpackhi_pi8(mm7, mm4); \
mm5 = _mm_unpackhi_pi8(mm5, mm1); \
mm7 = _mm_slli_pi16(mm7, 3); \
mm5 = _mm_or_si64(mm5, mm7); \
*(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
#define MMX_UNPACK_32_ARGB \
mm3 = _mm_setzero_si64(); \
mm4 = mm0; \
mm4 = _mm_unpacklo_pi8(mm4, mm2); \
mm5 = mm1; \
mm5 = _mm_unpacklo_pi8(mm5, mm3); \
mm6 = mm4; \
mm4 = _mm_unpacklo_pi16(mm4, mm5); \
*(uint64_t *)p_buffer = (uint64_t)mm4; \
mm6 = _mm_unpackhi_pi16(mm6, mm5); \
*(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
mm0 = _mm_unpackhi_pi8(mm0, mm2); \
mm1 = _mm_unpackhi_pi8(mm1, mm3); \
mm5 = mm0; \
mm5 = _mm_unpacklo_pi16(mm5, mm1); \
*(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;\
mm0 = _mm_unpackhi_pi16(mm0, mm1); \
*(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
#define MMX_UNPACK_32_RGBA \
mm3 = _mm_setzero_si64(); \
mm4 = mm2; \
mm4 = _mm_unpacklo_pi8(mm4, mm1); \
mm3 = _mm_unpacklo_pi8(mm3, mm0); \
mm5 = mm3; \
mm3 = _mm_unpacklo_pi16(mm3, mm4); \
*(uint64_t *)p_buffer = (uint64_t)mm3; \
mm5 = _mm_unpackhi_pi16(mm5, mm4); \
*(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
mm6 = _mm_setzero_si64(); \
mm2 = _mm_unpackhi_pi8(mm2, mm1); \
mm6 = _mm_unpackhi_pi8(mm6, mm0); \
mm0 = mm6; \
mm6 = _mm_unpacklo_pi16(mm6, mm2); \
*(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
mm0 = _mm_unpackhi_pi16(mm0, mm2); \
*(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
#define MMX_UNPACK_32_BGRA \
mm3 = _mm_setzero_si64(); \
mm4 = mm2; \
mm4 = _mm_unpacklo_pi8(mm4, mm0); \
mm3 = _mm_unpacklo_pi8(mm3, mm1); \
mm5 = mm3; \
mm3 = _mm_unpacklo_pi16(mm3, mm4); \
*(uint64_t *)p_buffer = (uint64_t)mm3; \
mm5 = _mm_unpackhi_pi16(mm5, mm4); \
*(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
mm6 = _mm_setzero_si64(); \
mm2 = _mm_unpackhi_pi8(mm2, mm0); \
mm6 = _mm_unpackhi_pi8(mm6, mm1); \
mm0 = mm6; \
mm6 = _mm_unpacklo_pi16(mm6, mm2); \
*(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
mm0 = _mm_unpackhi_pi16(mm0, mm2); \
*(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
#define MMX_UNPACK_32_ABGR \
mm3 = _mm_setzero_si64(); \
mm4 = mm1; \
mm4 = _mm_unpacklo_pi8(mm4, mm2); \
mm5 = mm0; \
mm5 = _mm_unpacklo_pi8(mm5, mm3); \
mm6 = mm4; \
mm4 = _mm_unpacklo_pi16(mm4, mm5); \
*(uint64_t *)p_buffer = (uint64_t)mm4; \
mm6 = _mm_unpackhi_pi16(mm6, mm5); \
*(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
mm1 = _mm_unpackhi_pi8(mm1, mm2); \
mm0 = _mm_unpackhi_pi8(mm0, mm3); \
mm2 = mm1; \
mm1 = _mm_unpacklo_pi16(mm1, mm0); \
*(uint64_t *)(p_buffer + 4) = (uint64_t)mm1;\
mm2 = _mm_unpackhi_pi16(mm2, mm0); \
*(uint64_t *)(p_buffer + 6) = (uint64_t)mm2;
#endif

View File

@ -46,6 +46,9 @@
#if defined (MODULE_NAME_IS_i420_yuy2)
# define DEST_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422,IUYV,Y211"
# define VLC_TARGET
#elif defined (MODULE_NAME_IS_i420_yuy2_mmx)
# define DEST_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422,IUYV"
# define VLC_TARGET VLC_MMX
#elif defined (MODULE_NAME_IS_i420_yuy2_sse2)
# define DEST_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422,IUYV"
# define VLC_TARGET VLC_SSE
@ -85,6 +88,10 @@ vlc_module_begin ()
set_description( N_("Conversions from " SRC_FOURCC " to " DEST_FOURCC) )
set_capability( "video converter", 80 )
# define vlc_CPU_capable() (true)
#elif defined (MODULE_NAME_IS_i420_yuy2_mmx)
set_description( N_("MMX conversions from " SRC_FOURCC " to " DEST_FOURCC) )
set_capability( "video converter", 160 )
# define vlc_CPU_capable() vlc_CPU_MMX()
#elif defined (MODULE_NAME_IS_i420_yuy2_sse2)
set_description( N_("SSE2 conversions from " SRC_FOURCC " to " DEST_FOURCC) )
set_capability( "video converter", 250 )
@ -305,10 +312,14 @@ static void I420_YUY2( filter_t *p_filter, picture_t *p_source,
for( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8; i_x-- ; )
{
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
C_YUV420_YUYV( );
C_YUV420_YUYV( );
C_YUV420_YUYV( );
C_YUV420_YUYV( );
#else
MMX_CALL( MMX_YUV420_YUYV );
#endif
}
for( i_x = ( (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) % 8 ) / 2; i_x-- ; )
{
@ -321,6 +332,11 @@ static void I420_YUY2( filter_t *p_filter, picture_t *p_source,
p_line2 += i_dest_margin;
}
#if defined (MODULE_NAME_IS_i420_yuy2_mmx)
/* re-enable FPU registers */
MMX_END;
#endif
#if defined (MODULE_NAME_IS_i420_yuy2_altivec)
}
#endif
@ -567,10 +583,14 @@ static void I420_YVYU( filter_t *p_filter, picture_t *p_source,
for( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8 ; i_x-- ; )
{
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
C_YUV420_YVYU( );
C_YUV420_YVYU( );
C_YUV420_YVYU( );
C_YUV420_YVYU( );
#else
MMX_CALL( MMX_YUV420_YVYU );
#endif
}
for( i_x = ( (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) % 8 ) / 2; i_x-- ; )
{
@ -583,6 +603,11 @@ static void I420_YVYU( filter_t *p_filter, picture_t *p_source,
p_line2 += i_dest_margin;
}
#if defined (MODULE_NAME_IS_i420_yuy2_mmx)
/* re-enable FPU registers */
MMX_END;
#endif
#if defined (MODULE_NAME_IS_i420_yuy2_altivec)
}
#endif
@ -829,10 +854,14 @@ static void I420_UYVY( filter_t *p_filter, picture_t *p_source,
for( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8 ; i_x-- ; )
{
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
C_YUV420_UYVY( );
C_YUV420_UYVY( );
C_YUV420_UYVY( );
C_YUV420_UYVY( );
#else
MMX_CALL( MMX_YUV420_UYVY );
#endif
}
for( i_x = ( (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) % 8 ) / 2; i_x--; )
{
@ -845,6 +874,11 @@ static void I420_UYVY( filter_t *p_filter, picture_t *p_source,
p_line2 += i_dest_margin;
}
#if defined (MODULE_NAME_IS_i420_yuy2_mmx)
/* re-enable FPU registers */
MMX_END;
#endif
#if defined (MODULE_NAME_IS_i420_yuy2_altivec)
}
#endif

View File

@ -22,7 +22,153 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
#if defined( MODULE_NAME_IS_i420_yuy2_sse2 )
#ifdef MODULE_NAME_IS_i420_yuy2_mmx
#if defined(CAN_COMPILE_MMX)
/* MMX assembly */
#define MMX_CALL(MMX_INSTRUCTIONS) \
do { \
__asm__ __volatile__( \
".p2align 3 \n\t \
movd (%0), %%mm1 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movd (%1), %%mm2 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
movq (%2), %%mm0 # Load 8 Y1 y7 y6 y5 y4 y3 y2 y1 y0 \n\
movq (%3), %%mm3 # Load 8 Y2 Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
" \
: \
: "r" (p_u), "r" (p_v), \
"r" (p_y1), "r" (p_y2) \
: "mm0", "mm1", "mm2", "mm3"); \
__asm__ __volatile__( \
".p2align 3 \n\t" \
MMX_INSTRUCTIONS \
: \
: "r" (p_line1), "r" (p_line2) \
: "mm0", "mm1", "mm2", "mm3"); \
p_line1 += 16; p_line2 += 16; \
p_y1 += 8; p_y2 += 8; \
p_u += 4; p_v += 4; \
} while(0)
#define MMX_END __asm__ __volatile__ ( "emms" )
#define MMX_YUV420_YUYV " \n\
punpcklbw %%mm2, %%mm1 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
movq %%mm0, %%mm2 # y7 y6 y5 y4 y3 y2 y1 y0 \n\
punpcklbw %%mm1, %%mm2 # v1 y3 u1 y2 v0 y1 u0 y0 \n\
movq %%mm2, (%0) # Store low YUYV \n\
punpckhbw %%mm1, %%mm0 # v3 y7 u3 y6 v2 y5 u2 y4 \n\
movq %%mm0, 8(%0) # Store high YUYV \n\
movq %%mm3, %%mm4 # Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
punpcklbw %%mm1, %%mm4 # v1 Y3 u1 Y2 v0 Y1 u0 Y0 \n\
movq %%mm4, (%1) # Store low YUYV \n\
punpckhbw %%mm1, %%mm3 # v3 Y7 u3 Y6 v2 Y5 u2 Y4 \n\
movq %%mm3, 8(%1) # Store high YUYV \n\
"
#define MMX_YUV420_YVYU " \n\
punpcklbw %%mm1, %%mm2 # u3 v3 u2 v2 u1 v1 u0 v0 \n\
movq %%mm0, %%mm1 # y7 y6 y5 y4 y3 y2 y1 y0 \n\
punpcklbw %%mm2, %%mm1 # u1 y3 v1 y2 u0 y1 v0 y0 \n\
movq %%mm1, (%0) # Store low YUYV \n\
punpckhbw %%mm2, %%mm0 # u3 y7 v3 y6 u2 y5 v2 y4 \n\
movq %%mm0, 8(%0) # Store high YUYV \n\
movq %%mm3, %%mm4 # Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
punpcklbw %%mm2, %%mm4 # u1 Y3 v1 Y2 u0 Y1 v0 Y0 \n\
movq %%mm4, (%1) # Store low YUYV \n\
punpckhbw %%mm2, %%mm3 # u3 Y7 v3 Y6 u2 Y5 v2 Y4 \n\
movq %%mm3, 8(%1) # Store high YUYV \n\
"
#define MMX_YUV420_UYVY " \n\
punpcklbw %%mm2, %%mm1 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
movq %%mm1, %%mm2 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
punpcklbw %%mm0, %%mm2 # y3 v1 y2 u1 y1 v0 y0 u0 \n\
movq %%mm2, (%0) # Store low UYVY \n\
movq %%mm1, %%mm2 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
punpckhbw %%mm0, %%mm2 # y7 v3 y6 u3 y5 v2 y4 u2 \n\
movq %%mm2, 8(%0) # Store high UYVY \n\
movq %%mm1, %%mm4 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
punpcklbw %%mm3, %%mm4 # Y3 v1 Y2 u1 Y1 v0 Y0 u0 \n\
movq %%mm4, (%1) # Store low UYVY \n\
punpckhbw %%mm3, %%mm1 # Y7 v3 Y6 u3 Y5 v2 Y4 u2 \n\
movq %%mm1, 8(%1) # Store high UYVY \n\
"
#elif defined(HAVE_MMX_INTRINSICS)
/* MMX intrinsics */
#include <mmintrin.h>
#define MMX_CALL(MMX_INSTRUCTIONS) \
do { \
__m64 mm0, mm1, mm2, mm3, mm4; \
MMX_INSTRUCTIONS \
p_line1 += 16; p_line2 += 16; \
p_y1 += 8; p_y2 += 8; \
p_u += 4; p_v += 4; \
} while(0)
#define MMX_END _mm_empty()
#define MMX_YUV420_YUYV \
mm1 = _mm_cvtsi32_si64(*(int*)p_u); \
mm2 = _mm_cvtsi32_si64(*(int*)p_v); \
mm0 = (__m64)*(uint64_t*)p_y1; \
mm3 = (__m64)*(uint64_t*)p_y2; \
mm1 = _mm_unpacklo_pi8(mm1, mm2); \
mm2 = mm0; \
mm2 = _mm_unpacklo_pi8(mm2, mm1); \
*(uint64_t*)p_line1 = (uint64_t)mm2; \
mm0 = _mm_unpackhi_pi8(mm0, mm1); \
*(uint64_t*)(p_line1+8) = (uint64_t)mm0;\
mm4 = mm3; \
mm4 = _mm_unpacklo_pi8(mm4, mm1); \
*(uint64_t*)p_line2 = (uint64_t)mm4; \
mm3 = _mm_unpackhi_pi8(mm3, mm1); \
*(uint64_t*)(p_line2+8) = (uint64_t)mm3;
#define MMX_YUV420_YVYU \
mm2 = _mm_cvtsi32_si64(*(int*)p_u); \
mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
mm0 = (__m64)*(uint64_t*)p_y1; \
mm3 = (__m64)*(uint64_t*)p_y2; \
mm1 = _mm_unpacklo_pi8(mm1, mm2); \
mm2 = mm0; \
mm2 = _mm_unpacklo_pi8(mm2, mm1); \
*(uint64_t*)p_line1 = (uint64_t)mm2; \
mm0 = _mm_unpackhi_pi8(mm0, mm1); \
*(uint64_t*)(p_line1+8) = (uint64_t)mm0;\
mm4 = mm3; \
mm4 = _mm_unpacklo_pi8(mm4, mm1); \
*(uint64_t*)p_line2 = (uint64_t)mm4; \
mm3 = _mm_unpackhi_pi8(mm3, mm1); \
*(uint64_t*)(p_line2+8) = (uint64_t)mm3;
#define MMX_YUV420_UYVY \
mm1 = _mm_cvtsi32_si64(*(int*)p_u); \
mm2 = _mm_cvtsi32_si64(*(int*)p_v); \
mm0 = (__m64)*(uint64_t*)p_y1; \
mm3 = (__m64)*(uint64_t*)p_y2; \
mm1 = _mm_unpacklo_pi8(mm1, mm2); \
mm2 = mm1; \
mm2 = _mm_unpacklo_pi8(mm2, mm0); \
*(uint64_t*)p_line1 = (uint64_t)mm2; \
mm2 = mm1; \
mm2 = _mm_unpackhi_pi8(mm2, mm0); \
*(uint64_t*)(p_line1+8) = (uint64_t)mm2;\
mm4 = mm1; \
mm4 = _mm_unpacklo_pi8(mm4, mm3); \
*(uint64_t*)p_line2 = (uint64_t)mm4; \
mm1 = _mm_unpackhi_pi8(mm1, mm3); \
*(uint64_t*)(p_line2+8) = (uint64_t)mm1;
#endif
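
What both the assembly and intrinsic variants above implement is the plain 4:2:0 to YUY2 interleave: two luma rows share one chroma row, and every output pixel pair is stored as Y U Y V (or Y V Y U / U Y V Y for the other layouts). A scalar sketch of one 8-pixel step (hypothetical helper, not part of the patch):

#include <stdint.h>

static void Yuv420ToYuyvStep(uint8_t *line1, uint8_t *line2,
                             const uint8_t *y1, const uint8_t *y2,
                             const uint8_t *u, const uint8_t *v)
{
    for (int i = 0; i < 4; i++)       /* 8 pixels per row, 2 per iteration */
    {
        line1[4 * i + 0] = y1[2 * i];
        line1[4 * i + 1] = u[i];      /* same chroma reused ...            */
        line1[4 * i + 2] = y1[2 * i + 1];
        line1[4 * i + 3] = v[i];
        line2[4 * i + 0] = y2[2 * i];
        line2[4 * i + 1] = u[i];      /* ... for the second luma row       */
        line2[4 * i + 2] = y2[2 * i + 1];
        line2[4 * i + 3] = v[i];
    }
}
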
#elif defined( MODULE_NAME_IS_i420_yuy2_sse2 )
#if defined(CAN_COMPILE_SSE2)

View File

@ -71,6 +71,11 @@ vlc_module_begin ()
set_capability( "video converter", 80 )
# define vlc_CPU_capable() (true)
# define VLC_TARGET
#elif defined (MODULE_NAME_IS_i422_yuy2_mmx)
set_description( N_("MMX conversions from " SRC_FOURCC " to " DEST_FOURCC) )
set_capability( "video converter", 100 )
# define vlc_CPU_capable() vlc_CPU_MMX()
# define VLC_TARGET VLC_MMX
#elif defined (MODULE_NAME_IS_i422_yuy2_sse2)
set_description( N_("SSE2 conversions from " SRC_FOURCC " to " DEST_FOURCC) )
set_capability( "video converter", 120 )
@ -279,10 +284,14 @@ static void I422_YUY2( filter_t *p_filter, picture_t *p_source,
{
for( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8 ; i_x-- ; )
{
#if defined (MODULE_NAME_IS_i422_yuy2)
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
#elif defined (MODULE_NAME_IS_i422_yuy2_mmx)
MMX_CALL( MMX_YUV422_YUYV );
#endif
}
for( i_x = ( (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) % 8 ) / 2; i_x-- ; )
{
@ -293,6 +302,9 @@ static void I422_YUY2( filter_t *p_filter, picture_t *p_source,
p_v += i_source_margin_c;
p_line += i_dest_margin;
}
#if defined (MODULE_NAME_IS_i422_yuy2_mmx)
MMX_END;
#endif
#endif
}
@ -421,10 +433,14 @@ static void I422_YVYU( filter_t *p_filter, picture_t *p_source,
{
for( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8 ; i_x-- ; )
{
#if defined (MODULE_NAME_IS_i422_yuy2)
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
#elif defined (MODULE_NAME_IS_i422_yuy2_mmx)
MMX_CALL( MMX_YUV422_YVYU );
#endif
}
for( i_x = ( (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) % 8 ) / 2; i_x-- ; )
{
@ -435,6 +451,9 @@ static void I422_YVYU( filter_t *p_filter, picture_t *p_source,
p_v += i_source_margin_c;
p_line += i_dest_margin;
}
#if defined (MODULE_NAME_IS_i422_yuy2_mmx)
MMX_END;
#endif
#endif
}
@ -563,10 +582,14 @@ static void I422_UYVY( filter_t *p_filter, picture_t *p_source,
{
for( i_x = (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) / 8 ; i_x-- ; )
{
#if defined (MODULE_NAME_IS_i422_yuy2)
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
#elif defined (MODULE_NAME_IS_i422_yuy2_mmx)
MMX_CALL( MMX_YUV422_UYVY );
#endif
}
for( i_x = ( (p_filter->fmt_in.video.i_x_offset + p_filter->fmt_in.video.i_visible_width) % 8 ) / 2; i_x-- ; )
{
@ -577,6 +600,9 @@ static void I422_UYVY( filter_t *p_filter, picture_t *p_source,
p_v += i_source_margin_c;
p_line += i_dest_margin;
}
#if defined (MODULE_NAME_IS_i422_yuy2_mmx)
MMX_END;
#endif
#endif
}

View File

@ -22,7 +22,115 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
#if defined( MODULE_NAME_IS_i422_yuy2_sse2 )
#ifdef MODULE_NAME_IS_i422_yuy2_mmx
#if defined(CAN_COMPILE_MMX)
/* MMX assembly */
#define MMX_CALL(MMX_INSTRUCTIONS) \
do { \
__asm__ __volatile__( \
".p2align 3 \n\t" \
MMX_INSTRUCTIONS \
: \
: "r" (p_line), "r" (p_y), \
"r" (p_u), "r" (p_v) \
: "mm0", "mm1", "mm2" ); \
p_line += 16; p_y += 8; \
p_u += 4; p_v += 4; \
} while(0)
#define MMX_END __asm__ __volatile__ ( "emms" )
#define MMX_YUV422_YUYV " \n\
movq (%1), %%mm0 # Load 8 Y y7 y6 y5 y4 y3 y2 y1 y0 \n\
movd (%2), %%mm1 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movd (%3), %%mm2 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
punpcklbw %%mm2, %%mm1 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
movq %%mm0, %%mm2 # y7 y6 y5 y4 y3 y2 y1 y0 \n\
punpcklbw %%mm1, %%mm2 # v1 y3 u1 y2 v0 y1 u0 y0 \n\
movq %%mm2, (%0) # Store low YUYV \n\
punpckhbw %%mm1, %%mm0 # v3 y7 u3 y6 v2 y5 u2 y4 \n\
movq %%mm0, 8(%0) # Store high YUYV \n\
"
#define MMX_YUV422_YVYU " \n\
movq (%1), %%mm0 # Load 8 Y y7 y6 y5 y4 y3 y2 y1 y0 \n\
movd (%2), %%mm2 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movd (%3), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
punpcklbw %%mm2, %%mm1 # u3 v3 u2 v2 u1 v1 u0 v0 \n\
movq %%mm0, %%mm2 # y7 y6 y5 y4 y3 y2 y1 y0 \n\
punpcklbw %%mm1, %%mm2 # u1 y3 v1 y2 u0 y1 v0 y0 \n\
movq %%mm2, (%0) # Store low YUYV \n\
punpckhbw %%mm1, %%mm0 # u3 y7 v3 y6 u2 y5 v2 y4 \n\
movq %%mm0, 8(%0) # Store high YUYV \n\
"
#define MMX_YUV422_UYVY " \n\
movq (%1), %%mm0 # Load 8 Y y7 y6 y5 y4 y3 y2 y1 y0 \n\
movd (%2), %%mm1 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movd (%3), %%mm2 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
punpcklbw %%mm2, %%mm1 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
movq %%mm1, %%mm2 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
punpcklbw %%mm0, %%mm2 # y3 v1 y2 u1 y1 v0 y0 u0 \n\
movq %%mm2, (%0) # Store low UYVY \n\
punpckhbw %%mm0, %%mm1 # y7 v3 y6 u3 y5 v2 y4 u2 \n\
movq %%mm1, 8(%0) # Store high UYVY \n\
"
#elif defined(HAVE_MMX_INTRINSICS)
/* MMX intrinsics */
#include <mmintrin.h>
#define MMX_CALL(MMX_INSTRUCTIONS) \
do { \
__m64 mm0, mm1, mm2; \
MMX_INSTRUCTIONS \
p_line += 16; p_y += 8; \
p_u += 4; p_v += 4; \
} while(0)
#define MMX_END _mm_empty()
#define MMX_YUV422_YUYV \
mm0 = (__m64)*(uint64_t*)p_y; \
mm1 = _mm_cvtsi32_si64(*(int*)p_u); \
mm2 = _mm_cvtsi32_si64(*(int*)p_v); \
mm1 = _mm_unpacklo_pi8(mm1, mm2); \
mm2 = mm0; \
mm2 = _mm_unpacklo_pi8(mm2, mm1); \
*(uint64_t*)p_line = (uint64_t)mm2; \
mm0 = _mm_unpackhi_pi8(mm0, mm1); \
*(uint64_t*)(p_line+8) = (uint64_t)mm0;
#define MMX_YUV422_YVYU \
mm0 = (__m64)*(uint64_t*)p_y; \
mm2 = _mm_cvtsi32_si64(*(int*)p_u); \
mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
mm1 = _mm_unpacklo_pi8(mm1, mm2); \
mm2 = mm0; \
mm2 = _mm_unpacklo_pi8(mm2, mm1); \
*(uint64_t*)p_line = (uint64_t)mm2; \
mm0 = _mm_unpackhi_pi8(mm0, mm1); \
*(uint64_t*)(p_line+8) = (uint64_t)mm0;
#define MMX_YUV422_UYVY \
mm0 = (__m64)*(uint64_t*)p_y; \
mm1 = _mm_cvtsi32_si64(*(int*)p_u); \
mm2 = _mm_cvtsi32_si64(*(int*)p_v); \
mm1 = _mm_unpacklo_pi8(mm1, mm2); \
mm2 = mm1; \
mm2 = _mm_unpacklo_pi8(mm2, mm0); \
*(uint64_t*)p_line = (uint64_t)mm2; \
mm1 = _mm_unpackhi_pi8(mm1, mm0); \
*(uint64_t*)(p_line+8) = (uint64_t)mm1;
#endif
#elif defined( MODULE_NAME_IS_i422_yuy2_sse2 )
#if defined(CAN_COMPILE_SSE2)
@ -381,3 +489,4 @@ vmovdqu [[l]+32], ymm1 ; Store high UYVY \
*(p_line)++ = *(p_y); p_y += 2; \
*(p_line)++ = *(p_v) - 0x80; p_v += 2; \