Use "d" suffix for general-purpose registers used with movd.

This increases compatibility with nasm and is also more consistent,
e.g. with h264_intrapred.asm and h264_chromamc.asm, which already
do it that way.
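
The practical issue is operand size: with x86inc.asm, a bare rN expands to a
full-width register (64-bit on x86-64), while movd only moves 32 bits between
a general-purpose and an MMX/XMM register. yasm quietly accepts the 64-bit
spelling, but nasm rejects it (the 64-bit form is movq's job), so the explicit
dword alias rNd keeps both assemblers happy. A minimal standalone sketch, using
raw register names for illustration rather than anything from the patch:

    movd    xmm0, edx       ; 32-bit GPR source: accepted by nasm and yasm alike
    ;movd   xmm0, rdx       ; 64-bit GPR source: nasm errors out (movq territory)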

Originally committed as revision 25042 to svn://svn.ffmpeg.org/ffmpeg/trunk
Reimar Döffinger 2010-09-05 10:10:16 +00:00
parent 87db37356c
commit b1c32fb5e5
4 changed files with 30 additions and 30 deletions

@@ -40,9 +40,9 @@ SECTION .text
 %macro WEIGHT_SETUP 0
     add        r4, r4
     inc        r4
-    movd       m3, r3
-    movd       m5, r4
-    movd       m6, r2
+    movd       m3, r3d
+    movd       m5, r4d
+    movd       m6, r2d
     pslld      m5, m6
     psrld      m5, 1
 %if mmsize == 16
@@ -156,10 +156,10 @@ WEIGHT_FUNC_HALF_MM 8, 4, 16, 8, sse2
     add        r6, 1
     or         r6, 1
     add        r3, 1
-    movd       m3, r4
-    movd       m4, r5
-    movd       m5, r6
-    movd       m6, r3
+    movd       m3, r4d
+    movd       m4, r5d
+    movd       m5, r6d
+    movd       m6, r3d
     pslld      m5, m6
     psrld      m5, 1
 %if mmsize == 16
@@ -291,10 +291,10 @@ BIWEIGHT_FUNC_HALF_MM 8, 4, 16, 8, sse2
     add        r6, 1
     or         r6, 1
     add        r3, 1
-    movd       m4, r4
-    movd       m0, r5
-    movd       m5, r6
-    movd       m6, r3
+    movd       m4, r4d
+    movd       m0, r5d
+    movd       m5, r6d
+    movd       m6, r3d
     pslld      m5, m6
     psrld      m5, 1
     punpcklbw  m4, m0

@@ -36,7 +36,7 @@ section .text
 %endmacro

 %macro STORE_4_WORDS_MMX 6
-    movd       %6, %5
+    movd       %6d, %5
 %if mmsize==16
     psrldq     %5, 4
 %else
@@ -45,7 +45,7 @@ section .text
     mov        %1, %6w
     shr        %6, 16
     mov        %2, %6w
-    movd       %6, %5
+    movd       %6d, %5
     mov        %3, %6w
     shr        %6, 16
     mov        %4, %6w
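
As a reading aid: STORE_4_WORDS_MMX scatters the four 16-bit words of register
%5 to the four destinations %1 through %4, pulling them out 32 bits at a time
through the scratch GPR %6 (which, after this change, is accessed as %6d by
movd). A hypothetical invocation, with operands invented purely for
illustration:

    ; write the four words of m0 to four pixel rows, r2 as GPR scratch
    STORE_4_WORDS_MMX [r0-2], [r0+r1-2], [r0+r1*2-2], [r0+r3-2], m0, r2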
@@ -88,7 +88,7 @@ section .text
     pxor       m7, m3     ; d_sign ^= a0_sign
     pxor       m5, m5
-    movd       m3, r2
+    movd       m3, r2d
 %if %1 > 4
     punpcklbw  m3, m3
 %endif

@@ -93,12 +93,12 @@ SECTION .text
 %endmacro

 %macro STORE_4_WORDS 1
-    movd       r2, %1
+    movd       r2d, %1
     mov        [r0     -1], r2w
     psrlq      %1, 32
     shr        r2, 16
     mov        [r0+r1  -1], r2w
-    movd       r2, %1
+    movd       r2d, %1
     mov        [r0+r1*2-1], r2w
     shr        r2, 16
     mov        [r0+r3  -1], r2w
@@ -606,7 +606,7 @@ cglobal vp3_idct_dc_add_mmx2, 3, 4
     movsx      r2, word [r2]
     add        r2, 15
     sar        r2, 5
-    movd       m0, r2
+    movd       m0, r2d
     pshufw     m0, m0, 0x0
     pxor       m1, m1
     psubw      m1, m0

@@ -1342,7 +1342,7 @@ VP8_DC_WHT sse
     psrldq     m%2, 4
 %if %10 == 8
     movd       [%5+%8*2], m%1
-    movd       %5, m%3
+    movd       %5d, m%3
 %endif
     psrldq     m%3, 4
     psrldq     m%4, 4
@@ -1379,26 +1379,26 @@ VP8_DC_WHT sse
 ; 4 is a pointer to the destination's 4th line
 ; 5/6 is -stride and +stride
 %macro WRITE_2x4W 6
-    movd       %3, %1
+    movd       %3d, %1
     punpckhdq  %1, %1
     mov        [%4+%5*4], %3w
     shr        %3, 16
     add        %4, %6
     mov        [%4+%5*4], %3w
-    movd       %3, %1
+    movd       %3d, %1
     add        %4, %5
     mov        [%4+%5*2], %3w
     shr        %3, 16
     mov        [%4+%5  ], %3w
-    movd       %3, %2
+    movd       %3d, %2
     punpckhdq  %2, %2
     mov        [%4     ], %3w
     shr        %3, 16
     mov        [%4+%6  ], %3w
-    movd       %3, %2
+    movd       %3d, %2
     add        %4, %6
     mov        [%4+%6  ], %3w
     shr        %3, 16
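
The parameter comments above give the calling convention: %1/%2 hold the eight
words to store, %3 is the GPR scratch (now used as %3d with movd), %4 points at
the destination's 4th line, and %5/%6 are -stride and +stride. A hypothetical
call, register choices illustrative only:

    ; r4 = dst + 3*stride, r1 = -stride, r3 = +stride, r2 as GPR scratch
    WRITE_2x4W m1, m2, r2, r4, r1, r3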
@@ -1407,27 +1407,27 @@ VP8_DC_WHT sse
 %endmacro

 %macro WRITE_8W_SSE2 5
-    movd       %2, %1
+    movd       %2d, %1
     psrldq     %1, 4
     mov        [%3+%4*4], %2w
     shr        %2, 16
     add        %3, %5
     mov        [%3+%4*4], %2w
-    movd       %2, %1
+    movd       %2d, %1
     psrldq     %1, 4
     add        %3, %4
     mov        [%3+%4*2], %2w
     shr        %2, 16
     mov        [%3+%4  ], %2w
-    movd       %2, %1
+    movd       %2d, %1
     psrldq     %1, 4
     mov        [%3     ], %2w
     shr        %2, 16
     mov        [%3+%5  ], %2w
-    movd       %2, %1
+    movd       %2d, %1
     add        %3, %5
     mov        [%3+%5  ], %2w
     shr        %2, 16
@@ -1446,27 +1446,27 @@ VP8_DC_WHT sse
 %endmacro

 %macro SPLATB_REG_MMX 2-3
-    movd       %1, %2
+    movd       %1, %2d
     punpcklbw  %1, %1
     punpcklwd  %1, %1
     punpckldq  %1, %1
 %endmacro

 %macro SPLATB_REG_MMXEXT 2-3
-    movd       %1, %2
+    movd       %1, %2d
     punpcklbw  %1, %1
     pshufw     %1, %1, 0x0
 %endmacro

 %macro SPLATB_REG_SSE2 2-3
-    movd       %1, %2
+    movd       %1, %2d
     punpcklbw  %1, %1
     pshuflw    %1, %1, 0x0
     punpcklqdq %1, %1
 %endmacro

 %macro SPLATB_REG_SSSE3 3
-    movd       %1, %2
+    movd       %1, %2d
     pshufb     %1, %3
 %endmacro
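
All four SPLATB_REG variants compute the same thing: the low byte of GPR %2
broadcast into every byte lane of %1. The pre-SSSE3 paths first pair the byte
up with punpcklbw and then spread it with further unpacks or a word shuffle,
while the SSSE3 path takes a third argument: an all-zero pshufb mask, which
selects byte 0 for every output lane. A hypothetical use of the SSSE3 variant,
with the mask built inline purely for illustration:

    pxor             m7, m7        ; all-zero shuffle mask: every lane picks byte 0
    SPLATB_REG_SSSE3 m3, r2, m7    ; m3 = low byte of r2d replicated across all lanes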