1
mirror of https://github.com/mpv-player/mpv synced 2025-05-09 10:21:43 +02:00

Replace all occurrences of '__volatile__' and '__volatile' with plain 'volatile'.

We were using an inconsistent mix of the three variants and 'volatile' should
be the most correct and portable variant.


git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@27791 b3059339-0415-0410-9bf9-f77b7e298cf2
This commit is contained in:
diego 2008-10-16 20:17:56 +00:00
parent 2195547220
commit 26b29f4f2d
25 changed files with 101 additions and 101 deletions

@ -86,7 +86,7 @@ static unsigned int GetTimer(){
static inline unsigned long long int read_tsc( void ) static inline unsigned long long int read_tsc( void )
{ {
unsigned long long int retval; unsigned long long int retval;
__asm__ __volatile ("rdtsc":"=A"(retval)::"memory"); __asm__ volatile ("rdtsc":"=A"(retval)::"memory");
return retval; return retval;
} }

10
configure vendored

@ -1595,7 +1595,7 @@ if x86 && test "$_runtime_cpudetection" = no ; then
void catch() { exit(1); } void catch() { exit(1); }
int main(void) { int main(void) {
signal(SIGILL, catch); signal(SIGILL, catch);
__asm__ __volatile__ ("$3":::"memory"); return 0; __asm__ volatile ("$3":::"memory"); return 0;
} }
EOF EOF
@ -2443,7 +2443,7 @@ if arm ; then
echocheck "ARMv5TE (Enhanced DSP Extensions)" echocheck "ARMv5TE (Enhanced DSP Extensions)"
if test $_armv5te = "auto" ; then if test $_armv5te = "auto" ; then
cat > $TMPC << EOF cat > $TMPC << EOF
int main(void) { __asm__ __volatile__ ("qadd r0, r0, r0"); return 0; } int main(void) { __asm__ volatile ("qadd r0, r0, r0"); return 0; }
EOF EOF
_armv5te=no _armv5te=no
cc_check && _armv5te=yes cc_check && _armv5te=yes
@ -2453,7 +2453,7 @@ EOF
echocheck "ARMv6 (SIMD instructions)" echocheck "ARMv6 (SIMD instructions)"
if test $_armv6 = "auto" ; then if test $_armv6 = "auto" ; then
cat > $TMPC << EOF cat > $TMPC << EOF
int main(void) { __asm__ __volatile__ ("sadd16 r0, r0, r0"); return 0; } int main(void) { __asm__ volatile ("sadd16 r0, r0, r0"); return 0; }
EOF EOF
_armv6=no _armv6=no
cc_check && _armv6=yes cc_check && _armv6=yes
@ -2463,7 +2463,7 @@ EOF
echocheck "ARM VFP" echocheck "ARM VFP"
if test $_armvfp = "auto" ; then if test $_armvfp = "auto" ; then
cat > $TMPC << EOF cat > $TMPC << EOF
int main(void) { __asm__ __volatile__ ("fadds s0, s0, s0"); return 0; } int main(void) { __asm__ volatile ("fadds s0, s0, s0"); return 0; }
EOF EOF
_armvfp=no _armvfp=no
cc_check && _armvfp=yes cc_check && _armvfp=yes
@ -2473,7 +2473,7 @@ EOF
echocheck "iWMMXt (Intel XScale SIMD instructions)" echocheck "iWMMXt (Intel XScale SIMD instructions)"
if test $_iwmmxt = "auto" ; then if test $_iwmmxt = "auto" ; then
cat > $TMPC << EOF cat > $TMPC << EOF
int main(void) { __asm__ __volatile__ ("wunpckelub wr6, wr4"); return 0; } int main(void) { __asm__ volatile ("wunpckelub wr6, wr4"); return 0; }
EOF EOF
_iwmmxt=no _iwmmxt=no
cc_check && _iwmmxt=yes cc_check && _iwmmxt=yes

@ -57,7 +57,7 @@ static int has_cpuid(void)
long a, c; long a, c;
// code from libavcodec: // code from libavcodec:
__asm__ __volatile__ ( __asm__ volatile (
/* See if CPUID instruction is supported ... */ /* See if CPUID instruction is supported ... */
/* ... Get copies of EFLAGS into eax and ecx */ /* ... Get copies of EFLAGS into eax and ecx */
"pushf\n\t" "pushf\n\t"
@ -85,14 +85,14 @@ static void
do_cpuid(unsigned int ax, unsigned int *p) do_cpuid(unsigned int ax, unsigned int *p)
{ {
#if 0 #if 0
__asm__ __volatile( __asm__ volatile(
"cpuid;" "cpuid;"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3]) : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax) : "0" (ax)
); );
#else #else
// code from libavcodec: // code from libavcodec:
__asm__ __volatile__ __asm__ volatile
("mov %%"REG_b", %%"REG_S"\n\t" ("mov %%"REG_b", %%"REG_S"\n\t"
"cpuid\n\t" "cpuid\n\t"
"xchg %%"REG_b", %%"REG_S "xchg %%"REG_b", %%"REG_S
@ -400,7 +400,7 @@ static void check_os_katmai_support( void )
if ( gCpuCaps.hasSSE ) { if ( gCpuCaps.hasSSE ) {
mp_msg(MSGT_CPUDETECT,MSGL_V, "Testing OS support for SSE... " ); mp_msg(MSGT_CPUDETECT,MSGL_V, "Testing OS support for SSE... " );
exc_fil = SetUnhandledExceptionFilter(win32_sig_handler_sse); exc_fil = SetUnhandledExceptionFilter(win32_sig_handler_sse);
__asm__ __volatile ("xorps %xmm0, %xmm0"); __asm__ volatile ("xorps %xmm0, %xmm0");
SetUnhandledExceptionFilter(exc_fil); SetUnhandledExceptionFilter(exc_fil);
mp_msg(MSGT_CPUDETECT,MSGL_V, gCpuCaps.hasSSE ? "yes.\n" : "no!\n" ); mp_msg(MSGT_CPUDETECT,MSGL_V, gCpuCaps.hasSSE ? "yes.\n" : "no!\n" );
} }
@ -409,7 +409,7 @@ static void check_os_katmai_support( void )
if ( gCpuCaps.hasSSE ) { if ( gCpuCaps.hasSSE ) {
mp_msg(MSGT_CPUDETECT,MSGL_V, "Testing OS support for SSE... " ); mp_msg(MSGT_CPUDETECT,MSGL_V, "Testing OS support for SSE... " );
DosSetExceptionHandler( &RegRec ); DosSetExceptionHandler( &RegRec );
__asm__ __volatile ("xorps %xmm0, %xmm0"); __asm__ volatile ("xorps %xmm0, %xmm0");
DosUnsetExceptionHandler( &RegRec ); DosUnsetExceptionHandler( &RegRec );
mp_msg(MSGT_CPUDETECT,MSGL_V, gCpuCaps.hasSSE ? "yes.\n" : "no!\n" ); mp_msg(MSGT_CPUDETECT,MSGL_V, gCpuCaps.hasSSE ? "yes.\n" : "no!\n" );
} }
@ -432,8 +432,8 @@ static void check_os_katmai_support( void )
if ( gCpuCaps.hasSSE ) { if ( gCpuCaps.hasSSE ) {
mp_msg(MSGT_CPUDETECT,MSGL_V, "Testing OS support for SSE... " ); mp_msg(MSGT_CPUDETECT,MSGL_V, "Testing OS support for SSE... " );
// __asm__ __volatile ("xorps %%xmm0, %%xmm0"); // __asm__ volatile ("xorps %%xmm0, %%xmm0");
__asm__ __volatile ("xorps %xmm0, %xmm0"); __asm__ volatile ("xorps %xmm0, %xmm0");
mp_msg(MSGT_CPUDETECT,MSGL_V, gCpuCaps.hasSSE ? "yes.\n" : "no!\n" ); mp_msg(MSGT_CPUDETECT,MSGL_V, gCpuCaps.hasSSE ? "yes.\n" : "no!\n" );
} }

@ -129,7 +129,7 @@ static int swap_fourcc __initdata = 0;
static inline double FastSin(double x) static inline double FastSin(double x)
{ {
register double res; register double res;
__asm__ __volatile("fsin":"=t"(res):"0"(x)); __asm__ volatile("fsin":"=t"(res):"0"(x));
return res; return res;
} }
#undef sin #undef sin
@ -138,7 +138,7 @@ static inline double FastSin(double x)
static inline double FastCos(double x) static inline double FastCos(double x)
{ {
register double res; register double res;
__asm__ __volatile("fcos":"=t"(res):"0"(x)); __asm__ volatile("fcos":"=t"(res):"0"(x));
return res; return res;
} }
#undef cos #undef cos

@ -45,7 +45,7 @@ static void FFT_4_3DNOW(complex_t *x)
/* delta_p = 1 here */ /* delta_p = 1 here */
/* x[k] = sum_{i=0..3} x[i] * w^{i*k}, w=e^{-2*pi/4} /* x[k] = sum_{i=0..3} x[i] * w^{i*k}, w=e^{-2*pi/4}
*/ */
__asm__ __volatile__( __asm__ volatile(
"movq 24(%1), %%mm3\n\t" "movq 24(%1), %%mm3\n\t"
"movq 8(%1), %%mm1\n\t" "movq 8(%1), %%mm1\n\t"
"pxor %2, %%mm3\n\t" /* mm3.re | -mm3.im */ "pxor %2, %%mm3\n\t" /* mm3.re | -mm3.im */
@ -90,7 +90,7 @@ static void FFT_8_3DNOW(complex_t *x)
*/ */
complex_t wT1, wB1, wB2; complex_t wT1, wB1, wB2;
__asm__ __volatile__( __asm__ volatile(
"movq 8(%2), %%mm0\n\t" "movq 8(%2), %%mm0\n\t"
"movq 24(%2), %%mm1\n\t" "movq 24(%2), %%mm1\n\t"
"movq %%mm0, %0\n\t" /* wT1 = x[1]; */ "movq %%mm0, %0\n\t" /* wT1 = x[1]; */
@ -99,7 +99,7 @@ static void FFT_8_3DNOW(complex_t *x)
:"r"(x) :"r"(x)
:"memory"); :"memory");
__asm__ __volatile__( __asm__ volatile(
"movq 16(%0), %%mm2\n\t" "movq 16(%0), %%mm2\n\t"
"movq 32(%0), %%mm3\n\t" "movq 32(%0), %%mm3\n\t"
"movq %%mm2, 8(%0)\n\t" /* x[1] = x[2]; */ "movq %%mm2, 8(%0)\n\t" /* x[1] = x[2]; */
@ -114,7 +114,7 @@ static void FFT_8_3DNOW(complex_t *x)
/* x[0] x[4] x[2] x[6] */ /* x[0] x[4] x[2] x[6] */
__asm__ __volatile__( __asm__ volatile(
"movq 40(%1), %%mm0\n\t" "movq 40(%1), %%mm0\n\t"
"movq %%mm0, %%mm3\n\t" "movq %%mm0, %%mm3\n\t"
"movq 56(%1), %%mm1\n\t" "movq 56(%1), %%mm1\n\t"
@ -153,7 +153,7 @@ static void FFT_8_3DNOW(complex_t *x)
:"memory"); :"memory");
/* x[1] x[5] */ /* x[1] x[5] */
__asm__ __volatile__ ( __asm__ volatile (
"movq %6, %%mm6\n\t" "movq %6, %%mm6\n\t"
"movq %5, %%mm7\n\t" "movq %5, %%mm7\n\t"
"movq %1, %%mm0\n\t" "movq %1, %%mm0\n\t"
@ -203,7 +203,7 @@ static void FFT_8_3DNOW(complex_t *x)
/* x[3] x[7] */ /* x[3] x[7] */
__asm__ __volatile__( __asm__ volatile(
"movq %1, %%mm0\n\t" "movq %1, %%mm0\n\t"
#ifdef HAVE_3DNOWEX #ifdef HAVE_3DNOWEX
"pswapd %3, %%mm1\n\t" "pswapd %3, %%mm1\n\t"
@ -358,13 +358,13 @@ imdct_do_512_3dnow
/* Pre IFFT complex multiply plus IFFT cmplx conjugate & reordering*/ /* Pre IFFT complex multiply plus IFFT cmplx conjugate & reordering*/
#if 1 #if 1
__asm__ __volatile__ ( __asm__ volatile (
"movq %0, %%mm7\n\t" "movq %0, %%mm7\n\t"
::"m"(x_plus_minus_3dnow) ::"m"(x_plus_minus_3dnow)
:"memory"); :"memory");
for( i=0; i < 128; i++) { for( i=0; i < 128; i++) {
int j = pm128[i]; int j = pm128[i];
__asm__ __volatile__ ( __asm__ volatile (
"movd %1, %%mm0\n\t" "movd %1, %%mm0\n\t"
"movd %3, %%mm1\n\t" "movd %3, %%mm1\n\t"
"punpckldq %2, %%mm0\n\t" /* mm0 = data[256-2*j-1] | data[2*j]*/ "punpckldq %2, %%mm0\n\t" /* mm0 = data[256-2*j-1] | data[2*j]*/
@ -394,7 +394,7 @@ imdct_do_512_3dnow
buf[i].im = (data[256-2*j-1] * xsin1[j] + data[2*j] * xcos1[j])*(-1.0);*/ buf[i].im = (data[256-2*j-1] * xsin1[j] + data[2*j] * xcos1[j])*(-1.0);*/
} }
#else #else
__asm__ __volatile__ ("femms":::"memory"); __asm__ volatile ("femms":::"memory");
for( i=0; i < 128; i++) { for( i=0; i < 128; i++) {
/* z[i] = (X[256-2*i-1] + j * X[2*i]) * (xcos1[i] + j * xsin1[i]) ; */ /* z[i] = (X[256-2*i-1] + j * X[2*i]) * (xcos1[i] + j * xsin1[i]) ; */
int j= pm128[i]; int j= pm128[i];
@ -435,14 +435,14 @@ imdct_do_512_3dnow
/* Post IFFT complex multiply plus IFFT complex conjugate*/ /* Post IFFT complex multiply plus IFFT complex conjugate*/
#if 1 #if 1
__asm__ __volatile__ ( __asm__ volatile (
"movq %0, %%mm7\n\t" "movq %0, %%mm7\n\t"
"movq %1, %%mm6\n\t" "movq %1, %%mm6\n\t"
::"m"(x_plus_minus_3dnow), ::"m"(x_plus_minus_3dnow),
"m"(x_minus_plus_3dnow) "m"(x_minus_plus_3dnow)
:"eax","memory"); :"eax","memory");
for (i=0; i < 128; i++) { for (i=0; i < 128; i++) {
__asm__ __volatile__ ( __asm__ volatile (
"movq %1, %%mm0\n\t" /* ac3_buf[i].re | ac3_buf[i].im */ "movq %1, %%mm0\n\t" /* ac3_buf[i].re | ac3_buf[i].im */
"movq %%mm0, %%mm1\n\t" /* ac3_buf[i].re | ac3_buf[i].im */ "movq %%mm0, %%mm1\n\t" /* ac3_buf[i].re | ac3_buf[i].im */
#ifndef HAVE_3DNOWEX #ifndef HAVE_3DNOWEX
@ -473,7 +473,7 @@ imdct_do_512_3dnow
ac3_buf[i].im =(tmp_a_r * ac3_xsin1[i]) - (tmp_a_i * ac3_xcos1[i]);*/ ac3_buf[i].im =(tmp_a_r * ac3_xsin1[i]) - (tmp_a_i * ac3_xcos1[i]);*/
} }
#else #else
__asm__ __volatile__ ("femms":::"memory"); __asm__ volatile ("femms":::"memory");
for( i=0; i < 128; i++) { for( i=0; i < 128; i++) {
/* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */ /* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */
tmp_a_r = buf[i].real; tmp_a_r = buf[i].real;
@ -496,7 +496,7 @@ imdct_do_512_3dnow
); );
for (i=0; i< 64; i++) { for (i=0; i< 64; i++) {
/* merge two loops in one to enable working of 2 decoders */ /* merge two loops in one to enable working of 2 decoders */
__asm__ __volatile__ ( __asm__ volatile (
"movd 516(%1), %%mm0\n\t" "movd 516(%1), %%mm0\n\t"
"movd (%1), %%mm1\n\t" /**data_ptr++=-buf[64+i].im**window_ptr+++*delay_ptr++;*/ "movd (%1), %%mm1\n\t" /**data_ptr++=-buf[64+i].im**window_ptr+++*delay_ptr++;*/
"punpckldq (%2), %%mm0\n\t"/*data_ptr[128]=-buf[i].re*window_ptr[128]+delay_ptr[128];*/ "punpckldq (%2), %%mm0\n\t"/*data_ptr[128]=-buf[i].re*window_ptr[128]+delay_ptr[128];*/
@ -520,7 +520,7 @@ imdct_do_512_3dnow
} }
window_ptr += 128; window_ptr += 128;
#else #else
__asm__ __volatile__ ("femms":::"memory"); __asm__ volatile ("femms":::"memory");
for(i=0; i< 64; i++) { for(i=0; i< 64; i++) {
*data_ptr++ = -buf[64+i].imag * *window_ptr++ + *delay_ptr++ + bias; *data_ptr++ = -buf[64+i].imag * *window_ptr++ + *delay_ptr++ + bias;
*data_ptr++ = buf[64-i-1].real * *window_ptr++ + *delay_ptr++ + bias; *data_ptr++ = buf[64-i-1].real * *window_ptr++ + *delay_ptr++ + bias;
@ -538,7 +538,7 @@ imdct_do_512_3dnow
for(i=0; i< 64; i++) { for(i=0; i< 64; i++) {
/* merge two loops in one to enable working of 2 decoders */ /* merge two loops in one to enable working of 2 decoders */
window_ptr -=2; window_ptr -=2;
__asm__ __volatile__( __asm__ volatile(
"movd 508(%1), %%mm0\n\t" "movd 508(%1), %%mm0\n\t"
"movd (%1), %%mm1\n\t" "movd (%1), %%mm1\n\t"
"punpckldq (%2), %%mm0\n\t" "punpckldq (%2), %%mm0\n\t"
@ -565,9 +565,9 @@ imdct_do_512_3dnow
:"memory"); :"memory");
delay_ptr += 2; delay_ptr += 2;
} }
__asm__ __volatile__ ("femms":::"memory"); __asm__ volatile ("femms":::"memory");
#else #else
__asm__ __volatile__ ("femms":::"memory"); __asm__ volatile ("femms":::"memory");
for(i=0; i< 64; i++) { for(i=0; i< 64; i++) {
*delay_ptr++ = -buf[64+i].real * *--window_ptr; *delay_ptr++ = -buf[64+i].real * *--window_ptr;
*delay_ptr++ = buf[64-i-1].imag * *--window_ptr; *delay_ptr++ = buf[64-i-1].imag * *--window_ptr;

@ -39,7 +39,7 @@ typedef struct
}i_cmplx_t; }i_cmplx_t;
#define TRANS_FILL_MM6_MM7_3DNOW()\ #define TRANS_FILL_MM6_MM7_3DNOW()\
__asm__ __volatile__(\ __asm__ volatile(\
"movq %1, %%mm7\n\t"\ "movq %1, %%mm7\n\t"\
"movq %0, %%mm6\n\t"\ "movq %0, %%mm6\n\t"\
::"m"(x_plus_minus_3dnow),\ ::"m"(x_plus_minus_3dnow),\
@ -66,7 +66,7 @@ typedef struct
#define TRANSZERO_3DNOW(A0,A4,A8,A12) \ #define TRANSZERO_3DNOW(A0,A4,A8,A12) \
{ \ { \
__asm__ __volatile__(\ __asm__ volatile(\
"movq %4, %%mm0\n\t" /* mm0 = wTB[0]*/\ "movq %4, %%mm0\n\t" /* mm0 = wTB[0]*/\
"movq %5, %%mm1\n\t" /* mm1 = wTB[k*2]*/ \ "movq %5, %%mm1\n\t" /* mm1 = wTB[k*2]*/ \
"movq %%mm0, %%mm5\n\t"/*u.re = wTB[0].re + wTB[k*2].re;*/\ "movq %%mm0, %%mm5\n\t"/*u.re = wTB[0].re + wTB[k*2].re;*/\
@ -95,7 +95,7 @@ typedef struct
#define TRANSHALF_16_3DNOW(A2,A6,A10,A14)\ #define TRANSHALF_16_3DNOW(A2,A6,A10,A14)\
{\ {\
__asm__ __volatile__(\ __asm__ volatile(\
"movq %4, %%mm0\n\t"/*u.re = wTB[2].im + wTB[2].re;*/\ "movq %4, %%mm0\n\t"/*u.re = wTB[2].im + wTB[2].re;*/\
"movq %%mm0, %%mm1\n\t"\ "movq %%mm0, %%mm1\n\t"\
"pxor %%mm7, %%mm1\n\t"\ "pxor %%mm7, %%mm1\n\t"\
@ -136,7 +136,7 @@ typedef struct
#define TRANS_3DNOW(A1,A5,A9,A13,WT,WB,D,D3)\ #define TRANS_3DNOW(A1,A5,A9,A13,WT,WB,D,D3)\
{ \ { \
__asm__ __volatile__(\ __asm__ volatile(\
"movq %1, %%mm4\n\t"\ "movq %1, %%mm4\n\t"\
"movq %%mm4, %%mm5\n\t"\ "movq %%mm4, %%mm5\n\t"\
"punpckldq %%mm4, %%mm4\n\t"/*mm4 = D.re | D.re */\ "punpckldq %%mm4, %%mm4\n\t"/*mm4 = D.re | D.re */\
@ -166,7 +166,7 @@ typedef struct
:\ :\
:"m"(WT), "m"(D), "m"(WB), "m"(D3)\ :"m"(WT), "m"(D), "m"(WB), "m"(D3)\
:"memory");\ :"memory");\
__asm__ __volatile__(\ __asm__ volatile(\
"movq %4, %%mm0\n\t"/* a1 = A1*/\ "movq %4, %%mm0\n\t"/* a1 = A1*/\
"movq %5, %%mm2\n\t"/* a1 = A5*/\ "movq %5, %%mm2\n\t"/* a1 = A5*/\
"movq %%mm0, %%mm1\n\t"\ "movq %%mm0, %%mm1\n\t"\

@ -374,10 +374,10 @@ void *decode_video(sh_video_t *sh_video, unsigned char *start, int in_size,
// some codecs are broken, and doesn't restore MMX state :( // some codecs are broken, and doesn't restore MMX state :(
// it happens usually with broken/damaged files. // it happens usually with broken/damaged files.
if (gCpuCaps.has3DNow) { if (gCpuCaps.has3DNow) {
__asm__ __volatile ("femms\n\t":::"memory"); __asm__ volatile ("femms\n\t":::"memory");
} }
else if (gCpuCaps.hasMMX) { else if (gCpuCaps.hasMMX) {
__asm__ __volatile ("emms\n\t":::"memory"); __asm__ volatile ("emms\n\t":::"memory");
} }
#endif #endif

@ -57,24 +57,24 @@ typedef union {
#define mmx_i2r(op,imm,reg) \ #define mmx_i2r(op,imm,reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \ __asm__ volatile (#op " %0, %%" #reg \
: /* nothing */ \ : /* nothing */ \
: "i" (imm) ) : "i" (imm) )
#define mmx_m2r(op, mem, reg) \ #define mmx_m2r(op, mem, reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \ __asm__ volatile (#op " %0, %%" #reg \
: /* nothing */ \ : /* nothing */ \
: "m" (mem)) : "m" (mem))
#define mmx_r2m(op, reg, mem) \ #define mmx_r2m(op, reg, mem) \
__asm__ __volatile__ (#op " %%" #reg ", %0" \ __asm__ volatile (#op " %%" #reg ", %0" \
: "=m" (mem) \ : "=m" (mem) \
: /* nothing */ ) : /* nothing */ )
#define mmx_r2r(op, regs, regd) \ #define mmx_r2r(op, regs, regd) \
__asm__ __volatile__ (#op " %" #regs ", %" #regd) __asm__ volatile (#op " %" #regs ", %" #regd)
#define emms() __asm__ __volatile__ ("emms") #define emms() __asm__ volatile ("emms")
#endif /* MPLAYER_MMX_H */ #endif /* MPLAYER_MMX_H */

@ -84,7 +84,7 @@ If you have questions please contact with me: Nick Kurshev: nickols_k@mail.ru.
#define small_memcpy(to,from,n)\ #define small_memcpy(to,from,n)\
{\ {\
register unsigned long int dummy;\ register unsigned long int dummy;\
__asm__ __volatile__(\ __asm__ volatile(\
"rep; movsb"\ "rep; movsb"\
:"=&D"(to), "=&S"(from), "=&c"(dummy)\ :"=&D"(to), "=&S"(from), "=&c"(dummy)\
/* It's most portable way to notify compiler */\ /* It's most portable way to notify compiler */\
@ -153,7 +153,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
#endif #endif
#ifndef HAVE_ONLY_MMX1 #ifndef HAVE_ONLY_MMX1
/* PREFETCH has effect even for MOVSB instruction ;) */ /* PREFETCH has effect even for MOVSB instruction ;) */
__asm__ __volatile__ ( __asm__ volatile (
PREFETCH" (%0)\n" PREFETCH" (%0)\n"
PREFETCH" 64(%0)\n" PREFETCH" 64(%0)\n"
PREFETCH" 128(%0)\n" PREFETCH" 128(%0)\n"
@ -188,7 +188,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
/* if SRC is misaligned */ /* if SRC is misaligned */
for(; i>0; i--) for(; i>0; i--)
{ {
__asm__ __volatile__ ( __asm__ volatile (
PREFETCH" 320(%0)\n" PREFETCH" 320(%0)\n"
"movups (%0), %%xmm0\n" "movups (%0), %%xmm0\n"
"movups 16(%0), %%xmm1\n" "movups 16(%0), %%xmm1\n"
@ -210,7 +210,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
*/ */
for(; i>0; i--) for(; i>0; i--)
{ {
__asm__ __volatile__ ( __asm__ volatile (
PREFETCH" 320(%0)\n" PREFETCH" 320(%0)\n"
"movaps (%0), %%xmm0\n" "movaps (%0), %%xmm0\n"
"movaps 16(%0), %%xmm1\n" "movaps 16(%0), %%xmm1\n"
@ -228,7 +228,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
// Align destination at BLOCK_SIZE boundary // Align destination at BLOCK_SIZE boundary
for(; ((int)to & (BLOCK_SIZE-1)) && i>0; i--) for(; ((int)to & (BLOCK_SIZE-1)) && i>0; i--)
{ {
__asm__ __volatile__ ( __asm__ volatile (
#ifndef HAVE_ONLY_MMX1 #ifndef HAVE_ONLY_MMX1
PREFETCH" 320(%0)\n" PREFETCH" 320(%0)\n"
#endif #endif
@ -317,7 +317,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
for(; i>0; i--) for(; i>0; i--)
{ {
__asm__ __volatile__ ( __asm__ volatile (
#ifndef HAVE_ONLY_MMX1 #ifndef HAVE_ONLY_MMX1
PREFETCH" 320(%0)\n" PREFETCH" 320(%0)\n"
#endif #endif
@ -346,11 +346,11 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
#ifdef HAVE_MMX2 #ifdef HAVE_MMX2
/* since movntq is weakly-ordered, a "sfence" /* since movntq is weakly-ordered, a "sfence"
* is needed to become ordered again. */ * is needed to become ordered again. */
__asm__ __volatile__ ("sfence":::"memory"); __asm__ volatile ("sfence":::"memory");
#endif #endif
#ifndef HAVE_SSE #ifndef HAVE_SSE
/* enables to use FPU */ /* enables to use FPU */
__asm__ __volatile__ (EMMS:::"memory"); __asm__ volatile (EMMS:::"memory");
#endif #endif
} }
/* /*
@ -405,7 +405,7 @@ static void * RENAME(mem2agpcpy)(void * to, const void * from, size_t len)
*/ */
for(; i>0; i--) for(; i>0; i--)
{ {
__asm__ __volatile__ ( __asm__ volatile (
PREFETCH" 320(%0)\n" PREFETCH" 320(%0)\n"
"movq (%0), %%mm0\n" "movq (%0), %%mm0\n"
"movq 8(%0), %%mm1\n" "movq 8(%0), %%mm1\n"
@ -430,10 +430,10 @@ static void * RENAME(mem2agpcpy)(void * to, const void * from, size_t len)
#ifdef HAVE_MMX2 #ifdef HAVE_MMX2
/* since movntq is weakly-ordered, a "sfence" /* since movntq is weakly-ordered, a "sfence"
* is needed to become ordered again. */ * is needed to become ordered again. */
__asm__ __volatile__ ("sfence":::"memory"); __asm__ volatile ("sfence":::"memory");
#endif #endif
/* enables to use FPU */ /* enables to use FPU */
__asm__ __volatile__ (EMMS:::"memory"); __asm__ volatile (EMMS:::"memory");
} }
/* /*
* Now do the tail of the block * Now do the tail of the block

@ -52,7 +52,7 @@ extern char* def_path;
#else #else
// this asm code is no longer needed // this asm code is no longer needed
#define STORE_ALL \ #define STORE_ALL \
__asm__ __volatile__ ( \ __asm__ volatile ( \
"push %%ebx\n\t" \ "push %%ebx\n\t" \
"push %%ecx\n\t" \ "push %%ecx\n\t" \
"push %%edx\n\t" \ "push %%edx\n\t" \
@ -60,7 +60,7 @@ extern char* def_path;
"push %%edi\n\t"::) "push %%edi\n\t"::)
#define REST_ALL \ #define REST_ALL \
__asm__ __volatile__ ( \ __asm__ volatile ( \
"pop %%edi\n\t" \ "pop %%edi\n\t" \
"pop %%esi\n\t" \ "pop %%esi\n\t" \
"pop %%edx\n\t" \ "pop %%edx\n\t" \
@ -98,7 +98,7 @@ LRESULT WINAPI SendDriverMessage(HDRVR hDriver, UINT message,
#endif #endif
if (!module || !module->hDriverModule || !module->DriverProc) return -1; if (!module || !module->hDriverModule || !module->DriverProc) return -1;
#ifndef __svr4__ #ifndef __svr4__
__asm__ __volatile__ ("fsave (%0)\n\t": :"r"(&qw)); __asm__ volatile ("fsave (%0)\n\t": :"r"(&qw));
#endif #endif
#ifdef WIN32_LOADER #ifdef WIN32_LOADER
@ -110,7 +110,7 @@ LRESULT WINAPI SendDriverMessage(HDRVR hDriver, UINT message,
REST_ALL; REST_ALL;
#ifndef __svr4__ #ifndef __svr4__
__asm__ __volatile__ ("frstor (%0)\n\t": :"r"(&qw)); __asm__ volatile ("frstor (%0)\n\t": :"r"(&qw));
#endif #endif
#ifdef DETAILED_OUT #ifdef DETAILED_OUT

@ -138,7 +138,7 @@ void Setup_FS_Segment(void)
{ {
unsigned int ldt_desc = LDT_SEL(fs_ldt); unsigned int ldt_desc = LDT_SEL(fs_ldt);
__asm__ __volatile__( __asm__ volatile(
"movl %0,%%eax; movw %%ax, %%fs" : : "r" (ldt_desc) "movl %0,%%eax; movw %%ax, %%fs" : : "r" (ldt_desc)
:"eax" :"eax"
); );
@ -154,7 +154,7 @@ static int LDT_Modify( int func, struct modify_ldt_ldt_s *ptr,
{ {
int res; int res;
#ifdef __PIC__ #ifdef __PIC__
__asm__ __volatile__( "pushl %%ebx\n\t" __asm__ volatile( "pushl %%ebx\n\t"
"movl %2,%%ebx\n\t" "movl %2,%%ebx\n\t"
"int $0x80\n\t" "int $0x80\n\t"
"popl %%ebx" "popl %%ebx"
@ -165,7 +165,7 @@ static int LDT_Modify( int func, struct modify_ldt_ldt_s *ptr,
"d"(16)//sizeof(*ptr) from kernel point of view "d"(16)//sizeof(*ptr) from kernel point of view
:"esi" ); :"esi" );
#else #else
__asm__ __volatile__("int $0x80" __asm__ volatile("int $0x80"
: "=a" (res) : "=a" (res)
: "0" (__NR_modify_ldt), : "0" (__NR_modify_ldt),
"b" (func), "b" (func),

@ -104,7 +104,7 @@ int main(int argc, char *argv[]){
printf("params: flags: %d, paramSize: %d, what: %d, params[0] = %x\n", printf("params: flags: %d, paramSize: %d, what: %d, params[0] = %x\n",
params->flags, params->paramSize, params->what, params->params[0]); params->flags, params->paramSize, params->what, params->params[0]);
// __asm__ __volatile__ ("movl %%esp, %0\n\t" : "=a" (esp) :: "memory" ); // __asm__ volatile ("movl %%esp, %0\n\t" : "=a" (esp) :: "memory" );
// printf("ESP=%p\n",esp); // printf("ESP=%p\n",esp);
*((void**)0x62b7d640) = &x_table[0]; //malloc(0x00001837 * 4); // ugly hack? *((void**)0x62b7d640) = &x_table[0]; //malloc(0x00001837 * 4); // ugly hack?
@ -113,7 +113,7 @@ int main(int argc, char *argv[]){
ret = dispatcher(params, &globals); ret = dispatcher(params, &globals);
// __asm__ __volatile__ ("movl %%esp, %0\n\t" : "=a" (esp) :: "memory" ); // __asm__ volatile ("movl %%esp, %0\n\t" : "=a" (esp) :: "memory" );
// printf("ESP=%p\n",esp); // printf("ESP=%p\n",esp);
printf("!!! CDComponentDispatch() => %d glob=%p\n",ret,globals); printf("!!! CDComponentDispatch() => %d glob=%p\n",ret,globals);

@ -79,7 +79,7 @@ char* def_path = WIN32_PATH;
static void do_cpuid(unsigned int ax, unsigned int *regs) static void do_cpuid(unsigned int ax, unsigned int *regs)
{ {
__asm__ __volatile__ __asm__ volatile
( (
"pushl %%ebx; pushl %%ecx; pushl %%edx;" "pushl %%ebx; pushl %%ecx; pushl %%edx;"
".byte 0x0f, 0xa2;" ".byte 0x0f, 0xa2;"
@ -95,7 +95,7 @@ static void do_cpuid(unsigned int ax, unsigned int *regs)
static unsigned int c_localcount_tsc() static unsigned int c_localcount_tsc()
{ {
int a; int a;
__asm__ __volatile__ __asm__ volatile
( (
"rdtsc\n\t" "rdtsc\n\t"
:"=a"(a) :"=a"(a)
@ -106,7 +106,7 @@ static unsigned int c_localcount_tsc()
} }
static void c_longcount_tsc(long long* z) static void c_longcount_tsc(long long* z)
{ {
__asm__ __volatile__ __asm__ volatile
( (
"pushl %%ebx\n\t" "pushl %%ebx\n\t"
"movl %%eax, %%ebx\n\t" "movl %%eax, %%ebx\n\t"
@ -2867,7 +2867,7 @@ static int WINAPI expIsBadStringPtrA(const char* string, int nchars)
static long WINAPI expInterlockedExchangeAdd( long* dest, long incr ) static long WINAPI expInterlockedExchangeAdd( long* dest, long incr )
{ {
long ret; long ret;
__asm__ __volatile__ __asm__ volatile
( (
"lock; xaddl %0,(%1)" "lock; xaddl %0,(%1)"
: "=r" (ret) : "=r" (ret)
@ -4006,7 +4006,7 @@ static int exp_initterm(INITTERMFUNC *start, INITTERMFUNC *end)
// ok this trick with push/pop is necessary as otherwice // ok this trick with push/pop is necessary as otherwice
// edi/esi registers are being trashed // edi/esi registers are being trashed
void* p = *start; void* p = *start;
__asm__ __volatile__ __asm__ volatile
( (
"pushl %%ebx \n\t" "pushl %%ebx \n\t"
"pushl %%ecx \n\t" "pushl %%ecx \n\t"
@ -4272,7 +4272,7 @@ static double expcos(double x)
static void explog10(void) static void explog10(void)
{ {
__asm__ __volatile__ __asm__ volatile
( (
"fldl 8(%esp) \n\t" "fldl 8(%esp) \n\t"
"fldln2 \n\t" "fldln2 \n\t"
@ -4283,7 +4283,7 @@ static void explog10(void)
static void expcos(void) static void expcos(void)
{ {
__asm__ __volatile__ __asm__ volatile
( (
"fldl 8(%esp) \n\t" "fldl 8(%esp) \n\t"
"fcos \n\t" "fcos \n\t"
@ -4300,7 +4300,7 @@ static void expcos(void)
static void exp_ftol(void) static void exp_ftol(void)
{ {
__asm__ __volatile__ __asm__ volatile
( (
"sub $12, %esp \n\t" "sub $12, %esp \n\t"
"fstcw -2(%ebp) \n\t" "fstcw -2(%ebp) \n\t"
@ -4319,8 +4319,8 @@ static void exp_ftol(void)
} }
#define FPU_DOUBLES(var1,var2) double var1,var2; \ #define FPU_DOUBLES(var1,var2) double var1,var2; \
__asm__ __volatile__( "fstpl %0;fwait" : "=m" (var2) : ); \ __asm__ volatile( "fstpl %0;fwait" : "=m" (var2) : ); \
__asm__ __volatile__( "fstpl %0;fwait" : "=m" (var1) : ) __asm__ volatile( "fstpl %0;fwait" : "=m" (var1) : )
static double exp_CIpow(void) static double exp_CIpow(void)
{ {
@ -4361,7 +4361,7 @@ static int exp_setjmp3(void* jmpbuf, int x)
{ {
//dbgprintf("!!!!UNIMPLEMENTED: setjmp3(%p, %d) => 0\n", jmpbuf, x); //dbgprintf("!!!!UNIMPLEMENTED: setjmp3(%p, %d) => 0\n", jmpbuf, x);
//return 0; //return 0;
__asm__ __volatile__ __asm__ volatile
( (
//"mov 4(%%esp), %%edx \n\t" //"mov 4(%%esp), %%edx \n\t"
"mov (%%esp), %%eax \n\t" "mov (%%esp), %%eax \n\t"
@ -4383,7 +4383,7 @@ static int exp_setjmp3(void* jmpbuf, int x)
: "eax" : "eax"
); );
#if 1 #if 1
__asm__ __volatile__ __asm__ volatile
( (
"mov %%fs:0, %%eax \n\t" // unsure "mov %%fs:0, %%eax \n\t" // unsure
"mov %%eax, 24(%%edx) \n\t" "mov %%eax, 24(%%edx) \n\t"
@ -4762,7 +4762,7 @@ static double expfloor(double x)
} }
#define FPU_DOUBLE(var) double var; \ #define FPU_DOUBLE(var) double var; \
__asm__ __volatile__( "fstpl %0;fwait" : "=m" (var) : ) __asm__ volatile( "fstpl %0;fwait" : "=m" (var) : )
static double exp_CIcos(void) static double exp_CIcos(void)
{ {

@ -94,7 +94,7 @@ extern "C" {
# ifndef _EGCS_ # ifndef _EGCS_
#define __stdcall __attribute__((__stdcall__)) #define __stdcall __attribute__((__stdcall__))
#define __cdecl __attribute__((__cdecl__)) #define __cdecl __attribute__((__cdecl__))
# define RESTORE_ES __asm__ __volatile__("pushl %ds\n\tpopl %es") # define RESTORE_ES __asm__ volatile("pushl %ds\n\tpopl %es")
# endif # endif
# else # else
// # error You need gcc >= 2.7 to build Wine on a 386 // # error You need gcc >= 2.7 to build Wine on a 386

@ -39,7 +39,7 @@ void dct36_3dnow(real *inbuf, real *o1,
real *o2, real *wintab, real *tsbuf) real *o2, real *wintab, real *tsbuf)
#endif #endif
{ {
__asm__ __volatile__( __asm__ volatile(
"movq (%%eax),%%mm0\n\t" "movq (%%eax),%%mm0\n\t"
"movq 4(%%eax),%%mm1\n\t" "movq 4(%%eax),%%mm1\n\t"
"pfadd %%mm1,%%mm0\n\t" "pfadd %%mm1,%%mm0\n\t"

@ -18,7 +18,7 @@ static float attribute_used plus_1f = 1.0;
void dct64_MMX_3dnow(short *a,short *b,real *c) void dct64_MMX_3dnow(short *a,short *b,real *c)
{ {
char tmp[256]; char tmp[256];
__asm__ __volatile( __asm__ volatile(
" movl %2,%%eax\n\t" " movl %2,%%eax\n\t"
" leal 128+%3,%%edx\n\t" " leal 128+%3,%%edx\n\t"

@ -18,7 +18,7 @@ static float attribute_used plus_1f = 1.0;
void dct64_MMX_3dnowex(short *a,short *b,real *c) void dct64_MMX_3dnowex(short *a,short *b,real *c)
{ {
char tmp[256]; char tmp[256];
__asm__ __volatile( __asm__ volatile(
" movl %2,%%eax\n\t" " movl %2,%%eax\n\t"
" leal 128+%3,%%edx\n\t" " leal 128+%3,%%edx\n\t"

@ -9,7 +9,7 @@
void dct64_MMX(short *a,short *b,real *c) void dct64_MMX(short *a,short *b,real *c)
{ {
char tmp[256]; char tmp[256];
__asm__ __volatile( __asm__ volatile(
" movl %2,%%eax\n\t" " movl %2,%%eax\n\t"
/* Phase 1*/ /* Phase 1*/
" flds (%%eax)\n\t" " flds (%%eax)\n\t"

@ -41,7 +41,7 @@ int synth_1to1_pent(real *bandPtr, int channel, short *samples)
{ {
real tmp[3]; real tmp[3];
register int retval; register int retval;
__asm__ __volatile( __asm__ volatile(
" movl %%ebp,"MANGLE(saved_ebp)"\n\t" " movl %%ebp,"MANGLE(saved_ebp)"\n\t"
" movl %1,%%eax\n\t"/*bandPtr*/ " movl %1,%%eax\n\t"/*bandPtr*/
" movl %3,%%esi\n\t" " movl %3,%%esi\n\t"

@ -217,7 +217,7 @@ int synth_1to1_MMX(real *bandPtr, int channel, short *samples)
dct64_MMX_func(a, b, bandPtr); dct64_MMX_func(a, b, bandPtr);
window = mp3lib_decwins + 16 - bo1; window = mp3lib_decwins + 16 - bo1;
//printf("DEBUG: channel %d, bo %d, off %d\n", channel, bo, 16 - bo1); //printf("DEBUG: channel %d, bo %d, off %d\n", channel, bo, 16 - bo1);
__asm__ __volatile( __asm__ volatile(
ASMALIGN(4) ASMALIGN(4)
".L03:\n\t" ".L03:\n\t"
"movq (%1),%%mm0\n\t" "movq (%1),%%mm0\n\t"

@ -372,7 +372,7 @@ static int nv_probe(int verbose, int force){
* PCI-Memory IO access macros. * PCI-Memory IO access macros.
*/ */
#define MEM_BARRIER() __asm__ __volatile__ ("" : : : "memory") #define MEM_BARRIER() __asm__ volatile ("" : : : "memory")
#undef VID_WR08 #undef VID_WR08
#define VID_WR08(p,i,val) ({ MEM_BARRIER(); ((uint8_t *)(p))[(i)]=(val); }) #define VID_WR08(p,i,val) ({ MEM_BARRIER(); ((uint8_t *)(p))[(i)]=(val); })

@ -196,7 +196,7 @@ do { \
#ifndef USE_RMW_CYCLES #ifndef USE_RMW_CYCLES
/* Can be used to inhibit READ-MODIFY-WRITE cycles. On by default. */ /* Can be used to inhibit READ-MODIFY-WRITE cycles. On by default. */
#define MEM_BARRIER() __asm__ __volatile__ ("" : : : "memory") #define MEM_BARRIER() __asm__ volatile ("" : : : "memory")
#undef VID_WR08 #undef VID_WR08
#define VID_WR08(p,i,val) ({ MEM_BARRIER(); ((uint8_t *)(p))[(i)]=(val); }) #define VID_WR08(p,i,val) ({ MEM_BARRIER(); ((uint8_t *)(p))[(i)]=(val); })

@ -68,7 +68,7 @@ extern unsigned char *ioBase;
static __inline__ volatile void eieio() static __inline__ volatile void eieio()
{ {
__asm__ __volatile__ ("eieio"); __asm__ volatile ("eieio");
} }
static __inline__ void outb(short port, unsigned char value) static __inline__ void outb(short port, unsigned char value)

@ -68,37 +68,37 @@
static __inline__ void outb(unsigned long port, char val) static __inline__ void outb(unsigned long port, char val)
{ {
__asm__ __volatile__("stba %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL)); __asm__ volatile("stba %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL));
} }
static __inline__ void outw(unsigned long port, char val) static __inline__ void outw(unsigned long port, char val)
{ {
__asm__ __volatile__("stha %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL)); __asm__ volatile("stha %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL));
} }
static __inline__ void outl(unsigned long port, char val) static __inline__ void outl(unsigned long port, char val)
{ {
__asm__ __volatile__("sta %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL)); __asm__ volatile("sta %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL));
} }
static __inline__ unsigned int inb(unsigned long port) static __inline__ unsigned int inb(unsigned long port)
{ {
unsigned char ret; unsigned char ret;
__asm__ __volatile__("lduba [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL)); __asm__ volatile("lduba [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL));
return ret; return ret;
} }
static __inline__ unsigned int inw(unsigned long port) static __inline__ unsigned int inw(unsigned long port)
{ {
unsigned char ret; unsigned char ret;
__asm__ __volatile__("lduha [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL)); __asm__ volatile("lduha [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL));
return ret; return ret;
} }
static __inline__ unsigned int inl(unsigned long port) static __inline__ unsigned int inl(unsigned long port)
{ {
unsigned char ret; unsigned char ret;
__asm__ __volatile__("lda [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL)); __asm__ volatile("lda [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL));
return ret; return ret;
} }

@ -171,7 +171,7 @@ static __inline__ void outb(short port,char val)
} }
else else
#endif #endif
__asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port)); __asm__ volatile("outb %0,%1" : :"a" (val), "d" (port));
return; return;
} }
@ -199,7 +199,7 @@ static __inline__ void outw(short port,short val)
} }
else else
#endif #endif
__asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port)); __asm__ volatile("outw %0,%1" : :"a" (val), "d" (port));
return; return;
} }
@ -227,7 +227,7 @@ static __inline__ void outl(short port,unsigned int val)
} }
else else
#endif #endif
__asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port)); __asm__ volatile("outl %0,%1" : :"a" (val), "d" (port));
return; return;
} }
@ -255,7 +255,7 @@ static __inline__ unsigned int inb(short port)
} }
else else
#endif #endif
__asm__ __volatile__("inb %1,%0" : __asm__ volatile("inb %1,%0" :
"=a" (ret) : "=a" (ret) :
"d" (port)); "d" (port));
return ret; return ret;
@ -285,7 +285,7 @@ static __inline__ unsigned int inw(short port)
} }
else else
#endif #endif
__asm__ __volatile__("inw %1,%0" : __asm__ volatile("inw %1,%0" :
"=a" (ret) : "=a" (ret) :
"d" (port)); "d" (port));
return ret; return ret;
@ -315,7 +315,7 @@ static __inline__ unsigned int inl(short port)
} }
else else
#endif #endif
__asm__ __volatile__("inl %1,%0" : __asm__ volatile("inl %1,%0" :
"=a" (ret) : "=a" (ret) :
"d" (port)); "d" (port));
return ret; return ret;
@ -327,7 +327,7 @@ static __inline__ void intr_disable()
if (svgahelper_initialized == 1) if (svgahelper_initialized == 1)
return; return;
#endif #endif
__asm__ __volatile__("cli"); __asm__ volatile("cli");
} }
static __inline__ void intr_enable() static __inline__ void intr_enable()
@ -336,7 +336,7 @@ static __inline__ void intr_enable()
if (svgahelper_initialized == 1) if (svgahelper_initialized == 1)
return; return;
#endif #endif
__asm__ __volatile__("sti"); __asm__ volatile("sti");
} }
#endif /* MPLAYER_ASMMACROS_X86_H */ #endif /* MPLAYER_ASMMACROS_X86_H */