
Replace all occurrences of '__volatile__' and '__volatile' by plain 'volatile'.

We were using an inconsistent mix of the three variants and 'volatile' should
be the most correct and portable variant.


git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@27791 b3059339-0415-0410-9bf9-f77b7e298cf2
diego 2008-10-16 20:17:56 +00:00
parent 2195547220
commit 26b29f4f2d
25 changed files with 101 additions and 101 deletions
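In GNU C, '__volatile__', '__volatile' and 'volatile' are all accepted as the same qualifier on an extended asm statement, so the substitution does not change the generated code. The fragment below is a minimal, self-contained sketch of the pattern the tree converges on; the read_tsc64() helper is illustrative and not taken from the MPlayer sources.

#include <stdio.h>

/* Minimal sketch, not MPlayer code: the 'volatile' qualifier marks the asm
 * statement as having side effects, so the compiler may not delete or
 * reorder it even if its result looks unused. GCC treats '__volatile__'
 * and '__volatile' as alternate spellings of the same keyword, which is
 * what this commit normalizes. */
static inline unsigned long long read_tsc64(void)   /* illustrative helper */
{
    unsigned long long tsc = 0;
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    unsigned int lo, hi;
    __asm__ volatile ("rdtsc" : "=a" (lo), "=d" (hi));
    tsc = ((unsigned long long)hi << 32) | lo;
#endif
    return tsc;
}

int main(void)
{
    printf("tsc: %llu\n", read_tsc64());
    return 0;
}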


@@ -86,7 +86,7 @@ static unsigned int GetTimer(){
static inline unsigned long long int read_tsc( void )
{
unsigned long long int retval;
__asm__ __volatile ("rdtsc":"=A"(retval)::"memory");
__asm__ volatile ("rdtsc":"=A"(retval)::"memory");
return retval;
}

configure (vendored), 10 lines changed

@@ -1595,7 +1595,7 @@ if x86 && test "$_runtime_cpudetection" = no ; then
void catch() { exit(1); }
int main(void) {
signal(SIGILL, catch);
__asm__ __volatile__ ("$3":::"memory"); return 0;
__asm__ volatile ("$3":::"memory"); return 0;
}
EOF
@@ -2443,7 +2443,7 @@ if arm ; then
echocheck "ARMv5TE (Enhanced DSP Extensions)"
if test $_armv5te = "auto" ; then
cat > $TMPC << EOF
int main(void) { __asm__ __volatile__ ("qadd r0, r0, r0"); return 0; }
int main(void) { __asm__ volatile ("qadd r0, r0, r0"); return 0; }
EOF
_armv5te=no
cc_check && _armv5te=yes
@@ -2453,7 +2453,7 @@ EOF
echocheck "ARMv6 (SIMD instructions)"
if test $_armv6 = "auto" ; then
cat > $TMPC << EOF
int main(void) { __asm__ __volatile__ ("sadd16 r0, r0, r0"); return 0; }
int main(void) { __asm__ volatile ("sadd16 r0, r0, r0"); return 0; }
EOF
_armv6=no
cc_check && _armv6=yes
@@ -2463,7 +2463,7 @@ EOF
echocheck "ARM VFP"
if test $_armvfp = "auto" ; then
cat > $TMPC << EOF
int main(void) { __asm__ __volatile__ ("fadds s0, s0, s0"); return 0; }
int main(void) { __asm__ volatile ("fadds s0, s0, s0"); return 0; }
EOF
_armvfp=no
cc_check && _armvfp=yes
@@ -2473,7 +2473,7 @@ EOF
echocheck "iWMMXt (Intel XScale SIMD instructions)"
if test $_iwmmxt = "auto" ; then
cat > $TMPC << EOF
int main(void) { __asm__ __volatile__ ("wunpckelub wr6, wr4"); return 0; }
int main(void) { __asm__ volatile ("wunpckelub wr6, wr4"); return 0; }
EOF
_iwmmxt=no
cc_check && _iwmmxt=yes


@@ -57,7 +57,7 @@ static int has_cpuid(void)
long a, c;
// code from libavcodec:
__asm__ __volatile__ (
__asm__ volatile (
/* See if CPUID instruction is supported ... */
/* ... Get copies of EFLAGS into eax and ecx */
"pushf\n\t"
@@ -85,14 +85,14 @@ static void
do_cpuid(unsigned int ax, unsigned int *p)
{
#if 0
__asm__ __volatile(
__asm__ volatile(
"cpuid;"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax)
);
#else
// code from libavcodec:
__asm__ __volatile__
__asm__ volatile
("mov %%"REG_b", %%"REG_S"\n\t"
"cpuid\n\t"
"xchg %%"REG_b", %%"REG_S
@@ -400,7 +400,7 @@ static void check_os_katmai_support( void )
if ( gCpuCaps.hasSSE ) {
mp_msg(MSGT_CPUDETECT,MSGL_V, "Testing OS support for SSE... " );
exc_fil = SetUnhandledExceptionFilter(win32_sig_handler_sse);
__asm__ __volatile ("xorps %xmm0, %xmm0");
__asm__ volatile ("xorps %xmm0, %xmm0");
SetUnhandledExceptionFilter(exc_fil);
mp_msg(MSGT_CPUDETECT,MSGL_V, gCpuCaps.hasSSE ? "yes.\n" : "no!\n" );
}
@@ -409,7 +409,7 @@ static void check_os_katmai_support( void )
if ( gCpuCaps.hasSSE ) {
mp_msg(MSGT_CPUDETECT,MSGL_V, "Testing OS support for SSE... " );
DosSetExceptionHandler( &RegRec );
__asm__ __volatile ("xorps %xmm0, %xmm0");
__asm__ volatile ("xorps %xmm0, %xmm0");
DosUnsetExceptionHandler( &RegRec );
mp_msg(MSGT_CPUDETECT,MSGL_V, gCpuCaps.hasSSE ? "yes.\n" : "no!\n" );
}
@@ -432,8 +432,8 @@ static void check_os_katmai_support( void )
if ( gCpuCaps.hasSSE ) {
mp_msg(MSGT_CPUDETECT,MSGL_V, "Testing OS support for SSE... " );
// __asm__ __volatile ("xorps %%xmm0, %%xmm0");
__asm__ __volatile ("xorps %xmm0, %xmm0");
// __asm__ volatile ("xorps %%xmm0, %%xmm0");
__asm__ volatile ("xorps %xmm0, %xmm0");
mp_msg(MSGT_CPUDETECT,MSGL_V, gCpuCaps.hasSSE ? "yes.\n" : "no!\n" );
}


@@ -129,7 +129,7 @@ static int swap_fourcc __initdata = 0;
static inline double FastSin(double x)
{
register double res;
__asm__ __volatile("fsin":"=t"(res):"0"(x));
__asm__ volatile("fsin":"=t"(res):"0"(x));
return res;
}
#undef sin
@@ -138,7 +138,7 @@ static inline double FastSin(double x)
static inline double FastCos(double x)
{
register double res;
__asm__ __volatile("fcos":"=t"(res):"0"(x));
__asm__ volatile("fcos":"=t"(res):"0"(x));
return res;
}
#undef cos


@@ -45,7 +45,7 @@ static void FFT_4_3DNOW(complex_t *x)
/* delta_p = 1 here */
/* x[k] = sum_{i=0..3} x[i] * w^{i*k}, w=e^{-2*pi/4}
*/
__asm__ __volatile__(
__asm__ volatile(
"movq 24(%1), %%mm3\n\t"
"movq 8(%1), %%mm1\n\t"
"pxor %2, %%mm3\n\t" /* mm3.re | -mm3.im */
@@ -90,7 +90,7 @@ static void FFT_8_3DNOW(complex_t *x)
*/
complex_t wT1, wB1, wB2;
__asm__ __volatile__(
__asm__ volatile(
"movq 8(%2), %%mm0\n\t"
"movq 24(%2), %%mm1\n\t"
"movq %%mm0, %0\n\t" /* wT1 = x[1]; */
@@ -99,7 +99,7 @@ static void FFT_8_3DNOW(complex_t *x)
:"r"(x)
:"memory");
__asm__ __volatile__(
__asm__ volatile(
"movq 16(%0), %%mm2\n\t"
"movq 32(%0), %%mm3\n\t"
"movq %%mm2, 8(%0)\n\t" /* x[1] = x[2]; */
@@ -114,7 +114,7 @@ static void FFT_8_3DNOW(complex_t *x)
/* x[0] x[4] x[2] x[6] */
__asm__ __volatile__(
__asm__ volatile(
"movq 40(%1), %%mm0\n\t"
"movq %%mm0, %%mm3\n\t"
"movq 56(%1), %%mm1\n\t"
@@ -153,7 +153,7 @@ static void FFT_8_3DNOW(complex_t *x)
:"memory");
/* x[1] x[5] */
__asm__ __volatile__ (
__asm__ volatile (
"movq %6, %%mm6\n\t"
"movq %5, %%mm7\n\t"
"movq %1, %%mm0\n\t"
@@ -203,7 +203,7 @@ static void FFT_8_3DNOW(complex_t *x)
/* x[3] x[7] */
__asm__ __volatile__(
__asm__ volatile(
"movq %1, %%mm0\n\t"
#ifdef HAVE_3DNOWEX
"pswapd %3, %%mm1\n\t"
@@ -358,13 +358,13 @@ imdct_do_512_3dnow
/* Pre IFFT complex multiply plus IFFT cmplx conjugate & reordering*/
#if 1
__asm__ __volatile__ (
__asm__ volatile (
"movq %0, %%mm7\n\t"
::"m"(x_plus_minus_3dnow)
:"memory");
for( i=0; i < 128; i++) {
int j = pm128[i];
__asm__ __volatile__ (
__asm__ volatile (
"movd %1, %%mm0\n\t"
"movd %3, %%mm1\n\t"
"punpckldq %2, %%mm0\n\t" /* mm0 = data[256-2*j-1] | data[2*j]*/
@@ -394,7 +394,7 @@ imdct_do_512_3dnow
buf[i].im = (data[256-2*j-1] * xsin1[j] + data[2*j] * xcos1[j])*(-1.0);*/
}
#else
__asm__ __volatile__ ("femms":::"memory");
__asm__ volatile ("femms":::"memory");
for( i=0; i < 128; i++) {
/* z[i] = (X[256-2*i-1] + j * X[2*i]) * (xcos1[i] + j * xsin1[i]) ; */
int j= pm128[i];
@@ -435,14 +435,14 @@ imdct_do_512_3dnow
/* Post IFFT complex multiply plus IFFT complex conjugate*/
#if 1
__asm__ __volatile__ (
__asm__ volatile (
"movq %0, %%mm7\n\t"
"movq %1, %%mm6\n\t"
::"m"(x_plus_minus_3dnow),
"m"(x_minus_plus_3dnow)
:"eax","memory");
for (i=0; i < 128; i++) {
__asm__ __volatile__ (
__asm__ volatile (
"movq %1, %%mm0\n\t" /* ac3_buf[i].re | ac3_buf[i].im */
"movq %%mm0, %%mm1\n\t" /* ac3_buf[i].re | ac3_buf[i].im */
#ifndef HAVE_3DNOWEX
@@ -473,7 +473,7 @@ imdct_do_512_3dnow
ac3_buf[i].im =(tmp_a_r * ac3_xsin1[i]) - (tmp_a_i * ac3_xcos1[i]);*/
}
#else
__asm__ __volatile__ ("femms":::"memory");
__asm__ volatile ("femms":::"memory");
for( i=0; i < 128; i++) {
/* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */
tmp_a_r = buf[i].real;
@@ -496,7 +496,7 @@ imdct_do_512_3dnow
);
for (i=0; i< 64; i++) {
/* merge two loops in one to enable working of 2 decoders */
__asm__ __volatile__ (
__asm__ volatile (
"movd 516(%1), %%mm0\n\t"
"movd (%1), %%mm1\n\t" /**data_ptr++=-buf[64+i].im**window_ptr+++*delay_ptr++;*/
"punpckldq (%2), %%mm0\n\t"/*data_ptr[128]=-buf[i].re*window_ptr[128]+delay_ptr[128];*/
@@ -520,7 +520,7 @@ imdct_do_512_3dnow
}
window_ptr += 128;
#else
__asm__ __volatile__ ("femms":::"memory");
__asm__ volatile ("femms":::"memory");
for(i=0; i< 64; i++) {
*data_ptr++ = -buf[64+i].imag * *window_ptr++ + *delay_ptr++ + bias;
*data_ptr++ = buf[64-i-1].real * *window_ptr++ + *delay_ptr++ + bias;
@@ -538,7 +538,7 @@ imdct_do_512_3dnow
for(i=0; i< 64; i++) {
/* merge two loops in one to enable working of 2 decoders */
window_ptr -=2;
__asm__ __volatile__(
__asm__ volatile(
"movd 508(%1), %%mm0\n\t"
"movd (%1), %%mm1\n\t"
"punpckldq (%2), %%mm0\n\t"
@@ -565,9 +565,9 @@ imdct_do_512_3dnow
:"memory");
delay_ptr += 2;
}
__asm__ __volatile__ ("femms":::"memory");
__asm__ volatile ("femms":::"memory");
#else
__asm__ __volatile__ ("femms":::"memory");
__asm__ volatile ("femms":::"memory");
for(i=0; i< 64; i++) {
*delay_ptr++ = -buf[64+i].real * *--window_ptr;
*delay_ptr++ = buf[64-i-1].imag * *--window_ptr;


@@ -39,7 +39,7 @@ typedef struct
}i_cmplx_t;
#define TRANS_FILL_MM6_MM7_3DNOW()\
__asm__ __volatile__(\
__asm__ volatile(\
"movq %1, %%mm7\n\t"\
"movq %0, %%mm6\n\t"\
::"m"(x_plus_minus_3dnow),\
@@ -66,7 +66,7 @@ typedef struct
#define TRANSZERO_3DNOW(A0,A4,A8,A12) \
{ \
__asm__ __volatile__(\
__asm__ volatile(\
"movq %4, %%mm0\n\t" /* mm0 = wTB[0]*/\
"movq %5, %%mm1\n\t" /* mm1 = wTB[k*2]*/ \
"movq %%mm0, %%mm5\n\t"/*u.re = wTB[0].re + wTB[k*2].re;*/\
@@ -95,7 +95,7 @@ typedef struct
#define TRANSHALF_16_3DNOW(A2,A6,A10,A14)\
{\
__asm__ __volatile__(\
__asm__ volatile(\
"movq %4, %%mm0\n\t"/*u.re = wTB[2].im + wTB[2].re;*/\
"movq %%mm0, %%mm1\n\t"\
"pxor %%mm7, %%mm1\n\t"\
@@ -136,7 +136,7 @@ typedef struct
#define TRANS_3DNOW(A1,A5,A9,A13,WT,WB,D,D3)\
{ \
__asm__ __volatile__(\
__asm__ volatile(\
"movq %1, %%mm4\n\t"\
"movq %%mm4, %%mm5\n\t"\
"punpckldq %%mm4, %%mm4\n\t"/*mm4 = D.re | D.re */\
@@ -166,7 +166,7 @@ typedef struct
:\
:"m"(WT), "m"(D), "m"(WB), "m"(D3)\
:"memory");\
__asm__ __volatile__(\
__asm__ volatile(\
"movq %4, %%mm0\n\t"/* a1 = A1*/\
"movq %5, %%mm2\n\t"/* a1 = A5*/\
"movq %%mm0, %%mm1\n\t"\


@@ -374,10 +374,10 @@ void *decode_video(sh_video_t *sh_video, unsigned char *start, int in_size,
// some codecs are broken, and doesn't restore MMX state :(
// it happens usually with broken/damaged files.
if (gCpuCaps.has3DNow) {
__asm__ __volatile ("femms\n\t":::"memory");
__asm__ volatile ("femms\n\t":::"memory");
}
else if (gCpuCaps.hasMMX) {
__asm__ __volatile ("emms\n\t":::"memory");
__asm__ volatile ("emms\n\t":::"memory");
}
#endif


@@ -57,24 +57,24 @@ typedef union {
#define mmx_i2r(op,imm,reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \
__asm__ volatile (#op " %0, %%" #reg \
: /* nothing */ \
: "i" (imm) )
#define mmx_m2r(op, mem, reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \
__asm__ volatile (#op " %0, %%" #reg \
: /* nothing */ \
: "m" (mem))
#define mmx_r2m(op, reg, mem) \
__asm__ __volatile__ (#op " %%" #reg ", %0" \
__asm__ volatile (#op " %%" #reg ", %0" \
: "=m" (mem) \
: /* nothing */ )
#define mmx_r2r(op, regs, regd) \
__asm__ __volatile__ (#op " %" #regs ", %" #regd)
__asm__ volatile (#op " %" #regs ", %" #regd)
#define emms() __asm__ __volatile__ ("emms")
#define emms() __asm__ volatile ("emms")
#endif /* MPLAYER_MMX_H */
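As a usage sketch for the macros above (assuming GCC on an MMX-capable x86 target; copy_qword() is illustrative and not part of the header), the rewritten forms expand roughly as follows:

#include <stdint.h>

/* Illustrative caller, not MPlayer code: shows what mmx_m2r(movq, *src, mm0),
 * mmx_r2m(movq, mm0, *dst) and emms() boil down to once the qualifier is
 * spelled 'volatile'. */
static void copy_qword(const uint64_t *src, uint64_t *dst)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    __asm__ volatile ("movq %0, %%mm0" : : "m" (*src));   /* mmx_m2r */
    __asm__ volatile ("movq %%mm0, %0" : "=m" (*dst));    /* mmx_r2m */
    __asm__ volatile ("emms");                            /* emms()  */
#else
    *dst = *src;   /* fallback so the sketch builds on non-x86 targets */
#endif
}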


@@ -84,7 +84,7 @@ If you have questions please contact with me: Nick Kurshev: nickols_k@mail.ru.
#define small_memcpy(to,from,n)\
{\
register unsigned long int dummy;\
__asm__ __volatile__(\
__asm__ volatile(\
"rep; movsb"\
:"=&D"(to), "=&S"(from), "=&c"(dummy)\
/* It's most portable way to notify compiler */\
@@ -153,7 +153,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
#endif
#ifndef HAVE_ONLY_MMX1
/* PREFETCH has effect even for MOVSB instruction ;) */
__asm__ __volatile__ (
__asm__ volatile (
PREFETCH" (%0)\n"
PREFETCH" 64(%0)\n"
PREFETCH" 128(%0)\n"
@@ -188,7 +188,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
/* if SRC is misaligned */
for(; i>0; i--)
{
__asm__ __volatile__ (
__asm__ volatile (
PREFETCH" 320(%0)\n"
"movups (%0), %%xmm0\n"
"movups 16(%0), %%xmm1\n"
@@ -210,7 +210,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
*/
for(; i>0; i--)
{
__asm__ __volatile__ (
__asm__ volatile (
PREFETCH" 320(%0)\n"
"movaps (%0), %%xmm0\n"
"movaps 16(%0), %%xmm1\n"
@@ -228,7 +228,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
// Align destination at BLOCK_SIZE boundary
for(; ((int)to & (BLOCK_SIZE-1)) && i>0; i--)
{
__asm__ __volatile__ (
__asm__ volatile (
#ifndef HAVE_ONLY_MMX1
PREFETCH" 320(%0)\n"
#endif
@@ -317,7 +317,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
for(; i>0; i--)
{
__asm__ __volatile__ (
__asm__ volatile (
#ifndef HAVE_ONLY_MMX1
PREFETCH" 320(%0)\n"
#endif
@@ -346,11 +346,11 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
#ifdef HAVE_MMX2
/* since movntq is weakly-ordered, a "sfence"
* is needed to become ordered again. */
__asm__ __volatile__ ("sfence":::"memory");
__asm__ volatile ("sfence":::"memory");
#endif
#ifndef HAVE_SSE
/* enables to use FPU */
__asm__ __volatile__ (EMMS:::"memory");
__asm__ volatile (EMMS:::"memory");
#endif
}
/*
@@ -405,7 +405,7 @@ static void * RENAME(mem2agpcpy)(void * to, const void * from, size_t len)
*/
for(; i>0; i--)
{
__asm__ __volatile__ (
__asm__ volatile (
PREFETCH" 320(%0)\n"
"movq (%0), %%mm0\n"
"movq 8(%0), %%mm1\n"
@@ -430,10 +430,10 @@ static void * RENAME(mem2agpcpy)(void * to, const void * from, size_t len)
#ifdef HAVE_MMX2
/* since movntq is weakly-ordered, a "sfence"
* is needed to become ordered again. */
__asm__ __volatile__ ("sfence":::"memory");
__asm__ volatile ("sfence":::"memory");
#endif
/* enables to use FPU */
__asm__ __volatile__ (EMMS:::"memory");
__asm__ volatile (EMMS:::"memory");
}
/*
* Now do the tail of the block


@@ -52,7 +52,7 @@ extern char* def_path;
#else
// this asm code is no longer needed
#define STORE_ALL \
__asm__ __volatile__ ( \
__asm__ volatile ( \
"push %%ebx\n\t" \
"push %%ecx\n\t" \
"push %%edx\n\t" \
@@ -60,7 +60,7 @@ extern char* def_path;
"push %%edi\n\t"::)
#define REST_ALL \
__asm__ __volatile__ ( \
__asm__ volatile ( \
"pop %%edi\n\t" \
"pop %%esi\n\t" \
"pop %%edx\n\t" \
@@ -98,7 +98,7 @@ LRESULT WINAPI SendDriverMessage(HDRVR hDriver, UINT message,
#endif
if (!module || !module->hDriverModule || !module->DriverProc) return -1;
#ifndef __svr4__
__asm__ __volatile__ ("fsave (%0)\n\t": :"r"(&qw));
__asm__ volatile ("fsave (%0)\n\t": :"r"(&qw));
#endif
#ifdef WIN32_LOADER
@@ -110,7 +110,7 @@ LRESULT WINAPI SendDriverMessage(HDRVR hDriver, UINT message,
REST_ALL;
#ifndef __svr4__
__asm__ __volatile__ ("frstor (%0)\n\t": :"r"(&qw));
__asm__ volatile ("frstor (%0)\n\t": :"r"(&qw));
#endif
#ifdef DETAILED_OUT


@@ -138,7 +138,7 @@ void Setup_FS_Segment(void)
{
unsigned int ldt_desc = LDT_SEL(fs_ldt);
__asm__ __volatile__(
__asm__ volatile(
"movl %0,%%eax; movw %%ax, %%fs" : : "r" (ldt_desc)
:"eax"
);
@@ -154,7 +154,7 @@ static int LDT_Modify( int func, struct modify_ldt_ldt_s *ptr,
{
int res;
#ifdef __PIC__
__asm__ __volatile__( "pushl %%ebx\n\t"
__asm__ volatile( "pushl %%ebx\n\t"
"movl %2,%%ebx\n\t"
"int $0x80\n\t"
"popl %%ebx"
@@ -165,7 +165,7 @@ static int LDT_Modify( int func, struct modify_ldt_ldt_s *ptr,
"d"(16)//sizeof(*ptr) from kernel point of view
:"esi" );
#else
__asm__ __volatile__("int $0x80"
__asm__ volatile("int $0x80"
: "=a" (res)
: "0" (__NR_modify_ldt),
"b" (func),


@@ -104,7 +104,7 @@ int main(int argc, char *argv[]){
printf("params: flags: %d, paramSize: %d, what: %d, params[0] = %x\n",
params->flags, params->paramSize, params->what, params->params[0]);
// __asm__ __volatile__ ("movl %%esp, %0\n\t" : "=a" (esp) :: "memory" );
// __asm__ volatile ("movl %%esp, %0\n\t" : "=a" (esp) :: "memory" );
// printf("ESP=%p\n",esp);
*((void**)0x62b7d640) = &x_table[0]; //malloc(0x00001837 * 4); // ugly hack?
@@ -113,7 +113,7 @@ int main(int argc, char *argv[]){
ret = dispatcher(params, &globals);
// __asm__ __volatile__ ("movl %%esp, %0\n\t" : "=a" (esp) :: "memory" );
// __asm__ volatile ("movl %%esp, %0\n\t" : "=a" (esp) :: "memory" );
// printf("ESP=%p\n",esp);
printf("!!! CDComponentDispatch() => %d glob=%p\n",ret,globals);


@@ -79,7 +79,7 @@ char* def_path = WIN32_PATH;
static void do_cpuid(unsigned int ax, unsigned int *regs)
{
__asm__ __volatile__
__asm__ volatile
(
"pushl %%ebx; pushl %%ecx; pushl %%edx;"
".byte 0x0f, 0xa2;"
@@ -95,7 +95,7 @@ static void do_cpuid(unsigned int ax, unsigned int *regs)
static unsigned int c_localcount_tsc()
{
int a;
__asm__ __volatile__
__asm__ volatile
(
"rdtsc\n\t"
:"=a"(a)
@@ -106,7 +106,7 @@ static unsigned int c_localcount_tsc()
}
static void c_longcount_tsc(long long* z)
{
__asm__ __volatile__
__asm__ volatile
(
"pushl %%ebx\n\t"
"movl %%eax, %%ebx\n\t"
@@ -2867,7 +2867,7 @@ static int WINAPI expIsBadStringPtrA(const char* string, int nchars)
static long WINAPI expInterlockedExchangeAdd( long* dest, long incr )
{
long ret;
__asm__ __volatile__
__asm__ volatile
(
"lock; xaddl %0,(%1)"
: "=r" (ret)
@@ -4006,7 +4006,7 @@ static int exp_initterm(INITTERMFUNC *start, INITTERMFUNC *end)
// ok this trick with push/pop is necessary as otherwice
// edi/esi registers are being trashed
void* p = *start;
__asm__ __volatile__
__asm__ volatile
(
"pushl %%ebx \n\t"
"pushl %%ecx \n\t"
@@ -4272,7 +4272,7 @@ static double expcos(double x)
static void explog10(void)
{
__asm__ __volatile__
__asm__ volatile
(
"fldl 8(%esp) \n\t"
"fldln2 \n\t"
@@ -4283,7 +4283,7 @@ static void explog10(void)
static void expcos(void)
{
__asm__ __volatile__
__asm__ volatile
(
"fldl 8(%esp) \n\t"
"fcos \n\t"
@@ -4300,7 +4300,7 @@ static void expcos(void)
static void exp_ftol(void)
{
__asm__ __volatile__
__asm__ volatile
(
"sub $12, %esp \n\t"
"fstcw -2(%ebp) \n\t"
@@ -4319,8 +4319,8 @@ static void exp_ftol(void)
}
#define FPU_DOUBLES(var1,var2) double var1,var2; \
__asm__ __volatile__( "fstpl %0;fwait" : "=m" (var2) : ); \
__asm__ __volatile__( "fstpl %0;fwait" : "=m" (var1) : )
__asm__ volatile( "fstpl %0;fwait" : "=m" (var2) : ); \
__asm__ volatile( "fstpl %0;fwait" : "=m" (var1) : )
static double exp_CIpow(void)
{
@@ -4361,7 +4361,7 @@ static int exp_setjmp3(void* jmpbuf, int x)
{
//dbgprintf("!!!!UNIMPLEMENTED: setjmp3(%p, %d) => 0\n", jmpbuf, x);
//return 0;
__asm__ __volatile__
__asm__ volatile
(
//"mov 4(%%esp), %%edx \n\t"
"mov (%%esp), %%eax \n\t"
@@ -4383,7 +4383,7 @@ static int exp_setjmp3(void* jmpbuf, int x)
: "eax"
);
#if 1
__asm__ __volatile__
__asm__ volatile
(
"mov %%fs:0, %%eax \n\t" // unsure
"mov %%eax, 24(%%edx) \n\t"
@@ -4762,7 +4762,7 @@ static double expfloor(double x)
}
#define FPU_DOUBLE(var) double var; \
__asm__ __volatile__( "fstpl %0;fwait" : "=m" (var) : )
__asm__ volatile( "fstpl %0;fwait" : "=m" (var) : )
static double exp_CIcos(void)
{


@@ -94,7 +94,7 @@ extern "C" {
# ifndef _EGCS_
#define __stdcall __attribute__((__stdcall__))
#define __cdecl __attribute__((__cdecl__))
# define RESTORE_ES __asm__ __volatile__("pushl %ds\n\tpopl %es")
# define RESTORE_ES __asm__ volatile("pushl %ds\n\tpopl %es")
# endif
# else
// # error You need gcc >= 2.7 to build Wine on a 386


@@ -39,7 +39,7 @@ void dct36_3dnow(real *inbuf, real *o1,
real *o2, real *wintab, real *tsbuf)
#endif
{
__asm__ __volatile__(
__asm__ volatile(
"movq (%%eax),%%mm0\n\t"
"movq 4(%%eax),%%mm1\n\t"
"pfadd %%mm1,%%mm0\n\t"


@@ -18,7 +18,7 @@ static float attribute_used plus_1f = 1.0;
void dct64_MMX_3dnow(short *a,short *b,real *c)
{
char tmp[256];
__asm__ __volatile(
__asm__ volatile(
" movl %2,%%eax\n\t"
" leal 128+%3,%%edx\n\t"


@@ -18,7 +18,7 @@ static float attribute_used plus_1f = 1.0;
void dct64_MMX_3dnowex(short *a,short *b,real *c)
{
char tmp[256];
__asm__ __volatile(
__asm__ volatile(
" movl %2,%%eax\n\t"
" leal 128+%3,%%edx\n\t"


@@ -9,7 +9,7 @@
void dct64_MMX(short *a,short *b,real *c)
{
char tmp[256];
__asm__ __volatile(
__asm__ volatile(
" movl %2,%%eax\n\t"
/* Phase 1*/
" flds (%%eax)\n\t"


@@ -41,7 +41,7 @@ int synth_1to1_pent(real *bandPtr, int channel, short *samples)
{
real tmp[3];
register int retval;
__asm__ __volatile(
__asm__ volatile(
" movl %%ebp,"MANGLE(saved_ebp)"\n\t"
" movl %1,%%eax\n\t"/*bandPtr*/
" movl %3,%%esi\n\t"


@@ -217,7 +217,7 @@ int synth_1to1_MMX(real *bandPtr, int channel, short *samples)
dct64_MMX_func(a, b, bandPtr);
window = mp3lib_decwins + 16 - bo1;
//printf("DEBUG: channel %d, bo %d, off %d\n", channel, bo, 16 - bo1);
__asm__ __volatile(
__asm__ volatile(
ASMALIGN(4)
".L03:\n\t"
"movq (%1),%%mm0\n\t"


@@ -372,7 +372,7 @@ static int nv_probe(int verbose, int force){
* PCI-Memory IO access macros.
*/
#define MEM_BARRIER() __asm__ __volatile__ ("" : : : "memory")
#define MEM_BARRIER() __asm__ volatile ("" : : : "memory")
#undef VID_WR08
#define VID_WR08(p,i,val) ({ MEM_BARRIER(); ((uint8_t *)(p))[(i)]=(val); })


@@ -196,7 +196,7 @@ do { \
#ifndef USE_RMW_CYCLES
/* Can be used to inhibit READ-MODIFY-WRITE cycles. On by default. */
#define MEM_BARRIER() __asm__ __volatile__ ("" : : : "memory")
#define MEM_BARRIER() __asm__ volatile ("" : : : "memory")
#undef VID_WR08
#define VID_WR08(p,i,val) ({ MEM_BARRIER(); ((uint8_t *)(p))[(i)]=(val); })


@@ -68,7 +68,7 @@ extern unsigned char *ioBase;
static __inline__ volatile void eieio()
{
__asm__ __volatile__ ("eieio");
__asm__ volatile ("eieio");
}
static __inline__ void outb(short port, unsigned char value)


@@ -68,37 +68,37 @@
static __inline__ void outb(unsigned long port, char val)
{
__asm__ __volatile__("stba %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL));
__asm__ volatile("stba %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL));
}
static __inline__ void outw(unsigned long port, char val)
{
__asm__ __volatile__("stha %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL));
__asm__ volatile("stha %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL));
}
static __inline__ void outl(unsigned long port, char val)
{
__asm__ __volatile__("sta %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL));
__asm__ volatile("sta %0, [%1] %2" : : "r" (val), "r" (port), "i" (ASI_PL));
}
static __inline__ unsigned int inb(unsigned long port)
{
unsigned char ret;
__asm__ __volatile__("lduba [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL));
__asm__ volatile("lduba [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL));
return ret;
}
static __inline__ unsigned int inw(unsigned long port)
{
unsigned char ret;
__asm__ __volatile__("lduha [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL));
__asm__ volatile("lduha [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL));
return ret;
}
static __inline__ unsigned int inl(unsigned long port)
{
unsigned char ret;
__asm__ __volatile__("lda [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL));
__asm__ volatile("lda [%1] %2, %0" : "=r" (ret) : "r" (port), "i" (ASI_PL));
return ret;
}


@@ -171,7 +171,7 @@ static __inline__ void outb(short port,char val)
}
else
#endif
__asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
__asm__ volatile("outb %0,%1" : :"a" (val), "d" (port));
return;
}
@@ -199,7 +199,7 @@ static __inline__ void outw(short port,short val)
}
else
#endif
__asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
__asm__ volatile("outw %0,%1" : :"a" (val), "d" (port));
return;
}
@@ -227,7 +227,7 @@ static __inline__ void outl(short port,unsigned int val)
}
else
#endif
__asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
__asm__ volatile("outl %0,%1" : :"a" (val), "d" (port));
return;
}
@@ -255,7 +255,7 @@ static __inline__ unsigned int inb(short port)
}
else
#endif
__asm__ __volatile__("inb %1,%0" :
__asm__ volatile("inb %1,%0" :
"=a" (ret) :
"d" (port));
return ret;
@@ -285,7 +285,7 @@ static __inline__ unsigned int inw(short port)
}
else
#endif
__asm__ __volatile__("inw %1,%0" :
__asm__ volatile("inw %1,%0" :
"=a" (ret) :
"d" (port));
return ret;
@@ -315,7 +315,7 @@ static __inline__ unsigned int inl(short port)
}
else
#endif
__asm__ __volatile__("inl %1,%0" :
__asm__ volatile("inl %1,%0" :
"=a" (ret) :
"d" (port));
return ret;
@@ -327,7 +327,7 @@ static __inline__ void intr_disable()
if (svgahelper_initialized == 1)
return;
#endif
__asm__ __volatile__("cli");
__asm__ volatile("cli");
}
static __inline__ void intr_enable()
@@ -336,7 +336,7 @@ static __inline__ void intr_enable()
if (svgahelper_initialized == 1)
return;
#endif
__asm__ __volatile__("sti");
__asm__ volatile("sti");
}
#endif /* MPLAYER_ASMMACROS_X86_H */