Hello,

this patch adds the required support for the thiscall and fastcall
calling conventions on 32-bit x86 Windows.  It is a prerequisite for
fixing PR libgcj/51500 for 32-bit Windows.  The default calling
convention for non-static, non-variadic C++ member functions changes
in GCC 4.7 to thiscall, as described by the vendor's ABI definition.
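
With the new ABI values a libffi client can request these conventions
directly.  Below is a minimal sketch, not part of the patch, of calling a
thiscall member function through the new FFI_THISCALL ABI; the member
function and the helper name are hypothetical:

    #include <ffi.h>

    /* Call a hypothetical thiscall member function such as
       int Point::get_x(), i.e. one that receives 'this' in %ecx.
       'code' is the function's address, obtained elsewhere.  */
    static int
    call_get_x (void (*code) (void), void *this_ptr)
    {
      ffi_cif cif;
      ffi_type *arg_types[1] = { &ffi_type_pointer };
      void *arg_values[1] = { &this_ptr };
      ffi_arg result = 0;

      /* FFI_THISCALL is one of the ABI values added by this patch;
         the single pointer argument is the implicit 'this'.  */
      if (ffi_prep_cif (&cif, FFI_THISCALL, 1, &ffi_type_sint, arg_types)
          == FFI_OK)
        ffi_call (&cif, code, &result, arg_values);
      return (int) result;
    }

FFI_FASTCALL is used the same way; note that ffi_call routes a fastcall
with at most one register-sized argument through the thiscall path, and a
thiscall with no arguments through the plain stdcall path, as the hunks
below show.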

2012-01-25  Kai Tietz  <kti...@redhat.com>

        * src/x86/ffi.c (ffi_call_win32): Add new argument to
        prototype to specify the calling convention.
        (ffi_call): Add support for stdcall/thiscall convention.
        (ffi_raw_call): Likewise.
        * src/x86/ffitarget.h (ffi_abi): Add FFI_THISCALL and
        FFI_FASTCALL.
        * src/x86/win32.S (_ffi_call_win32): Add support for
        fastcall/thiscall calling-convention calls.

Tested on i686-w64-mingw32 and i686-pc-cygwin.  OK to apply?

Regards,
Kai

Index: gcc/libffi/src/x86/ffi.c
===================================================================
--- gcc.orig/libffi/src/x86/ffi.c
+++ gcc/libffi/src/x86/ffi.c
@@ -252,7 +252,7 @@ ffi_call_win64(void (*)(char *, extended
 #elif defined(X86_WIN32)
 extern void
 ffi_call_win32(void (*)(char *, extended_cif *), extended_cif *,
-               unsigned, unsigned, unsigned *, void (*fn)(void));
+               unsigned, unsigned, unsigned, unsigned *, void (*fn)(void));
 #else
 extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *,
                           unsigned, unsigned, unsigned *, void (*fn)(void));
@@ -316,8 +316,23 @@ void ffi_call(ffi_cif *cif, void (*fn)(v
 #elif defined(X86_WIN32)
     case FFI_SYSV:
     case FFI_STDCALL:
-      ffi_call_win32(ffi_prep_args, &ecif, cif->bytes, cif->flags,
-                     ecif.rvalue, fn);
+    case FFI_THISCALL:
+    case FFI_FASTCALL:
+      {
+       unsigned int abi = cif->abi;
+       size_t sz = 0;
+       unsigned int i;
+
+       for (i=0; i < cif->nargs && sz < 8;i++)
+         sz += (cif->arg_types[i]->size + 3) & ~3;
+       if (sz <= 4 && cif->nargs <= 1
+           && abi == FFI_FASTCALL)
+         abi = FFI_THISCALL;
+       if (!sz && cif->nargs == 0 && abi == FFI_THISCALL)
+         abi = FFI_STDCALL;
+        ffi_call_win32(ffi_prep_args, &ecif, abi, cif->bytes, cif->flags,
+                       ecif.rvalue, fn);
+      }
       break;
 #else
     case FFI_SYSV:
@@ -644,8 +659,22 @@ ffi_raw_call(ffi_cif *cif, void (*fn)(vo
 #ifdef X86_WIN32
     case FFI_SYSV:
     case FFI_STDCALL:
-      ffi_call_win32(ffi_prep_args_raw, &ecif, cif->bytes, cif->flags,
-                     ecif.rvalue, fn);
+    case FFI_THISCALL:
+    case FFI_FASTCALL:
+      {
+        unsigned int abi = cif->abi;
+        size_t sz = 0;
+        unsigned int i;
+
+        for (i=0; i < cif->nargs && sz < 8;i++)
+          sz += (cif->arg_types[i]->size + 3) & ~3;
+        if (sz <= 4 && cif->nargs < 2 && abi == FFI_FASTCALL)
+          abi = FFI_THISCALL;
+        if (!sz && cif->nargs == 0 && abi == FFI_THISCALL)
+          abi = FFI_STDCALL;
+        ffi_call_win32(ffi_prep_args_raw, &ecif, abi, cif->bytes, cif->flags,
+                       ecif.rvalue, fn);
+      }
       break;
 #else
     case FFI_SYSV:
Index: gcc/libffi/src/x86/ffitarget.h
===================================================================
--- gcc.orig/libffi/src/x86/ffitarget.h
+++ gcc/libffi/src/x86/ffitarget.h
@@ -64,6 +64,8 @@ typedef enum ffi_abi {
 #ifdef X86_WIN32
   FFI_SYSV,
   FFI_STDCALL,
+  FFI_THISCALL,
+  FFI_FASTCALL,
   /* TODO: Add fastcall support for the sake of completeness */
   FFI_DEFAULT_ABI = FFI_SYSV,
 #endif
Index: gcc/libffi/src/x86/win32.S
===================================================================
--- gcc.orig/libffi/src/x86/win32.S
+++ gcc/libffi/src/x86/win32.S
@@ -45,6 +45,7 @@ _TEXT SEGMENT
 ffi_call_win32 PROC NEAR,
     ffi_prep_args : NEAR PTR DWORD,
     ecif          : NEAR PTR DWORD,
+    cif_abi       : DWORD,
     cif_bytes     : DWORD,
     cif_flags     : DWORD,
     rvalue        : NEAR PTR DWORD,
@@ -64,6 +65,19 @@ ffi_call_win32 PROC NEAR,
         ;; Return stack to previous state and call the function
         add  esp, 8
 
+       ;; Handle thiscall and fastcall
+       cmp cif_abi, 3 ;; FFI_THISCALL
+       jz do_thiscall
+       cmp cif_abi, 4 ;; FFI_FASTCALL
+       jnz do_stdcall
+       mov ecx, DWORD PTR [esp]
+       mov edx, DWORD PTR [esp+4]
+       add esp, 8
+       jmp do_stdcall
+do_thiscall:
+       mov ecx, DWORD PTR [esp]
+       add esp, 4
+do_stdcall:
         call fn
 
         ;; cdecl:   we restore esp in the epilogue, so there's no need to
@@ -405,7 +419,7 @@ _ffi_call_win32:
         movl  %esp,%ebp
 .LCFI1:
         # Make room for all of the new args.
-        movl  16(%ebp),%ecx
+        movl  20(%ebp),%ecx
         subl  %ecx,%esp
  
         movl  %esp,%eax
@@ -417,19 +431,34 @@ _ffi_call_win32:
  
         # Return stack to previous state and call the function
         addl  $8,%esp
- 
+
+       # Handle fastcall and thiscall
+       cmpl $3, 16(%ebp)  # FFI_THISCALL
+       jz .do_thiscall
+       cmpl $4, 16(%ebp) # FFI_FASTCALL
+       jnz .do_fncall
+       movl (%esp), %ecx
+       movl 4(%esp), %edx
+       addl $8, %esp
+       jmp .do_fncall
+.do_thiscall:
+       movl (%esp), %ecx
+       addl $4, %esp
+
+.do_fncall:
+        
         # FIXME: Align the stack to a 128-bit boundary to avoid
         # potential performance hits.
 
-        call  *28(%ebp)
+        call  *32(%ebp)
  
         # stdcall functions pop arguments off the stack themselves
 
         # Load %ecx with the return type code
-        movl  20(%ebp),%ecx
+        movl  24(%ebp),%ecx
  
         # If the return value pointer is NULL, assume no return value.
-        cmpl  $0,24(%ebp)
+        cmpl  $0,28(%ebp)
         jne   0f
  
         # Even if there is no space for the return value, we are
@@ -488,50 +517,50 @@ _ffi_call_win32:
 
 .Lretint:
         # Load %ecx with the pointer to storage for the return value
-        movl  24(%ebp),%ecx
+        movl  28(%ebp),%ecx
         movl  %eax,0(%ecx)
         jmp   .Lepilogue
  
 .Lretfloat:
          # Load %ecx with the pointer to storage for the return value
-        movl  24(%ebp),%ecx
+        movl  28(%ebp),%ecx
         fstps (%ecx)
         jmp   .Lepilogue
  
 .Lretdouble:
         # Load %ecx with the pointer to storage for the return value
-        movl  24(%ebp),%ecx
+        movl  28(%ebp),%ecx
         fstpl (%ecx)
         jmp   .Lepilogue
  
 .Lretlongdouble:
         # Load %ecx with the pointer to storage for the return value
-        movl  24(%ebp),%ecx
+        movl  28(%ebp),%ecx
         fstpt (%ecx)
         jmp   .Lepilogue
  
 .Lretint64:
         # Load %ecx with the pointer to storage for the return value
-        movl  24(%ebp),%ecx
+        movl  28(%ebp),%ecx
         movl  %eax,0(%ecx)
         movl  %edx,4(%ecx)
        jmp   .Lepilogue
 
 .Lretstruct1b:
         # Load %ecx with the pointer to storage for the return value
-        movl  24(%ebp),%ecx
+        movl  28(%ebp),%ecx
         movb  %al,0(%ecx)
         jmp   .Lepilogue
  
 .Lretstruct2b:
         # Load %ecx with the pointer to storage for the return value
-        movl  24(%ebp),%ecx
+        movl  28(%ebp),%ecx
         movw  %ax,0(%ecx)
         jmp   .Lepilogue
 
 .Lretstruct4b:
         # Load %ecx with the pointer to storage for the return value
-        movl  24(%ebp),%ecx
+        movl  28(%ebp),%ecx
         movl  %eax,0(%ecx)
         jmp   .Lepilogue
 
