OK here we go,

Included is a patch for MMX/MMX2/SSE optimized memcpy(). If you choose 
MY_CPU=i386 in makefile.unix, the code autodetects whether your CPU and OS 
support the above extensions and enables the appropriate optimization. 
Therefore, I assume it will run on any i386 CPU (according to the source, 
these memcpy() routines are 10% up to 500% — five times — faster than the 
non-MMX optimized memcpy()).

The code is actually a rip-off from the xine source tree, which in turn is a 
rip off from mplayer code, which in turn is a rip off from linux kernel code. 
Don't you love open source software ?

I removed PowerPC optimized memcpy() from xine code because I have no machine 
to test.

BEFORE applying the patch, please run the following script to change almost 
all appearances of memcpy in the mame source to xmame_fast_memcpy. Please run 
this in the parent directory of your xmame directory (xmame-0.61.1-pr9 in my 
case).

#!/bin/bash
# Change every occurrence of memcpy into xmame_fast_memcpy in the xmame
# source tree.  Run this from the parent directory of the xmame directory.
# Files under contrib/, the m68kmake generator, and the md5 sources are
# deliberately left untouched.

for file in $(find xmame-0.61.1-pr9 -type f -name '*.[ch]' | egrep -v "contrib|m68kmake|md5")
do
        # Only rewrite files that actually mention memcpy.
        if grep -q "memcpy" "$file"; then
                # Use a unique temp file instead of a fixed name to avoid
                # clobbering anything if the script is run concurrently.
                tmp=$(mktemp) || exit 1
                sed -e "s/memcpy/xmame_fast_memcpy/g" "$file" > "$tmp" &&
                mv -f "$tmp" "$file"
        fi
done
# script ends here ########################


Now, you can patch the modified code with the following diff file. It adds two 
files, fastmcpy.c and fastmcpy.h, into src/unix, and adds the line 
#include "fastmcpy.h" to several main header files. It also adds a call to 
xmame_probe_fast_memcpy() in main.c to probe the best memcpy() method.



diff -Naur xmame-0.61.1-pr9.save/src/driver.h xmame-0.61.1-pr9/src/driver.h
--- xmame-0.61.1-pr9.save/src/driver.h  2002-10-25 11:54:16.000000000 -0500
+++ xmame-0.61.1-pr9/src/driver.h       2002-11-12 09:31:04.000000000 -0600
@@ -68,6 +68,7 @@
 #include "cheat.h"
 #include "tilemap.h"
 #include "profiler.h"
+#include "fastmcpy.h"

 #ifdef MESS
 #include "messdrv.h"
diff -Naur xmame-0.61.1-pr9.save/src/mame.h xmame-0.61.1-pr9/src/mame.h
--- xmame-0.61.1-pr9.save/src/mame.h    2002-10-19 03:12:40.000000000 -0500
+++ xmame-0.61.1-pr9/src/mame.h 2002-11-12 09:31:04.000000000 -0600
@@ -16,6 +16,8 @@
 #include "osdepend.h"
 #include "drawgfx.h"
 #include "palette.h"
+#include "fastmcpy.h"
+

 extern char build_version[];

diff -Naur xmame-0.61.1-pr9.save/src/unix/Makefile xmame-0.61.1-pr9/src/unix/Makefile
--- xmame-0.61.1-pr9.save/src/unix/Makefile     2002-10-25 11:54:16.000000000 -0500
+++ xmame-0.61.1-pr9/src/unix/Makefile  2002-11-12 09:31:04.000000000 -0600
@@ -7,7 +7,7 @@

 # common objs
 COMMON_OBJS  =  \
-       $(OBJDIR)/main.o $(OBJDIR)/sound.o \
+       $(OBJDIR)/main.o $(OBJDIR)/sound.o $(OBJDIR)/fastmcpy.o \
        $(OBJDIR)/keyboard.o $(OBJDIR)/devices.o \
        $(OBJDIR)/video.o $(OBJDIR)/mode.o \
        $(OBJDIR)/fileio.o $(OBJDIR)/dirio.o $(OBJDIR)/config.o \
diff -Naur xmame-0.61.1-pr9.save/src/unix/fastmcpy.c xmame-0.61.1-pr9/src/unix/fastmcpy.c
--- xmame-0.61.1-pr9.save/src/unix/fastmcpy.c   1969-12-31 18:00:00.000000000 -0600
+++ xmame-0.61.1-pr9/src/unix/fastmcpy.c        2002-11-12 09:47:49.000000000 -0600
@@ -0,0 +1,574 @@
+/*
+ * Toan T Nguyen, November 2002
+ * 
+ * These are the MMX/MMX2/SSE optimized versions of memcpy
+ *
+ * This code was adapted from Linux Kernel sources by Nick Kurshev to
+ * the mplayer program. (http://mplayer.sourceforge.net)
+ *
+ * Miguel Freitas split the #ifdefs into several specialized functions that
+ * are benchmarked at runtime by xine. Some original comments from Nick
+ * have been preserved documenting some MMX/SSE oddities.
+ * Also added kernel memcpy function that seems faster than glibc one.
+ *
+ * This code was adapted to xmame by Toan T Nguyen. It combines the files
+ * memcpy.c and cpu_accel.c in the xine source tree. The ARCH_PPC code is 
+ * removed since I don't have a PowerPC to test.
+ *
+ */
+
+#include <sys/times.h>
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <setjmp.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include "fastmcpy.h"
+
+#ifdef X86_ASM
+
+#define ARCH_X86
+
+
+/* x86 accelerations */
+#define MM_ACCEL_X86_MMX        0x80000000
+#define MM_ACCEL_X86_3DNOW      0x40000000
+#define MM_ACCEL_X86_MMXEXT     0x20000000
+#define MM_ACCEL_X86_SSE        0x10000000
+#define MM_ACCEL_X86_SSE2       0x08000000
+/* x86 compat defines */
+#define MM_MMX                  MM_ACCEL_X86_MMX
+#define MM_3DNOW                MM_ACCEL_X86_3DNOW
+#define MM_MMXEXT               MM_ACCEL_X86_MMXEXT
+#define MM_SSE                  MM_ACCEL_X86_SSE
+#define MM_SSE2                 MM_ACCEL_X86_SSE2
+
+
+#ifdef ARCH_X86
+static uint32_t arch_accel (void)
+{
+  uint32_t eax, ebx, ecx, edx;
+  int AMD;
+  uint32_t caps;
+
+#ifndef PIC
+#define cpuid(op,eax,ebx,ecx,edx)       \
+    asm ("cpuid"                        \
+         : "=a" (eax),                  \
+           "=b" (ebx),                  \
+           "=c" (ecx),                  \
+           "=d" (edx)                   \
+         : "a" (op)                     \
+         : "cc")
+#else   // PIC version : save ebx
+#define cpuid(op,eax,ebx,ecx,edx)       \
+    asm ("pushl %%ebx\n\t"              \
+         "cpuid\n\t"                    \
+         "movl %%ebx,%1\n\t"            \
+         "popl %%ebx"                   \
+         : "=a" (eax),                  \
+           "=r" (ebx),                  \
+           "=c" (ecx),                  \
+           "=d" (edx)                   \
+         : "a" (op)                     \
+         : "cc")
+#endif
+
+  asm ("pushfl\n\t"
+       "pushfl\n\t"
+       "popl %0\n\t"
+       "movl %0,%1\n\t"
+       "xorl $0x200000,%0\n\t"
+       "pushl %0\n\t"
+       "popfl\n\t"
+       "pushfl\n\t"
+       "popl %0\n\t"
+       "popfl"
+       : "=r" (eax),
+       "=r" (ebx)
+       :
+       : "cc");
+
+  if (eax == ebx)             /* no cpuid */
+    return 0;
+
+  cpuid (0x00000000, eax, ebx, ecx, edx);
+  if (!eax)                   /* vendor string only */
+    return 0;
+
+  AMD = (ebx == 0x68747541) && (ecx == 0x444d4163) && (edx == 0x69746e65);
+
+  cpuid (0x00000001, eax, ebx, ecx, edx);
+  if (! (edx & 0x00800000))   /* no MMX */
+    return 0;
+
+  caps = MM_ACCEL_X86_MMX;
+  if (edx & 0x02000000)       /* SSE - identical to AMD MMX extensions */
+    caps |= MM_ACCEL_X86_SSE | MM_ACCEL_X86_MMXEXT;
+
+  if (edx & 0x04000000)       /* SSE2 */
+    caps |= MM_ACCEL_X86_SSE2;
+
+  cpuid (0x80000000, eax, ebx, ecx, edx);
+  if (eax < 0x80000001)       /* no extended capabilities */
+    return caps;
+
+  cpuid (0x80000001, eax, ebx, ecx, edx);
+
+  if (edx & 0x80000000)
+    caps |= MM_ACCEL_X86_3DNOW;
+
+  if (AMD && (edx & 0x00400000))      /* AMD MMX extensions */
+    caps |= MM_ACCEL_X86_MMXEXT;
+
+  return caps;
+}
+
+static jmp_buf sigill_return;
+
+static void sigill_handler (int n) {
+  printf ("cpu_accel: OS doesn't support SSE instructions.\n");
+  longjmp(sigill_return, 1);
+}
+#endif /* ARCH_X86 */
+
+uint32_t xmame_mm_accel (void)
+{
+#ifdef ARCH_X86
+  static uint32_t accel;
+
+  accel = arch_accel ();
+
+  /* test OS support for SSE */
+  if( accel & MM_ACCEL_X86_SSE ) {
+    if (setjmp(sigill_return)) {
+      accel &= ~(MM_ACCEL_X86_SSE|MM_ACCEL_X86_SSE2);
+    } else {
+      signal (SIGILL, sigill_handler);
+      __asm __volatile ("xorps %xmm0, %xmm0");
+      signal (SIGILL, SIG_DFL);
+    }
+  }
+
+  return accel;
+
+#else
+  return 0;
+#endif /* ARCH_X86 */
+}
+
+
+void *(* xmame_fast_memcpy)(void *to, const void *from, size_t len);
+
+/* Original comments from mplayer (file: aclib.c)
+ This part of code was taken by me from Linux-2.4.3 and slightly modified
+for MMX, MMX2, SSE instruction set. I have done it since linux uses page aligned
+blocks but mplayer uses weakly ordered data and original sources can not
+speedup them. Only using PREFETCHNTA and MOVNTQ together have effect!
+
+>From IA-32 Intel Architecture Software Developer's Manual Volume 1,
+
+Order Number 245470:
+"10.4.6. Cacheability Control, Prefetch, and Memory Ordering Instructions"
+
+Data referenced by a program can be temporal (data will be used again) or
+non-temporal (data will be referenced once and not reused in the immediate
+future). To make efficient use of the processor's caches, it is generally
+desirable to cache temporal data and not cache non-temporal data. Overloading
+the processor's caches with non-temporal data is sometimes referred to as
+"polluting the caches".
+The non-temporal data is written to memory with Write-Combining semantics.
+
+The PREFETCHh instructions permits a program to load data into the processor
+at a suggested cache level, so that it is closer to the processors load and
+store unit when it is needed. If the data is already present in a level of
+the cache hierarchy that is closer to the processor, the PREFETCHh instruction
+will not result in any data movement.
+But we should use PREFETCHNTA: Non-temporal data fetch data into location
+close to the processor, minimizing cache pollution.
+
+The MOVNTQ (store quadword using non-temporal hint) instruction stores
+packed integer data from an MMX register to memory, using a non-temporal hint.
+The MOVNTPS (store packed single-precision floating-point values using
+non-temporal hint) instruction stores packed floating-point data from an
+XMM register to memory, using a non-temporal hint.
+
+The SFENCE (Store Fence) instruction controls write ordering by creating a
+fence for memory store operations. This instruction guarantees that the results
+of every store instruction that precedes the store fence in program order is
+globally visible before any store instruction that follows the fence. The
+SFENCE instruction provides an efficient way of ensuring ordering between
+procedures that produce weakly-ordered data and procedures that consume that
+data.
+
+If you have questions please contact with me: Nick Kurshev: [EMAIL PROTECTED]
+*/
+
+/*  mmx v.1 Note: Since we added alignment of destinition it speedups
+    of memory copying on PentMMX, Celeron-1 and P2 upto 12% versus
+    standard (non MMX-optimized) version.
+    Note: on K6-2+ it speedups memory copying upto 25% and
+          on K7 and P3 about 500% (5 times). 
+*/
+
+/* Additional notes on gcc assembly and processors: [MF]
+prefetch is specific for AMD processors, the intel ones should be
+prefetch0, prefetch1, prefetch2 which are not recognized by my gcc.
+prefetchnta is supported both on athlon and pentium 3.
+
+therefore i will take off prefetchnta instructions from the mmx1 version
+to avoid problems on pentium mmx and k6-2.
+
+quote of the day:
+"Using prefetches efficiently is more of an art than a science"
+*/
+
+
+#ifdef ARCH_X86
+
+/* for small memory blocks (<256 bytes) this version is faster */
+#define small_memcpy(to,from,n)\
+{\
+register unsigned long int dummy;\
+__asm__ __volatile__(\
+  "rep; movsb"\
+  :"=&D"(to), "=&S"(from), "=&c"(dummy)\
+  :"0" (to), "1" (from),"2" (n)\
+  : "memory");\
+}
+
+/* linux kernel __memcpy (from: /include/asm/string.h) */
+static inline void * __memcpy(void * to, const void * from, size_t n)
+{
+int d0, d1, d2;
+
+  if( n < 4 ) {
+    small_memcpy(to,from,n);
+  }
+  else
+    __asm__ __volatile__(
+    "rep ; movsl\n\t"
+    "testb $2,%b4\n\t"
+    "je 1f\n\t"
+    "movsw\n"
+    "1:\ttestb $1,%b4\n\t"
+    "je 2f\n\t"
+    "movsb\n"
+    "2:"
+    : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+    :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+    : "memory");
+  
+  return (to);
+}
+
+#define SSE_MMREG_SIZE 16
+#define MMX_MMREG_SIZE 8
+
+#define MMX1_MIN_LEN 0x800  /* 2K blocks */
+#define MIN_LEN 0x40  /* 64-byte blocks */
+
+/* SSE note: i tried to move 128 bytes a time instead of 64 but it
+didn't make any measureable difference. i'm using 64 for the sake of
+simplicity. [MF] */
+static void * sse_memcpy(void * to, const void * from, size_t len)
+{
+  void *retval;
+  size_t i;
+  retval = to;
+
+  /* PREFETCH has effect even for MOVSB instruction ;) */
+  __asm__ __volatile__ (
+    "   prefetchnta (%0)\n"
+    "   prefetchnta 64(%0)\n"
+    "   prefetchnta 128(%0)\n"
+    "   prefetchnta 192(%0)\n"
+    "   prefetchnta 256(%0)\n"
+    : : "r" (from) );
+
+  if(len >= MIN_LEN)
+  {
+    register unsigned long int delta;
+    /* Align destinition to MMREG_SIZE -boundary */
+    delta = ((unsigned long int)to)&(SSE_MMREG_SIZE-1);
+    if(delta)
+    {
+      delta=SSE_MMREG_SIZE-delta;
+      len -= delta;
+      small_memcpy(to, from, delta);
+    }
+    i = len >> 6; /* len/64 */
+    len&=63;
+    if(((unsigned long)from) & 15)
+      /* if SRC is misaligned */
+      for(; i>0; i--)
+      {
+        __asm__ __volatile__ (
+        "prefetchnta 320(%0)\n"
+        "movups (%0), %%xmm0\n"
+        "movups 16(%0), %%xmm1\n"
+        "movups 32(%0), %%xmm2\n"
+        "movups 48(%0), %%xmm3\n"
+        "movntps %%xmm0, (%1)\n"
+        "movntps %%xmm1, 16(%1)\n"
+        "movntps %%xmm2, 32(%1)\n"
+        "movntps %%xmm3, 48(%1)\n"
+        :: "r" (from), "r" (to) : "memory");
+        ((const unsigned char *)from)+=64;
+        ((unsigned char *)to)+=64;
+      }
+    else
+      /*
+         Only if SRC is aligned on 16-byte boundary.
+         It allows to use movaps instead of movups, which required data
+         to be aligned or a general-protection exception (#GP) is generated.
+      */
+      for(; i>0; i--)
+      {
+        __asm__ __volatile__ (
+        "prefetchnta 320(%0)\n"
+        "movaps (%0), %%xmm0\n"
+        "movaps 16(%0), %%xmm1\n"
+        "movaps 32(%0), %%xmm2\n"
+        "movaps 48(%0), %%xmm3\n"
+        "movntps %%xmm0, (%1)\n"
+        "movntps %%xmm1, 16(%1)\n"
+        "movntps %%xmm2, 32(%1)\n"
+        "movntps %%xmm3, 48(%1)\n"
+        :: "r" (from), "r" (to) : "memory");
+        ((const unsigned char *)from)+=64;
+        ((unsigned char *)to)+=64;
+      }
+    /* since movntq is weakly-ordered, a "sfence"
+     * is needed to become ordered again. */
+    __asm__ __volatile__ ("sfence":::"memory");
+    /* enables to use FPU */
+    __asm__ __volatile__ ("emms":::"memory");
+  }
+  /*
+   *   Now do the tail of the block
+   */
+  if(len) __memcpy(to, from, len);
+  return retval;
+}
+
+static void * mmx_memcpy(void * to, const void * from, size_t len)
+{
+  void *retval;
+  size_t i;
+  retval = to;
+
+  if(len >= MMX1_MIN_LEN)
+  {
+    register unsigned long int delta;
+    /* Align destinition to MMREG_SIZE -boundary */
+    delta = ((unsigned long int)to)&(MMX_MMREG_SIZE-1);
+    if(delta)
+    {
+      delta=MMX_MMREG_SIZE-delta;
+      len -= delta;
+      small_memcpy(to, from, delta);
+    }
+    i = len >> 6; /* len/64 */
+    len&=63;
+    for(; i>0; i--)
+    {
+      __asm__ __volatile__ (
+      "movq (%0), %%mm0\n"
+      "movq 8(%0), %%mm1\n"
+      "movq 16(%0), %%mm2\n"
+      "movq 24(%0), %%mm3\n"
+      "movq 32(%0), %%mm4\n"
+      "movq 40(%0), %%mm5\n"
+      "movq 48(%0), %%mm6\n"
+      "movq 56(%0), %%mm7\n"
+      "movq %%mm0, (%1)\n"
+      "movq %%mm1, 8(%1)\n"
+      "movq %%mm2, 16(%1)\n"
+      "movq %%mm3, 24(%1)\n"
+      "movq %%mm4, 32(%1)\n"
+      "movq %%mm5, 40(%1)\n"
+      "movq %%mm6, 48(%1)\n"
+      "movq %%mm7, 56(%1)\n"
+      :: "r" (from), "r" (to) : "memory");
+      ((const unsigned char *)from)+=64;
+      ((unsigned char *)to)+=64;
+    }
+    __asm__ __volatile__ ("emms":::"memory");
+  }
+  /*
+   *   Now do the tail of the block
+   */
+  if(len) __memcpy(to, from, len);
+  return retval;
+}
+
+void * mmx2_memcpy(void * to, const void * from, size_t len)
+{
+  void *retval;
+  size_t i;
+  retval = to;
+
+  /* PREFETCH has effect even for MOVSB instruction ;) */
+  __asm__ __volatile__ (
+    "   prefetchnta (%0)\n"
+    "   prefetchnta 64(%0)\n"
+    "   prefetchnta 128(%0)\n"
+    "   prefetchnta 192(%0)\n"
+    "   prefetchnta 256(%0)\n"
+    : : "r" (from) );
+
+  if(len >= MIN_LEN)
+  {
+    register unsigned long int delta;
+    /* Align destinition to MMREG_SIZE -boundary */
+    delta = ((unsigned long int)to)&(MMX_MMREG_SIZE-1);
+    if(delta)
+    {
+      delta=MMX_MMREG_SIZE-delta;
+      len -= delta;
+      small_memcpy(to, from, delta);
+    }
+    i = len >> 6; /* len/64 */
+    len&=63;
+    for(; i>0; i--)
+    {
+      __asm__ __volatile__ (
+      "prefetchnta 320(%0)\n"
+      "movq (%0), %%mm0\n"
+      "movq 8(%0), %%mm1\n"
+      "movq 16(%0), %%mm2\n"
+      "movq 24(%0), %%mm3\n"
+      "movq 32(%0), %%mm4\n"
+      "movq 40(%0), %%mm5\n"
+      "movq 48(%0), %%mm6\n"
+      "movq 56(%0), %%mm7\n"
+      "movntq %%mm0, (%1)\n"
+      "movntq %%mm1, 8(%1)\n"
+      "movntq %%mm2, 16(%1)\n"
+      "movntq %%mm3, 24(%1)\n"
+      "movntq %%mm4, 32(%1)\n"
+      "movntq %%mm5, 40(%1)\n"
+      "movntq %%mm6, 48(%1)\n"
+      "movntq %%mm7, 56(%1)\n"
+      :: "r" (from), "r" (to) : "memory");
+      ((const unsigned char *)from)+=64;
+      ((unsigned char *)to)+=64;
+    }
+     /* since movntq is weakly-ordered, a "sfence"
+     * is needed to become ordered again. */
+    __asm__ __volatile__ ("sfence":::"memory");
+    __asm__ __volatile__ ("emms":::"memory");
+  }
+  /*
+   *   Now do the tail of the block
+   */
+  if(len) __memcpy(to, from, len);
+  return retval;
+}
+
+static void *linux_kernel_memcpy(void *to, const void *from, size_t len) {
+  return __memcpy(to,from,len);
+}
+
+#endif /* ARCH_X86 */
+
+static struct {
+  char *name;
+  void *(* function)(void *to, const void *from, size_t len);
+  unsigned long long time;
+  uint32_t cpu_require;
+} memcpy_method[] = 
+{
+  { NULL, NULL, 0, 0 },
+  { "glibc memcpy()", memcpy, 0, 0 },
+#ifdef ARCH_X86
+  { "linux kernel memcpy()", linux_kernel_memcpy, 0, 0 },
+  { "MMX optimized memcpy()", mmx_memcpy, 0, MM_MMX },
+  { "MMXEXT optimized memcpy()", mmx2_memcpy, 0, MM_MMXEXT },
+  { "SSE optimized memcpy()", sse_memcpy, 0, MM_MMXEXT|MM_SSE },
+#endif /* ARCH_X86 */
+  { NULL, NULL, 0, 0 }
+};
+
+#ifdef ARCH_X86
+static unsigned long long int rdtsc()
+{
+  unsigned long long int x;
+  __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));     
+  return x;
+}
+#else
+static unsigned long long int rdtsc()
+{
+#warning "Using rdtsc() hack (via times(NULL))"
+  return times(NULL);
+}
+#endif
+
+
+#define BUFSIZE 1024*1024
+void xmame_probe_fast_memcpy(void)
+{
+  unsigned long long t;
+  char *buf1, *buf2;
+  int i, j, best;
+  int config_flags = -1;
+  static char *memcpy_methods[] = {
+    "probe", "glibc",
+#ifdef ARCH_X86
+    "kernel", "mmx", "mmxext", "sse",
+#endif
+    NULL
+  };
+
+  config_flags = xmame_mm_accel();
+
+  best = 0;
+
+  xmame_fast_memcpy = memcpy;
+
+  if( (buf1 = malloc(BUFSIZE)) == NULL )
+    return;
+    
+  if( (buf2 = malloc(BUFSIZE)) == NULL ) {
+    free(buf1);
+    return;
+  }
+
+  printf("Benchmarking memcpy methods (smaller is better):\n");
+  /* make sure buffers are present on physical memory */
+  memcpy(buf1,buf2,BUFSIZE);
+
+  for(i=1; memcpy_method[i].name; i++)
+  {
+    if( (config_flags & memcpy_method[i].cpu_require) != 
+         memcpy_method[i].cpu_require )
+      continue;
+
+    t = rdtsc();
+    for(j=0;j<50;j++) {
+      memcpy_method[i].function(buf2,buf1,BUFSIZE);
+      memcpy_method[i].function(buf1,buf2,BUFSIZE);
+    }     
+
+    t = rdtsc() - t;
+    memcpy_method[i].time = t;
+    
+    printf("\t%s : %lld\n",memcpy_method[i].name, t);
+    
+    if( best == 0 || t < memcpy_method[best].time )
+      best = i;
+  }
+
+  printf("xmame: using %s\n", memcpy_method[best].name );
+  xmame_fast_memcpy = memcpy_method[best].function;
+
+  free(buf1);
+  free(buf2);
+}
+
+#endif /* X86_ASM */
diff -Naur xmame-0.61.1-pr9.save/src/unix/fastmcpy.h xmame-0.61.1-pr9/src/unix/fastmcpy.h
--- xmame-0.61.1-pr9.save/src/unix/fastmcpy.h   1969-12-31 18:00:00.000000000 -0600
+++ xmame-0.61.1-pr9/src/unix/fastmcpy.h        2002-11-12 09:49:54.000000000 -0600
@@ -0,0 +1,19 @@
+
+#ifndef __FASTMCPY_H
+#define __FASTMCPY_H
+
+#ifdef X86_ASM
+
+/* Optimized/fast memcpy */
+extern void *(* xmame_fast_memcpy)(void *to, const void *from, size_t len);
+
+/* Benchmark available memcpy methods */
+void xmame_probe_fast_memcpy(void);
+
+#else
+
+#define xmame_fast_memcpy memcpy
+
+#endif
+
+#endif
diff -Naur xmame-0.61.1-pr9.save/src/unix/main.c xmame-0.61.1-pr9/src/unix/main.c
--- xmame-0.61.1-pr9.save/src/unix/main.c       2002-10-19 03:12:34.000000000 -0500
+++ xmame-0.61.1-pr9/src/unix/main.c    2002-11-12 09:48:38.000000000 -0600
@@ -53,6 +53,11 @@
        printf("Success!\n");
 #endif
 
+#ifdef X86_ASM
+       /* check the best method for memcpy() */
+       xmame_probe_fast_memcpy();
+#endif
+
        /* some display methods need to do some stuff with root rights */
        if (sysdep_init()!= OSD_OK) exit(OSD_NOT_OK);

diff -Naur xmame-0.61.1-pr9.save/src/unix/sysdep/sound_stream.h xmame-0.61.1-pr9/src/unix/sysdep/sound_stream.h
--- xmame-0.61.1-pr9.save/src/unix/sysdep/sound_stream.h        2002-05-02 21:53:54.000000000 -0500
+++ xmame-0.61.1-pr9/src/unix/sysdep/sound_stream.h     2002-11-12 09:31:04.000000000 -0600
@@ -20,6 +20,7 @@
 #ifndef __SOUND_STREAM_H
 #define __SOUND_STREAM_H
 
+#include "fastmcpy.h"
 #include "sysdep_dsp.h"
 #include "begin_code.h"
 
diff -Naur xmame-0.61.1-pr9.save/src/unix/xmame.h xmame-0.61.1-pr9/src/unix/xmame.h
--- xmame-0.61.1-pr9.save/src/unix/xmame.h      2002-10-19 03:12:06.000000000 -0500
+++ xmame-0.61.1-pr9/src/unix/xmame.h   2002-11-12 09:31:04.000000000 -0600
@@ -42,6 +42,7 @@
 #include "sysdep/sysdep_palette.h"
 #include "sysdep/rc.h"
 #include "sysdep/sound_stream.h"
+#include "fastmcpy.h"

 /*
  * Definitions.



_______________________________________________
Xmame mailing list
[EMAIL PROTECTED]
http://toybox.twisted.org.uk/mailman/listinfo/xmame

Reply via email to