This is a temporary workaround for people who run the latest git kernel from Linus; it has only been tested against ati-drivers-12.9_beta, and only on ~amd64.
--- common/lib/modules/fglrx/build_mod/firegl_public.orig	2012-10-15 10:10:58.593454377 -0700
+++ common/lib/modules/fglrx/build_mod/firegl_public.c	2012-10-15 10:12:56.453972670 -0700
@@ -3892,7 +3892,7 @@
             KCL_DEBUG_ERROR(REMAP_PAGE_RANGE_STR " failed\n");
             return -EAGAIN;
         }
-        vma->vm_flags |= VM_SHM | VM_RESERVED; /* Don't swap */
+        vma->vm_flags |= VM_SHM | VM_IO; /* Don't swap */
         vma->vm_ops = &vm_ops;
         break;
@@ -3922,14 +3922,14 @@
                 KCL_DEBUG_ERROR(REMAP_PAGE_RANGE_STR " failed\n");
                 return -EAGAIN;
             }
-            vma->vm_flags |= VM_SHM | VM_RESERVED; /* Don't swap */
+            vma->vm_flags |= VM_SHM | VM_IO; /* Don't swap */
             vma->vm_ops = &vm_ops;
         }
         break;
 #endif

     case __KE_SHM:
-        vma->vm_flags |= VM_SHM | VM_RESERVED; /* Don't swap */
+        vma->vm_flags |= VM_SHM | VM_IO; /* Don't swap */
         vma->vm_ops = &vm_shm_ops;
         break;
@@ -3937,7 +3937,7 @@
         pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-        vma->vm_flags |= VM_RESERVED;
+        vma->vm_flags |= VM_IO;
         //vma->vm_flags |= VM_SHM | VM_LOCKED; /* DDDDDDDDDDon't swap */
         //vma->vm_mm->locked_vm += pages; /* Kernel tracks aqmount of locked pages */
@@ -3946,14 +3946,14 @@
     case __KE_CTX:
         pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-        vma->vm_flags |= VM_LOCKED | VM_SHM | VM_RESERVED; /* Don't swap */
+        vma->vm_flags |= VM_LOCKED | VM_SHM | VM_IO; /* Don't swap */
         vma->vm_mm->locked_vm += pages; /* Kernel tracks aqmount of locked pages */
         vma->vm_ops = &vm_ctx_ops;
         break;

     case __KE_PCI_BQS:
         pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-        vma->vm_flags |= VM_LOCKED | VM_SHM | VM_RESERVED; /* Don't swap */
+        vma->vm_flags |= VM_LOCKED | VM_SHM | VM_IO; /* Don't swap */
         vma->vm_mm->locked_vm += pages; /* Kernel tracks aqmount of locked pages */
         vma->vm_ops = &vm_pci_bq_ops;
         break;
@@ -3984,9 +3984,9 @@
             return -EAGAIN;
         }
 #ifdef __x86_64__
-        vma->vm_flags |= VM_RESERVED;
+        vma->vm_flags |= VM_IO;
 #else
-        vma->vm_flags |= VM_SHM | VM_RESERVED; /* Don't swap */
+        vma->vm_flags |= VM_SHM | VM_IO; /* Don't swap */
 #endif
         vma->vm_ops = &vm_ops;
     }
@@ -4015,9 +4015,9 @@
             return -EAGAIN;
         }
 #ifdef __x86_64__
-        vma->vm_flags |= VM_RESERVED;
+        vma->vm_flags |= VM_IO;
 #else
-        vma->vm_flags |= VM_SHM | VM_RESERVED; /* Don't swap */
+        vma->vm_flags |= VM_SHM | VM_IO; /* Don't swap */
 #endif
         vma->vm_ops = &vm_agp_bq_ops;
     }
@@ -4025,7 +4025,7 @@
 #endif /* __AGP__BUILTIN__ */

     case __KE_KMAP:
-        vma->vm_flags |= VM_SHM | VM_RESERVED;
+        vma->vm_flags |= VM_SHM | VM_IO;
         vma->vm_ops = &vm_kmap_ops;
         if (readonly && (vma->vm_flags & VM_WRITE))
         {
@@ -4046,7 +4046,7 @@
 #endif
         // fall through
     case __KE_GART_CACHEABLE:
-        vma->vm_flags |= VM_RESERVED;
+        vma->vm_flags |= VM_IO;
         vma->vm_ops = &vm_gart_ops;
         break;
     default: