The attached patch advances the Nixpkgs nvidia-x11 expression to
version 375.39, the latest long-term stable release. It also patches
the driver to build against the 4.10 kernel.
From c251121bdcb24239a8ec05e78696bf20059e2da3 Mon Sep 17 00:00:00 2001
From: Karn Kallio <[email protected]>
Date: Sun, 12 Mar 2017 20:19:57 -0400
Subject: [PATCH] nvidia-x11 : advance to 375.39, the latest long term stable
release. Also patch for kernel 4.10.
---
pkgs/os-specific/linux/nvidia-x11/default.nix | 10 +-
pkgs/os-specific/linux/nvidia-x11/generic.nix | 2 +
.../nvidia-x11/nvidia-375.39-linux-4.10.patch | 293 +++++++++++++++++++++
3 files changed, 300 insertions(+), 5 deletions(-)
create mode 100644 pkgs/os-specific/linux/nvidia-x11/nvidia-375.39-linux-4.10.patch
diff --git a/pkgs/os-specific/linux/nvidia-x11/default.nix b/pkgs/os-specific/linux/nvidia-x11/default.nix
index 5e26fef6e1..95ad3a961f 100644
--- a/pkgs/os-specific/linux/nvidia-x11/default.nix
+++ b/pkgs/os-specific/linux/nvidia-x11/default.nix
@@ -6,11 +6,11 @@ in
{
# Policy: use the highest stable version as the default (on our master).
stable = generic {
- version = "375.26";
- sha256_32bit = "0yv19rkz2wzzj0fygfjb1mh21iy769kff3yg2kzk8bsiwnmcyybw";
- sha256_64bit = "1kqy9ayja3g5znj2hzx8pklz8qi0b0l9da7c3ldg3hlxf31v4hjg";
- settingsSha256 = "1s8zf5cfhx8m05fvws0gh1q0wy5zyyg2j510zlwp4hk35y7dic5y";
- persistencedSha256 = "15r6rbzyk4yaqkpkqs8j00zc7jbhgp8naskv93dwjyw0lnj0wgky";
+ version = "375.39";
+ sha256_32bit = "0mlly5n84640xa2mcdqqg44s42ck6g3lj5skf7gmfp2w5ibzccvz";
+ sha256_64bit = "19w5v81f770rqjrvdwz11k015zli2y8f4x10ydqxcy0nhhh5mgli";
+ settingsSha256 = "0f881q4jzliqzqi1p5lzwz86h829m5g74zdj7nlfi1cc6s45g5p5";
+ persistencedSha256 = "0zj6wdcgg2ljhvsssfsqz9wk28ykmsh4gwmis31q3rsrkq668x33";
};
beta = generic {
diff --git a/pkgs/os-specific/linux/nvidia-x11/generic.nix b/pkgs/os-specific/linux/nvidia-x11/generic.nix
index 9e39a6df09..cb3a99fb37 100644
--- a/pkgs/os-specific/linux/nvidia-x11/generic.nix
+++ b/pkgs/os-specific/linux/nvidia-x11/generic.nix
@@ -42,6 +42,8 @@ let
}
else throw "nvidia-x11 does not support platform ${stdenv.system}";
+ patches = [] ++ optional (!libsOnly && versionAtLeast kernel.dev.version "4.10" && version == "375.39") ./nvidia-375.39-linux-4.10.patch ;
+
inherit version useGLVND useProfiles;
inherit (stdenv) system;
diff --git a/pkgs/os-specific/linux/nvidia-x11/nvidia-375.39-linux-4.10.patch b/pkgs/os-specific/linux/nvidia-x11/nvidia-375.39-linux-4.10.patch
new file mode 100644
index 0000000000..4812b63f22
--- /dev/null
+++ b/pkgs/os-specific/linux/nvidia-x11/nvidia-375.39-linux-4.10.patch
@@ -0,0 +1,293 @@
+diff -Nurp temp/NVIDIA-Linux-x86_64-375.39/kernel/common/inc/nv-linux.h NVIDIA-Linux-x86_64-375.39/kernel/common/inc/nv-linux.h
+--- temp/NVIDIA-Linux-x86_64-375.39/kernel/common/inc/nv-linux.h 2017-02-01 13:50:37.000000000 +1100
++++ NVIDIA-Linux-x86_64-375.39/kernel/common/inc/nv-linux.h 2017-02-20 14:49:55.758847585 +1100
+@@ -294,7 +294,8 @@ NV_STATUS nvos_forward_error_to_cray(str
+
+ extern int nv_pat_mode;
+
+-#if defined(CONFIG_HOTPLUG_CPU)
++//#if defined(CONFIG_HOTPLUG_CPU)
++#if 0
+ #define NV_ENABLE_HOTPLUG_CPU
+ #include <linux/cpu.h> /* CPU hotplug support */
+ #include <linux/notifier.h> /* struct notifier_block, etc */
+diff -Nurp temp/NVIDIA-Linux-x86_64-375.39/kernel/nv_compiler.h NVIDIA-Linux-x86_64-375.39/kernel/nv_compiler.h
+--- temp/NVIDIA-Linux-x86_64-375.39/kernel/nv_compiler.h 1970-01-01 10:00:00.000000000 +1000
++++ NVIDIA-Linux-x86_64-375.39/kernel/nv_compiler.h 2017-02-20 14:46:39.397338730 +1100
+@@ -0,0 +1 @@
++#define NV_COMPILER "gcc version 6.2.0 20161005 (Ubuntu 6.2.0-5ubuntu12) "
+diff -Nurp temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia/nv-p2p.c NVIDIA-Linux-x86_64-375.39/kernel/nvidia/nv-p2p.c
+--- temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia/nv-p2p.c 2017-02-01 13:50:37.000000000 +1100
++++ NVIDIA-Linux-x86_64-375.39/kernel/nvidia/nv-p2p.c 2017-02-20 14:49:55.758847585 +1100
+@@ -146,7 +146,7 @@ EXPORT_SYMBOL(nvidia_p2p_destroy_mapping
+ int nvidia_p2p_get_pages(
+ uint64_t p2p_token,
+ uint32_t va_space,
+- uint64_t virtual_address,
++ uint64_t address,
+ uint64_t length,
+ struct nvidia_p2p_page_table **page_table,
+ void (*free_callback)(void * data),
+@@ -211,7 +211,7 @@ int nvidia_p2p_get_pages(
+ }
+
+ status = rm_p2p_get_pages(sp, p2p_token, va_space,
+- virtual_address, length, physical_addresses, wreqmb_h,
++ address, length, physical_addresses, wreqmb_h,
+ rreqmb_h, &entries, &gpu_uuid, *page_table,
+ free_callback, data);
+ if (status != NV_OK)
+@@ -286,7 +286,7 @@ failed:
+
+ if (bGetPages)
+ {
+- rm_p2p_put_pages(sp, p2p_token, va_space, virtual_address,
++ rm_p2p_put_pages(sp, p2p_token, va_space, address,
+ gpu_uuid, *page_table);
+ }
+
+@@ -329,7 +329,7 @@ EXPORT_SYMBOL(nvidia_p2p_free_page_table
+ int nvidia_p2p_put_pages(
+ uint64_t p2p_token,
+ uint32_t va_space,
+- uint64_t virtual_address,
++ uint64_t address,
+ struct nvidia_p2p_page_table *page_table
+ )
+ {
+@@ -343,7 +343,7 @@ int nvidia_p2p_put_pages(
+ return rc;
+ }
+
+- status = rm_p2p_put_pages(sp, p2p_token, va_space, virtual_address,
++ status = rm_p2p_put_pages(sp, p2p_token, va_space, address,
+ page_table->gpu_uuid, page_table);
+ if (status == NV_OK)
+ nvidia_p2p_free_page_table(page_table);
+diff -Nurp temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia/nv-pat.c NVIDIA-Linux-x86_64-375.39/kernel/nvidia/nv-pat.c
+--- temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia/nv-pat.c 2017-02-01 13:50:37.000000000 +1100
++++ NVIDIA-Linux-x86_64-375.39/kernel/nvidia/nv-pat.c 2017-02-20 14:49:55.758847585 +1100
+@@ -217,7 +217,7 @@ nvidia_cpu_callback(struct notifier_bloc
+ else
+ NV_SMP_CALL_FUNCTION(nv_setup_pat_entries, hcpu, 1);
+ break;
+- case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
+ if (cpu == (NvUPtr)hcpu)
+ nv_restore_pat_entries(NULL);
+ else
+diff -Nurp temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-fence.c NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-fence.c
+--- temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-fence.c 2017-02-01 13:47:52.000000000 +1100
++++ NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-fence.c 2017-02-20 14:53:39.418241149 +1100
+@@ -31,7 +31,7 @@
+
+ #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
+ struct nv_fence {
+- struct fence base;
++ struct dma_fence base;
+ spinlock_t lock;
+
+ struct nvidia_drm_device *nv_dev;
+@@ -51,7 +51,7 @@ nv_fence_ready_to_signal(struct nv_fence
+
+ static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
+ (
+- struct fence *fence
++ struct dma_fence *fence
+ )
+ {
+ return "NVIDIA";
+@@ -59,7 +59,7 @@ static const char *nvidia_drm_gem_prime_
+
+ static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
+ (
+- struct fence *fence
++ struct dma_fence *fence
+ )
+ {
+ return "nvidia.prime";
+@@ -67,7 +67,7 @@ static const char *nvidia_drm_gem_prime_
+
+ static bool nvidia_drm_gem_prime_fence_op_signaled
+ (
+- struct fence *fence
++ struct dma_fence *fence
+ )
+ {
+ struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
+@@ -99,7 +99,7 @@ unlock_struct_mutex:
+
+ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
+ (
+- struct fence *fence
++ struct dma_fence *fence
+ )
+ {
+ bool ret = true;
+@@ -107,7 +107,7 @@ static bool nvidia_drm_gem_prime_fence_o
+ struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem;
+ struct nvidia_drm_device *nv_dev = nv_fence->nv_dev;
+
+- if (fence_is_signaled(fence))
++ if (dma_fence_is_signaled(fence))
+ {
+ return false;
+ }
+@@ -136,7 +136,7 @@ static bool nvidia_drm_gem_prime_fence_o
+ }
+
+ nv_gem->fenceContext.softFence = fence;
+- fence_get(fence);
++ dma_fence_get(fence);
+
+ unlock_struct_mutex:
+ mutex_unlock(&nv_dev->dev->struct_mutex);
+@@ -146,7 +146,7 @@ unlock_struct_mutex:
+
+ static void nvidia_drm_gem_prime_fence_op_release
+ (
+- struct fence *fence
++ struct dma_fence *fence
+ )
+ {
+ struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
+@@ -155,7 +155,7 @@ static void nvidia_drm_gem_prime_fence_o
+
+ static signed long nvidia_drm_gem_prime_fence_op_wait
+ (
+- struct fence *fence,
++ struct dma_fence *fence,
+ bool intr,
+ signed long timeout
+ )
+@@ -170,12 +170,12 @@ static signed long nvidia_drm_gem_prime_
+ * that it should never get hit during normal operation, but not so long
+ * that the system becomes unresponsive.
+ */
+- return fence_default_wait(fence, intr,
++ return dma_fence_default_wait(fence, intr,
+ (timeout == MAX_SCHEDULE_TIMEOUT) ?
+ msecs_to_jiffies(96) : timeout);
+ }
+
+-static const struct fence_ops nvidia_drm_gem_prime_fence_ops = {
++static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = {
+ .get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name,
+ .get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name,
+ .signaled = nvidia_drm_gem_prime_fence_op_signaled,
+@@ -285,7 +285,7 @@ static void nvidia_drm_gem_prime_fence_s
+ bool force
+ )
+ {
+- struct fence *fence = nv_gem->fenceContext.softFence;
++ struct dma_fence *fence = nv_gem->fenceContext.softFence;
+
+ WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex));
+
+@@ -301,10 +301,10 @@ static void nvidia_drm_gem_prime_fence_s
+
+ if (force || nv_fence_ready_to_signal(nv_fence))
+ {
+- fence_signal(&nv_fence->base);
++ dma_fence_signal(&nv_fence->base);
+
+ nv_gem->fenceContext.softFence = NULL;
+- fence_put(&nv_fence->base);
++ dma_fence_put(&nv_fence->base);
+
+ nvKms->disableChannelEvent(nv_dev->pDevice,
+ nv_gem->fenceContext.cb);
+@@ -320,7 +320,7 @@ static void nvidia_drm_gem_prime_fence_s
+
+ nv_fence = container_of(fence, struct nv_fence, base);
+
+- fence_signal(&nv_fence->base);
++ dma_fence_signal(&nv_fence->base);
+ }
+ }
+
+@@ -513,7 +513,7 @@ int nvidia_drm_gem_prime_fence_init
+ * fence_context_alloc() cannot fail, so we do not need to check a return
+ * value.
+ */
+- nv_gem->fenceContext.context = fence_context_alloc(1);
++ nv_gem->fenceContext.context = dma_fence_context_alloc(1);
+
+ ret = nvidia_drm_gem_prime_fence_import_semaphore(
+ nv_dev, nv_gem, p->index,
+@@ -670,7 +670,7 @@ int nvidia_drm_gem_prime_fence_attach
+ nv_fence->nv_gem = nv_gem;
+
+ spin_lock_init(&nv_fence->lock);
+- fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
++ dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+ &nv_fence->lock, nv_gem->fenceContext.context,
+ p->sem_thresh);
+
+@@ -680,7 +680,7 @@ int nvidia_drm_gem_prime_fence_attach
+
+ reservation_object_add_excl_fence(&nv_gem->fenceContext.resv,
+ &nv_fence->base);
+- fence_put(&nv_fence->base); /* Reservation object has reference */
++ dma_fence_put(&nv_fence->base); /* Reservation object has reference */
+
+ ret = 0;
+
+diff -Nurp temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-gem.h NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-gem.h
+--- temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-gem.h 2017-02-01 13:47:52.000000000 +1100
++++ NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-gem.h 2017-02-20 14:49:55.758847585 +1100
+@@ -98,7 +98,7 @@ struct nvidia_drm_gem_object
+ /* Software signaling structures */
+ struct NvKmsKapiChannelEvent *cb;
+ struct nvidia_drm_gem_prime_soft_fence_event_args *cbArgs;
+- struct fence *softFence; /* Fence for software signaling */
++ struct dma_fence *softFence; /* Fence for software signaling */
+ } fenceContext;
+ #endif
+ };
+diff -Nurp temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-modeset.c NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-modeset.c
+--- temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-modeset.c 2017-02-01 13:47:52.000000000 +1100
++++ NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-modeset.c 2017-02-20 14:49:55.758847585 +1100
+@@ -77,8 +77,7 @@ void nvidia_drm_atomic_state_clear(struc
+
+ void nvidia_drm_atomic_state_free(struct drm_atomic_state *state)
+ {
+- struct nvidia_drm_atomic_state *nv_state =
+- to_nv_atomic_state(state);
++ struct nvidia_drm_atomic_state *nv_state = to_nv_atomic_state(state);
+ drm_atomic_state_default_release(state);
+ nvidia_drm_free(nv_state);
+ }
+diff -Nurp temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-priv.h NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-priv.h
+--- temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-priv.h 2017-02-01 13:47:52.000000000 +1100
++++ NVIDIA-Linux-x86_64-375.39/kernel/nvidia-drm/nvidia-drm-priv.h 2017-02-20 14:49:55.758847585 +1100
+@@ -34,7 +34,7 @@
+ #endif
+
+ #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
+-#include <linux/fence.h>
++#include <linux/dma-fence.h>
+ #include <linux/reservation.h>
+ #endif
+
+diff -Nurp temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia-uvm/uvm8_test.c NVIDIA-Linux-x86_64-375.39/kernel/nvidia-uvm/uvm8_test.c
+--- temp/NVIDIA-Linux-x86_64-375.39/kernel/nvidia-uvm/uvm8_test.c 2017-02-01 13:50:33.000000000 +1100
++++ NVIDIA-Linux-x86_64-375.39/kernel/nvidia-uvm/uvm8_test.c 2017-02-20 14:49:55.758847585 +1100
+@@ -103,7 +103,7 @@ static NV_STATUS uvm8_test_nv_kthread_q(
+ return NV_ERR_INVALID_STATE;
+ }
+
+-static NV_STATUS uvm8_test_get_kernel_virtual_address(
++static NV_STATUS uvm8_test_get_kernel_address(
+ UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS_PARAMS *params,
+ struct file *filp)
+ {
+@@ -173,7 +173,7 @@ long uvm8_test_ioctl(struct file *filp,
+ UVM_ROUTE_CMD_STACK(UVM_TEST_RANGE_GROUP_RANGE_COUNT, uvm8_test_range_group_range_count);
+ UVM_ROUTE_CMD_STACK(UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE, uvm8_test_get_prefetch_faults_reenable_lapse);
+ UVM_ROUTE_CMD_STACK(UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE, uvm8_test_set_prefetch_faults_reenable_lapse);
+- UVM_ROUTE_CMD_STACK(UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS, uvm8_test_get_kernel_virtual_address);
++ UVM_ROUTE_CMD_STACK(UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS, uvm8_test_get_kernel_address);
+ UVM_ROUTE_CMD_STACK(UVM_TEST_PMA_ALLOC_FREE, uvm8_test_pma_alloc_free);
+ UVM_ROUTE_CMD_STACK(UVM_TEST_PMM_ALLOC_FREE_ROOT, uvm8_test_pmm_alloc_free_root);
+ UVM_ROUTE_CMD_STACK(UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR, uvm8_test_pmm_inject_pma_evict_error);
--
2.11.1
_______________________________________________
nix-dev mailing list
[email protected]
http://lists.science.uu.nl/mailman/listinfo/nix-dev