author     Britney Fransen <brfransen@gmail.com>  2017-03-30 17:16:21 (GMT)
committer  Britney Fransen <brfransen@gmail.com>  2017-03-30 17:16:21 (GMT)
commit     6dce02640f282da9cf02a9941394706c6d2b0f78 (patch)
tree       70260e6f14396e41568d68845bf6b1c4d7757ea3 /abs/core/nvidia
parent     2b006c4a34629b3bcc50b78a920c94cf88a6459a (diff)
download   linhes_pkgbuild-6dce02640f282da9cf02a9941394706c6d2b0f78.zip
           linhes_pkgbuild-6dce02640f282da9cf02a9941394706c6d2b0f78.tar.gz
           linhes_pkgbuild-6dce02640f282da9cf02a9941394706c6d2b0f78.tar.bz2
nvidia & nvidia-utils: update to 378.13
Diffstat (limited to 'abs/core/nvidia')
-rw-r--r--  abs/core/nvidia/PKGBUILD           |  12
-rw-r--r--  abs/core/nvidia/kernel_4.10.patch  | 382
2 files changed, 390 insertions(+), 4 deletions(-)
diff --git a/abs/core/nvidia/PKGBUILD b/abs/core/nvidia/PKGBUILD
index 47329f3..b5a0707 100644
--- a/abs/core/nvidia/PKGBUILD
+++ b/abs/core/nvidia/PKGBUILD
@@ -5,19 +5,21 @@
pkgbase=nvidia
pkgname=(nvidia nvidia-dkms)
-pkgver=375.26
+pkgver=378.13
_extramodules=extramodules-4.4-ARCH
-pkgrel=1
+pkgrel=4
pkgdesc="NVIDIA drivers for linux"
arch=('i686' 'x86_64')
url="http://www.nvidia.com/"
makedepends=('nvidia-libgl' "nvidia-utils=${pkgver}" 'linux' 'linux-headers>=4.4' 'linux-headers<4.5')
license=('custom')
options=('!strip')
+source=('kernel_4.10.patch')
source_i686=("http://us.download.nvidia.com/XFree86/Linux-x86/${pkgver}/NVIDIA-Linux-x86-${pkgver}.run")
source_x86_64=("http://us.download.nvidia.com/XFree86/Linux-x86_64/${pkgver}/NVIDIA-Linux-x86_64-${pkgver}-no-compat32.run")
-sha512sums_i686=('3bc859a95469a45f3c627018248d83e178d160385c3d17d9f890b0d142ecd1220fb21c442e4fe7755b831227a9c820736f447b162acd9699819c6e8145d6d841')
-sha512sums_x86_64=('f52f6597daa1eaf4cbd934d785da6028ef23ecef98e14746143e3738504f8d65b73788abbcf9fd812317fc2c53cdf1c4d4839de57fafdea1930a08c6b21f1992')
+md5sums=('e81769b830b7a1e60c635e3bbe559f59')
+md5sums_i686=('dd1077750af9a067739ec291fb24175f')
+md5sums_x86_64=('fe4d25b19a780a690cafc8e3b7c0113f')
[[ "$CARCH" = "i686" ]] && _pkg="NVIDIA-Linux-x86-${pkgver}"
[[ "$CARCH" = "x86_64" ]] && _pkg="NVIDIA-Linux-x86_64-${pkgver}-no-compat32"
@@ -26,6 +28,8 @@ prepare() {
sh "${_pkg}.run" --extract-only
cd "${_pkg}"
+# patch -Np1 --no-backup-if-mismatch -i ../kernel_4.10.patch
+
cp -a kernel kernel-dkms
cd kernel-dkms
sed -i "s/__VERSION_STRING/${pkgver}/" dkms.conf
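
For reference, a minimal sketch of how the updated PKGBUILD would typically be rebuilt and verified locally, assuming standard Arch packaging tools (makepkg, plus updpkgsums from pacman-contrib); this is an illustration, not a documented LinHES build step:

    # minimal sketch, assuming a checkout containing this abs/core/nvidia directory
    cd abs/core/nvidia
    updpkgsums     # regenerate md5sums/md5sums_i686/md5sums_x86_64 for the 378.13 sources
    makepkg -s     # fetch the NVIDIA .run installer, run prepare(), build nvidia and nvidia-dkms
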
diff --git a/abs/core/nvidia/kernel_4.10.patch b/abs/core/nvidia/kernel_4.10.patch
new file mode 100644
index 0000000..4ad9f8a
--- /dev/null
+++ b/abs/core/nvidia/kernel_4.10.patch
@@ -0,0 +1,382 @@
+From ca8b106738521823707f3567cedb41ca158792a3 Mon Sep 17 00:00:00 2001
+From: Alberto Milone <alberto.milone@canonical.com>
+Date: Wed, 15 Feb 2017 10:53:42 +0100
+Subject: [PATCH 1/1] Add support for Linux 4.10
+
+---
+ common/inc/nv-mm.h | 9 ++++--
+ nvidia-drm/nvidia-drm-fence.c | 72 +++++++++++++++++++++++++++++++++++++++++++
+ nvidia-drm/nvidia-drm-gem.h | 6 ++++
+ nvidia-drm/nvidia-drm-priv.h | 7 +++++
+ nvidia/nv-pat.c | 40 ++++++++++++++++++++++++
+ 5 files changed, 132 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/common/inc/nv-mm.h b/kernel/common/inc/nv-mm.h
+index 06d7da4..e5cc56a 100644
+--- a/kernel/common/inc/nv-mm.h
++++ b/kernel/common/inc/nv-mm.h
+@@ -46,6 +46,8 @@
+ * 2016 Dec 14:5b56d49fc31dbb0487e14ead790fc81ca9fb2c99
+ */
+
++#include <linux/version.h>
++
+ #if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
+ #if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
+ #define NV_GET_USER_PAGES get_user_pages
+@@ -92,10 +94,13 @@
+ pages, vmas, NULL);
+
+ #else
+-
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
+ pages, vmas);
+-
++#else
++ return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
++ pages, vmas, NULL);
++#endif
+ #endif
+
+ }
+diff --git a/kernel/nvidia-drm/nvidia-drm-fence.c b/kernel/nvidia-drm/nvidia-drm-fence.c
+index 5e98c5f..fa2c508 100644
+--- a/kernel/nvidia-drm/nvidia-drm-fence.c
++++ b/kernel/nvidia-drm/nvidia-drm-fence.c
+@@ -31,7 +31,11 @@
+
+ #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
+ struct nv_fence {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ struct fence base;
++#else
++ struct dma_fence base;
++#endif
+ spinlock_t lock;
+
+ struct nvidia_drm_device *nv_dev;
+@@ -51,7 +55,11 @@ nv_fence_ready_to_signal(struct nv_fence *nv_fence)
+
+ static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
+ (
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ struct fence *fence
++#else
++ struct dma_fence *fence
++#endif
+ )
+ {
+ return "NVIDIA";
+@@ -59,7 +67,11 @@ static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
+
+ static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
+ (
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ struct fence *fence
++#else
++ struct dma_fence *fence
++#endif
+ )
+ {
+ return "nvidia.prime";
+@@ -67,7 +79,11 @@ static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
+
+ static bool nvidia_drm_gem_prime_fence_op_signaled
+ (
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ struct fence *fence
++#else
++ struct dma_fence *fence
++#endif
+ )
+ {
+ struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
+@@ -99,7 +115,11 @@ unlock_struct_mutex:
+
+ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
+ (
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ struct fence *fence
++#else
++ struct dma_fence *fence
++#endif
+ )
+ {
+ bool ret = true;
+@@ -107,7 +127,11 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
+ struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem;
+ struct nvidia_drm_device *nv_dev = nv_fence->nv_dev;
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ if (fence_is_signaled(fence))
++#else
++ if (dma_fence_is_signaled(fence))
++#endif
+ {
+ return false;
+ }
+@@ -136,7 +160,11 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
+ }
+
+ nv_gem->fenceContext.softFence = fence;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ fence_get(fence);
++#else
++ dma_fence_get(fence);
++#endif
+
+ unlock_struct_mutex:
+ mutex_unlock(&nv_dev->dev->struct_mutex);
+@@ -146,7 +174,11 @@ unlock_struct_mutex:
+
+ static void nvidia_drm_gem_prime_fence_op_release
+ (
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ struct fence *fence
++#else
++ struct dma_fence *fence
++#endif
+ )
+ {
+ struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
+@@ -155,7 +187,11 @@ static void nvidia_drm_gem_prime_fence_op_release
+
+ static signed long nvidia_drm_gem_prime_fence_op_wait
+ (
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ struct fence *fence,
++#else
++ struct dma_fence *fence,
++#endif
+ bool intr,
+ signed long timeout
+ )
+@@ -170,12 +206,20 @@ static signed long nvidia_drm_gem_prime_fence_op_wait
+ * that it should never get hit during normal operation, but not so long
+ * that the system becomes unresponsive.
+ */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ return fence_default_wait(fence, intr,
++#else
++ return dma_fence_default_wait(fence, intr,
++#endif
+ (timeout == MAX_SCHEDULE_TIMEOUT) ?
+ msecs_to_jiffies(96) : timeout);
+ }
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ static const struct fence_ops nvidia_drm_gem_prime_fence_ops = {
++#else
++static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = {
++#endif
+ .get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name,
+ .get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name,
+ .signaled = nvidia_drm_gem_prime_fence_op_signaled,
+@@ -285,7 +329,11 @@ static void nvidia_drm_gem_prime_fence_signal
+ bool force
+ )
+ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ struct fence *fence = nv_gem->fenceContext.softFence;
++#else
++ struct dma_fence *fence = nv_gem->fenceContext.softFence;
++#endif
+
+ WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex));
+
+@@ -301,10 +349,18 @@ static void nvidia_drm_gem_prime_fence_signal
+
+ if (force || nv_fence_ready_to_signal(nv_fence))
+ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ fence_signal(&nv_fence->base);
++#else
++ dma_fence_signal(&nv_fence->base);
++#endif
+
+ nv_gem->fenceContext.softFence = NULL;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ fence_put(&nv_fence->base);
++#else
++ dma_fence_put(&nv_fence->base);
++#endif
+
+ nvKms->disableChannelEvent(nv_dev->pDevice,
+ nv_gem->fenceContext.cb);
+@@ -320,7 +376,11 @@ static void nvidia_drm_gem_prime_fence_signal
+
+ nv_fence = container_of(fence, struct nv_fence, base);
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ fence_signal(&nv_fence->base);
++#else
++ dma_fence_signal(&nv_fence->base);
++#endif
+ }
+ }
+
+@@ -513,7 +573,11 @@ int nvidia_drm_gem_prime_fence_init
+ * fence_context_alloc() cannot fail, so we do not need to check a return
+ * value.
+ */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ nv_gem->fenceContext.context = fence_context_alloc(1);
++#else
++ nv_gem->fenceContext.context = dma_fence_context_alloc(1);
++#endif
+
+ ret = nvidia_drm_gem_prime_fence_import_semaphore(
+ nv_dev, nv_gem, p->index,
+@@ -670,7 +734,11 @@ int nvidia_drm_gem_prime_fence_attach
+ nv_fence->nv_gem = nv_gem;
+
+ spin_lock_init(&nv_fence->lock);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
++#else
++ dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
++#endif
+ &nv_fence->lock, nv_gem->fenceContext.context,
+ p->sem_thresh);
+
+@@ -680,7 +748,11 @@ int nvidia_drm_gem_prime_fence_attach
+
+ reservation_object_add_excl_fence(&nv_gem->fenceContext.resv,
+ &nv_fence->base);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ fence_put(&nv_fence->base); /* Reservation object has reference */
++#else
++ dma_fence_put(&nv_fence->base);
++#endif
+
+ ret = 0;
+
+diff --git a/kernel/nvidia-drm/nvidia-drm-gem.h b/kernel/nvidia-drm/nvidia-drm-gem.h
+index 4ff45e8..394576b 100644
+--- a/kernel/nvidia-drm/nvidia-drm-gem.h
++++ b/kernel/nvidia-drm/nvidia-drm-gem.h
+@@ -29,6 +29,8 @@
+
+ #include "nvidia-drm-priv.h"
+
++#include <linux/version.h>
++
+ #include <drm/drmP.h>
+ #include "nvkms-kapi.h"
+
+@@ -98,7 +100,11 @@ struct nvidia_drm_gem_object
+ /* Software signaling structures */
+ struct NvKmsKapiChannelEvent *cb;
+ struct nvidia_drm_gem_prime_soft_fence_event_args *cbArgs;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ struct fence *softFence; /* Fence for software signaling */
++#else
++ struct dma_fence *softFence;
++#endif
+ } fenceContext;
+ #endif
+ };
+diff --git a/kernel/nvidia-drm/nvidia-drm-priv.h b/kernel/nvidia-drm/nvidia-drm-priv.h
+index 1e9b9f9..ae171e7 100644
+--- a/kernel/nvidia-drm/nvidia-drm-priv.h
++++ b/kernel/nvidia-drm/nvidia-drm-priv.h
+@@ -25,6 +25,8 @@
+
+ #include "conftest.h" /* NV_DRM_AVAILABLE */
+
++#include <linux/version.h>
++
+ #if defined(NV_DRM_AVAILABLE)
+
+ #include <drm/drmP.h>
+@@ -34,7 +36,12 @@
+ #endif
+
+ #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ #include <linux/fence.h>
++#else
++#include <linux/dma-fence.h>
++#endif
++
+ #include <linux/reservation.h>
+ #endif
+
+diff --git a/kernel/nvidia/nv-pat.c b/kernel/nvidia/nv-pat.c
+index df78020..0af7d47 100644
+--- a/kernel/nvidia/nv-pat.c
++++ b/kernel/nvidia/nv-pat.c
+@@ -203,6 +203,7 @@ void nv_disable_pat_support(void)
+ }
+
+ #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ static int
+ nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ {
+@@ -234,6 +235,34 @@ static struct notifier_block nv_hotcpu_nfb = {
+ .notifier_call = nvidia_cpu_callback,
+ .priority = 0
+ };
++#else
++static int nvidia_cpu_online(unsigned int hcpu)
++{
++ unsigned int cpu = get_cpu();
++ if (cpu == hcpu)
++ nv_setup_pat_entries(NULL);
++ else
++ NV_SMP_CALL_FUNCTION(nv_setup_pat_entries, (void *)(long int)hcpu, 1);
++
++ put_cpu();
++
++ return 0;
++}
++
++static int nvidia_cpu_down_prep(unsigned int hcpu)
++{
++ unsigned int cpu = get_cpu();
++ if (cpu == hcpu)
++ nv_restore_pat_entries(NULL);
++ else
++ NV_SMP_CALL_FUNCTION(nv_restore_pat_entries, (void *)(long int)hcpu, 1);
++
++ put_cpu();
++
++ return 0;
++}
++#endif
++
+ #endif
+
+ int nv_init_pat_support(nvidia_stack_t *sp)
+@@ -255,7 +284,14 @@ int nv_init_pat_support(nvidia_stack_t *sp)
+ #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+ if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
+ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ if (register_hotcpu_notifier(&nv_hotcpu_nfb) != 0)
++#else
++ if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
++ "gpu/nvidia:online",
++ nvidia_cpu_online,
++ nvidia_cpu_down_prep) != 0)
++#endif
+ {
+ nv_disable_pat_support();
+ nv_printf(NV_DBG_ERRORS,
+@@ -280,7 +316,11 @@ void nv_teardown_pat_support(void)
+ {
+ nv_disable_pat_support();
+ #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ unregister_hotcpu_notifier(&nv_hotcpu_nfb);
++#else
++ cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
++#endif
+ #endif
+ }
+ }
+--
+2.7.4
+
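
The patch above applies one recurring idiom: every use of the pre-4.10 fence API is wrapped in a LINUX_VERSION_CODE guard so the same source also compiles against the dma_fence API that replaced it in Linux 4.10. A condensed sketch of that pattern follows; it is an illustration only, and the nv_backing_fence_t typedef and nv_fence_* macros are hypothetical names, not part of the driver:

    #include <linux/version.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
    #include <linux/fence.h>
    typedef struct fence nv_backing_fence_t;      /* pre-4.10 type name */
    #define nv_fence_signal(f)  fence_signal(f)
    #define nv_fence_put(f)     fence_put(f)
    #else
    #include <linux/dma-fence.h>
    typedef struct dma_fence nv_backing_fence_t;  /* renamed in 4.10 */
    #define nv_fence_signal(f)  dma_fence_signal(f)
    #define nv_fence_put(f)     dma_fence_put(f)
    #endif

    /* With wrappers like these, call sites could stay unconditional; the actual
     * patch instead repeats the #if/#else guard at each affected call site, and
     * applies the same version check to the get_user_pages_remote() signature
     * change and to the CPU hotplug API (register_hotcpu_notifier replaced by
     * cpuhp_setup_state). */

Note that the PKGBUILD ships the patch but leaves the `patch` call in prepare() commented out: the package is still built against the 4.4 extramodules tree (linux-headers>=4.4, <4.5), which predates these 4.10 API changes.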