Diffstat (limited to 'abs/core/nvidia-71xx/NVIDIA_kernel-96.43.05-2290218.diff.txt')
-rw-r--r--  abs/core/nvidia-71xx/NVIDIA_kernel-96.43.05-2290218.diff.txt  1471
1 file changed, 1471 insertions, 0 deletions
diff --git a/abs/core/nvidia-71xx/NVIDIA_kernel-96.43.05-2290218.diff.txt b/abs/core/nvidia-71xx/NVIDIA_kernel-96.43.05-2290218.diff.txt
new file mode 100644
index 0000000..0dfcfd5
--- /dev/null
+++ b/abs/core/nvidia-71xx/NVIDIA_kernel-96.43.05-2290218.diff.txt
@@ -0,0 +1,1471 @@
+diff -ru usr/src/nv/Makefile.kbuild usr/src/nv.2305230/Makefile.kbuild
+--- usr/src/nv/Makefile.kbuild 2008-01-21 11:48:11.000000000 -0800
++++ usr/src/nv.2305230/Makefile.kbuild 2008-03-18 12:55:25.461856977 -0700
+@@ -176,6 +176,7 @@
+ vmap \
+ signal_struct \
+ agp_backend_acquire \
++ set_pages_uc \
+ change_page_attr \
+ pci_get_class \
+ sysctl_max_map_count \
+diff -ru usr/src/nv/conftest.sh usr/src/nv.2305230/conftest.sh
+--- usr/src/nv/conftest.sh 2008-01-21 11:48:11.000000000 -0800
++++ usr/src/nv.2305230/conftest.sh 2008-03-18 12:55:28.782046180 -0700
+@@ -100,6 +100,32 @@
+ fi
+ ;;
+
++ set_pages_uc)
++ #
++ # Determine if the set_pages_uc() function is present.
++ #
++ echo "#include <linux/autoconf.h>
++ #include <asm/cacheflush.h>
++ void conftest_set_pages_uc(void) {
++ set_pages_uc();
++ }" > conftest$$.c
++
++ $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
++ rm -f conftest$$.c
++
++ if [ -f conftest$$.o ]; then
++ rm -f conftest$$.o
++ echo "#undef NV_SET_PAGES_UC_PRESENT" >> conftest.h
++ return
++ else
++ echo "#ifdef NV_CHANGE_PAGE_ATTR_PRESENT" >> conftest.h
++ echo "#undef NV_CHANGE_PAGE_ATTR_PRESENT" >> conftest.h
++ echo "#endif" >> conftest.h
++ echo "#define NV_SET_PAGES_UC_PRESENT" >> conftest.h
++ return
++ fi
++ ;;
++
+ change_page_attr)
+ #
+ # Determine if the change_page_attr() function is
+@@ -124,7 +150,9 @@
+ rm -f conftest$$.o
+ return
+ else
++ echo "#ifndef NV_SET_PAGES_UC_PRESENT" >> conftest.h
+ echo "#define NV_CHANGE_PAGE_ATTR_PRESENT" >> conftest.h
++ echo "#endif" >> conftest.h
+ return
+ fi
+ ;;
+@@ -501,6 +529,8 @@
+ return
+ fi
+
++ rm -f conftest$$.o
++
+ echo "#include <linux/autoconf.h>
+ #include <linux/interrupt.h>
+ irq_handler_t conftest_isr;
+diff -ru usr/src/nv/nv-linux.h usr/src/nv.2305230/nv-linux.h
+--- usr/src/nv/nv-linux.h 2008-01-21 11:48:11.000000000 -0800
++++ usr/src/nv.2305230/nv-linux.h 2008-03-18 12:57:26.340745475 -0700
+@@ -19,8 +19,8 @@
+ #include <linux/utsname.h>
+
+
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
+-# error This driver does not support pre-2.4 kernels!
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 7)
++# error This driver does not support 2.4 kernels older than 2.4.7!
+ #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+ # define KERNEL_2_4
+ #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+@@ -51,6 +51,10 @@
+ #include <linux/modversions.h>
+ #endif
+
++#if defined(KERNEL_2_4) && !defined(EXPORT_SYMTAB)
++#define EXPORT_SYMTAB
++#endif
++
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+
+@@ -98,6 +102,7 @@
+
+ #include <linux/spinlock.h>
+ #include <asm/semaphore.h>
++#include <linux/completion.h>
+ #include <linux/highmem.h>
+
+ #ifdef CONFIG_PROC_FS
+@@ -118,8 +123,12 @@
+ #endif
+
+ #if defined(CONFIG_KGDB)
++#if defined(NV_OLD_MM_KGDB_BREAKPOINT_PRESENT)
++#include <asm/kgdb.h>
++#else
+ #include <linux/kgdb.h>
+ #endif
++#endif
+
+ #if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE)
+ #define AGPGART
+@@ -516,19 +525,16 @@
+ #define NV_KMEM_CACHE_ALLOC(ptr, kmem_cache, type) \
+ { \
+ (ptr) = kmem_cache_alloc(kmem_cache, GFP_KERNEL); \
+- KM_ALLOC_RECORD(ptr, sizeof(type), "km_cache_alloc"); \
+ }
+
+ #define NV_KMEM_CACHE_FREE(ptr, type, kmem_cache) \
+ { \
+- KM_FREE_RECORD(ptr, sizeof(type), "km_cache_free"); \
+ kmem_cache_free(kmem_cache, ptr); \
+ }
+
+-#if defined(NV_SG_MAP_BUFFERS) /* Linux/x86-64, only */
+ #if defined(NV_VMAP_PRESENT)
+ #if (NV_VMAP_ARGUMENT_COUNT == 2)
+-#define NV_VMAP(ptr, pages, count, cached) \
++#define NV_VMAP_KERNEL(ptr, pages, count, prot) \
+ { \
+ (ptr) = (unsigned long)vmap(pages, count); \
+ VM_ALLOC_RECORD((void *)ptr, (count) * PAGE_SIZE, "vm_vmap"); \
+@@ -537,26 +543,35 @@
+ #ifndef VM_MAP
+ #define VM_MAP 0
+ #endif
+-#define NV_VMAP(ptr, pages, count, cached) \
++#define NV_VMAP_KERNEL(ptr, pages, count, prot) \
+ { \
+- pgprot_t __prot = (cached) ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE; \
+- (ptr) = (unsigned long)vmap(pages, count, VM_MAP, __prot); \
++ (ptr) = (unsigned long)vmap(pages, count, VM_MAP, prot); \
+ VM_ALLOC_RECORD((void *)ptr, (count) * PAGE_SIZE, "vm_vmap"); \
+ }
+ #else
+ #error "NV_VMAP_ARGUMENT_COUNT value unrecognized!"
+ #endif
+ #else
++#if defined(NV_SG_MAP_BUFFERS)
+ #error "NV_VMAP() undefined (vmap() unavailable)!"
++#endif
+ #endif /* NV_VMAP_PRESENT */
+
+-#define NV_VUNMAP(ptr, count) \
++#define NV_VUNMAP_KERNEL(ptr, count) \
+ { \
+- VM_FREE_RECORD((void *)ptr, (count) * PAGE_SIZE, "vm_vmap"); \
++ VM_FREE_RECORD((void *)ptr, (count) * PAGE_SIZE, "vm_vunmap"); \
+ vunmap((void *)(ptr)); \
+ }
+
+-#endif /* NV_SG_MAP_BUFFERS */
++#define NV_VMAP(addr, pages, count, cached) \
++ { \
++ pgprot_t __prot = (cached) ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE; \
++ void *__ptr = nv_vmap(pages, count, __prot); \
++ (addr) = (unsigned long)__ptr; \
++ }
++
++#define NV_VUNMAP(addr, count) nv_vunmap((void *)addr, count)
++
+
+ #endif /* !defined NVWATCH */
+
+@@ -764,9 +779,10 @@
+
+ #define NV_PGD_OFFSET(address, kernel, mm) \
+ ({ \
++ struct mm_struct *__mm = (mm); \
+ pgd_t *__pgd; \
+ if (!kernel) \
+- __pgd = pgd_offset(mm, address); \
++ __pgd = pgd_offset(__mm, address); \
+ else \
+ __pgd = pgd_offset_k(address); \
+ __pgd; \
+@@ -968,9 +984,7 @@
+ #define NV_ALLOC_TYPE_PCI (1<<0)
+ #define NV_ALLOC_TYPE_AGP (1<<1)
+ #define NV_ALLOC_TYPE_CONTIG (1<<2)
+-#define NV_ALLOC_TYPE_KERNEL (1<<3)
+-#define NV_ALLOC_TYPE_VMALLOC (1<<4)
+-#define NV_ALLOC_TYPE_VMAP (1<<5)
++#define NV_ALLOC_TYPE_VMAP (1<<3)
+
+ #define NV_ALLOC_MAPPING_SHIFT 16
+ #define NV_ALLOC_MAPPING(flags) (((flags)>>NV_ALLOC_MAPPING_SHIFT)&0xff)
+@@ -980,7 +994,6 @@
+
+ #define NV_ALLOC_MAPPING_AGP(flags) ((flags) & NV_ALLOC_TYPE_AGP)
+ #define NV_ALLOC_MAPPING_CONTIG(flags) ((flags) & NV_ALLOC_TYPE_CONTIG)
+-#define NV_ALLOC_MAPPING_VMALLOC(flags) ((flags) & NV_ALLOC_TYPE_VMALLOC)
+ #define NV_ALLOC_MAPPING_VMAP(flags) ((flags) & NV_ALLOC_TYPE_VMAP)
+
+ static inline U032 nv_alloc_init_flags(int cached, int agp, int contig, int kernel)
+@@ -988,12 +1001,7 @@
+ U032 flags = NV_ALLOC_ENC_MAPPING(cached);
+ if (agp) flags |= NV_ALLOC_TYPE_AGP;
+ else flags |= NV_ALLOC_TYPE_PCI;
+- if (kernel) flags |= NV_ALLOC_TYPE_KERNEL;
+-#if defined(NV_SG_MAP_BUFFERS)
+ if (kernel && !contig) flags |= NV_ALLOC_TYPE_VMAP;
+-#else
+- if (kernel && !contig) flags |= NV_ALLOC_TYPE_VMALLOC;
+-#endif
+ if (contig && !agp) flags |= NV_ALLOC_TYPE_CONTIG;
+ return flags;
+ }
+@@ -1067,21 +1075,24 @@
+ #define NV_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data))
+ #define NV_ATOMIC_READ(data) atomic_read(&(data))
+
++extern int nv_update_memory_types;
++
+ /*
+- * a BUG() is triggered on early 2.6 x86_64 kernels. the underlying
+- * problem actually exists on many architectures and kernels, but
+- * these are the only kernels that check the condition and trigger
+- * a BUG(). note that this is a problem of the core kernel, not an
+- * nvidia bug (and can still be triggered by agpgart). let's avoid
+- * change_page_attr on those kernels.
++ * Using change_page_attr() on early Linux/x86-64 2.6 kernels may
++ * result in a BUG() being triggered. The underlying problem
++ * actually exists on multiple architectures and kernels, but only
++ * the kernels noted above check for the condition and trigger a BUG().
++ *
++ * Note that this is due to a bug in the Linux kernel, not an
++ * NVIDIA driver bug (it can also be triggered by AGPGART).
++ *
++ * We therefore need to determine at runtime if change_page_attr()
++ * can be used safely on these kernels.
+ */
+-#if defined(NV_CHANGE_PAGE_ATTR_PRESENT)
+-extern int nv_use_cpa;
+-
+-#if defined(NVCPU_X86_64) && !defined(KERNEL_2_4) && \
+- (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11))
+-#define NV_CHANGE_PAGE_ATTR_BUG_PRESENT 1
+-#endif
++#if defined(NV_CHANGE_PAGE_ATTR_PRESENT) && defined(NVCPU_X86_64) && \
++ !defined(KERNEL_2_4) && \
++ (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11))
++#define NV_CHANGE_PAGE_ATTR_BUG_PRESENT
+ #endif
+
+ #if defined(NVCPU_X86) || defined(NVCPU_X86_64)
+@@ -1093,7 +1104,7 @@
+ *
+ * We need to be careful to mask out _PAGE_NX when the host system
+ * doesn't support this feature or when it's disabled: the kernel
+- * may not do this in its implementation of the change_page_attr()
++ * may not do this in its implementation of the change_page_attr()
+ * interface.
+ */
+ #ifndef X86_FEATURE_NX
+diff -ru usr/src/nv/nv-vm.c usr/src/nv.2305230/nv-vm.c
+--- usr/src/nv/nv-vm.c 2008-01-21 11:48:11.000000000 -0800
++++ usr/src/nv.2305230/nv-vm.c 2008-03-18 12:55:58.755754293 -0700
+@@ -43,42 +43,40 @@
+ }
+ #endif
+
+-/*
+- * AMD Athlon processors expose a subtle bug in the Linux
+- * kernel, that may lead to AGP memory corruption. Recent
+- * kernel versions had a workaround for this problem, but
+- * 2.4.20 is the first kernel to address it properly. The
+- * page_attr API provides the means to solve the problem.
+- */
+-
+ static inline void nv_set_page_attrib_uncached(nv_pte_t *page_ptr)
+ {
+-#if defined(NV_CHANGE_PAGE_ATTR_PRESENT)
+- if (nv_use_cpa)
++ if (nv_update_memory_types)
+ {
+- struct page *page = virt_to_page(__va(page_ptr->phys_addr));
++#if defined(NV_SET_PAGES_UC_PRESENT)
++ struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
++ set_pages_uc(page, 1);
++#elif defined(NV_CHANGE_PAGE_ATTR_PRESENT)
++ struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
+ pgprot_t prot = PAGE_KERNEL_NOCACHE;
+ #if defined(NVCPU_X86) || defined(NVCPU_X86_64)
+ pgprot_val(prot) &= __nv_supported_pte_mask;
+ #endif
+ change_page_attr(page, 1, prot);
+- }
+ #endif
++ }
+ }
+
+ static inline void nv_set_page_attrib_cached(nv_pte_t *page_ptr)
+ {
+-#if defined(NV_CHANGE_PAGE_ATTR_PRESENT)
+- if (nv_use_cpa)
++ if (nv_update_memory_types)
+ {
+- struct page *page = virt_to_page(__va(page_ptr->phys_addr));
++#if defined(NV_SET_PAGES_UC_PRESENT)
++ struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
++ set_pages_wb(page, 1);
++#elif defined(NV_CHANGE_PAGE_ATTR_PRESENT)
++ struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
+ pgprot_t prot = PAGE_KERNEL;
+ #if defined(NVCPU_X86) || defined(NVCPU_X86_64)
+ pgprot_val(prot) &= __nv_supported_pte_mask;
+ #endif
+ change_page_attr(page, 1, prot);
++#endif
+ }
+-#endif /* NV_CHANGE_PAGE_ATTR_PRESENT */
+ }
+
+ static inline void nv_lock_page(nv_pte_t *page_ptr)
+@@ -360,8 +358,11 @@
+ #if defined(NV_CPA_NEEDS_FLUSHING)
+ nv_execute_on_all_cpus(cache_flush, NULL);
+ #endif
++#if (defined(NVCPU_X86) || defined(NVCPU_X86_64)) && \
++ defined(NV_CHANGE_PAGE_ATTR_PRESENT)
+ global_flush_tlb();
+ #endif
++#endif
+ }
+
+ /*
+@@ -409,11 +410,11 @@
+ nv_pte_t *page_ptr = *at->page_table;
+ unsigned int i, j, gfp_mask;
+ unsigned long virt_addr = 0, phys_addr;
++ struct page **pages = 0;
+ #if defined(NV_SG_MAP_BUFFERS)
+ int ret = -1;
+ nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+ struct pci_dev *dev = nvl->dev;
+- struct page **pages = 0;
+ #endif
+
+ nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_vm_malloc_pages: %d pages\n",
+@@ -462,22 +463,7 @@
+ }
+ #endif
+ }
+- else if (NV_ALLOC_MAPPING_VMALLOC(at->flags))
+- {
+- void *virt_kptr = NULL;
+- at->size = at->num_pages * PAGE_SIZE;
+- nv_printf(NV_DBG_MEMINFO, "NVRM: VM: vmalloc, size 0x%x\n", at->size);
+- NV_VMALLOC(virt_kptr, at->size, NV_ALLOC_MAPPING_CACHED(at->flags));
+- virt_addr = (unsigned long) virt_kptr;
+- if (virt_addr == 0)
+- {
+- nv_printf(NV_DBG_ERRORS,
+- "NVRM: nv_vm_malloc failed to allocate vmalloc memory\n");
+- return -1;
+- }
+- }
+
+-#if defined(NV_SG_MAP_BUFFERS)
+ if (NV_ALLOC_MAPPING_VMAP(at->flags))
+ {
+ NV_KMALLOC(pages, sizeof(struct page *) * at->num_pages);
+@@ -488,11 +474,10 @@
+ return -1;
+ }
+ }
+-#endif
+
+ for (i = 0; i < at->num_pages; i++)
+ {
+- if (!NV_ALLOC_MAPPING_CONTIG(at->flags) && !NV_ALLOC_MAPPING_VMALLOC(at->flags))
++ if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
+ {
+ NV_GET_FREE_PAGES(virt_addr, 0, gfp_mask);
+ if (virt_addr == 0)
+@@ -553,10 +538,10 @@
+ }
+ }
+ nv_sg_load(&page_ptr->sg_list, page_ptr);
++#endif
+
+ if (NV_ALLOC_MAPPING_VMAP(at->flags))
+ pages[i] = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
+-#endif
+
+ if (!NV_ALLOC_MAPPING_CACHED(at->flags))
+ nv_set_page_attrib_uncached(page_ptr);
+@@ -565,41 +550,21 @@
+ virt_addr += PAGE_SIZE;
+ }
+
+-#if defined(NV_SG_MAP_BUFFERS)
+ if (NV_ALLOC_MAPPING_VMAP(at->flags))
+ {
+ NV_VMAP(virt_addr, pages, at->num_pages, NV_ALLOC_MAPPING_CACHED(at->flags));
+-#if defined(KERNEL_2_4)
+- if (virt_addr != 0)
+- {
+- unsigned int i;
+- /*
+- * XXX Linux 2.4's vmap() increments the pages' reference counts
+- * in preparation for vfree(); the latter skips the calls to
+- * __free_page() if the pages are marked reserved, however, so
+- * that the underlying memory is effectively leaked when we free
+- * it later. Decrement the count here to avoid this leak.
+- */
+- for (i = 0; i < at->num_pages; i++)
+- {
+- if (PageReserved(pages[i]))
+- atomic_dec(&pages[i]->count);
+- }
+- }
+-#endif
+- NV_KFREE((void *)pages, sizeof(struct page *) * at->num_pages);
+ if (virt_addr == 0)
+ {
+ nv_printf(NV_DBG_ERRORS,
+ "NVRM: VM: nv_vm_malloc_pages: failed to vmap pages\n");
+ goto failed;
+ }
++ NV_KFREE((void *)pages, sizeof(struct page *) * at->num_pages);
+ for (i = 0; i < at->num_pages; i++)
+ {
+ at->page_table[i]->virt_addr = virt_addr + i * PAGE_SIZE;
+ }
+ }
+-#endif /* NV_SG_MAP_BUFFERS */
+
+ nv_vm_list_page_count(at->page_table, at->num_pages);
+
+@@ -624,7 +589,7 @@
+ nv_sg_unmap_buffer(dev, &page_ptr->sg_list, page_ptr);
+ #endif
+ nv_unlock_page(page_ptr);
+- if (!NV_ALLOC_MAPPING_CONTIG(at->flags) && !NV_ALLOC_MAPPING_VMALLOC(at->flags))
++ if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
+ NV_FREE_PAGES(page_ptr->virt_addr, 0);
+ }
+ }
+@@ -638,16 +603,9 @@
+ #endif
+ NV_FREE_PAGES(page_ptr->virt_addr, at->order);
+ }
+- else if (NV_ALLOC_MAPPING_VMALLOC(at->flags))
+- {
+- page_ptr = *at->page_table;
+- NV_VFREE((void *) page_ptr->virt_addr, at->size);
+- }
+
+-#if defined(NV_SG_MAP_BUFFERS)
+ if (NV_ALLOC_MAPPING_VMAP(at->flags) && pages != 0)
+- NV_VFREE((void *)pages, sizeof(struct page *) * at->num_pages);
+-#endif
++ NV_KFREE((void *)pages, sizeof(struct page *) * at->num_pages);
+
+ return -1;
+ }
+@@ -674,19 +632,17 @@
+ }
+ nv_vm_list_page_count(at->page_table, at->num_pages);
+
+-#if defined(NV_SG_MAP_BUFFERS)
+ if (NV_ALLOC_MAPPING_VMAP(at->flags))
+ NV_VUNMAP((void *)at->page_table[0]->virt_addr, at->num_pages); // undo vmap()
+-#endif
+
+ for (i = 0; i < at->num_pages; i++)
+ {
+ page_ptr = at->page_table[i];
+ if (!NV_ALLOC_MAPPING_CACHED(at->flags))
+ nv_set_page_attrib_cached(page_ptr);
+-#if defined(NV_SG_MAP_BUFFERS)
+ if (NV_ALLOC_MAPPING_VMAP(at->flags))
+ page_ptr->virt_addr = (unsigned long) __va(page_ptr->phys_addr);
++#if defined(NV_SG_MAP_BUFFERS)
+ if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
+ nv_sg_unmap_buffer(dev, &page_ptr->sg_list, page_ptr);
+ #endif
+@@ -701,7 +657,7 @@
+ }
+ }
+ nv_unlock_page(page_ptr);
+- if (!NV_ALLOC_MAPPING_CONTIG(at->flags) && !NV_ALLOC_MAPPING_VMALLOC(at->flags))
++ if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
+ NV_FREE_PAGES(page_ptr->virt_addr, 0);
+ }
+ nv_flush_caches();
+@@ -714,9 +670,136 @@
+ #endif
+ NV_FREE_PAGES(page_ptr->virt_addr, at->order);
+ }
+- else if (NV_ALLOC_MAPPING_VMALLOC(at->flags))
++}
++
++#if defined(NV_VMAP_PRESENT) && defined(KERNEL_2_4) && defined(NVCPU_X86)
++static unsigned long
++nv_vmap_vmalloc(
++ int count,
++ struct page **pages,
++ pgprot_t prot
++)
++{
++ void *virt_addr = NULL;
++ unsigned int i, size = count * PAGE_SIZE;
++
++ NV_VMALLOC(virt_addr, size, TRUE);
++ if (virt_addr == NULL)
++ {
++ nv_printf(NV_DBG_ERRORS,
++ "NVRM: vmalloc() failed to allocate vmap() scratch pages!\n");
++ return 0;
++ }
++
++ for (i = 0; i < (unsigned int)count; i++)
+ {
+- page_ptr = *at->page_table;
+- NV_VFREE((void *) page_ptr->virt_addr, at->size);
++ pgd_t *pgd = NULL;
++ pmd_t *pmd = NULL;
++ pte_t *pte = NULL;
++ unsigned long address;
++ struct page *page;
++
++ address = (unsigned long)virt_addr + i * PAGE_SIZE;
++
++ pgd = NV_PGD_OFFSET(address, 1, NULL);
++ if (!NV_PGD_PRESENT(pgd))
++ goto failed;
++
++ pmd = NV_PMD_OFFSET(address, pgd);
++ if (!NV_PMD_PRESENT(pmd))
++ goto failed;
++
++ pte = NV_PTE_OFFSET(address, pmd);
++ if (!NV_PTE_PRESENT(pte))
++ goto failed;
++
++ page = NV_GET_PAGE_STRUCT(pte_val(*pte) & PAGE_MASK);
++ get_page(pages[i]);
++ set_pte(pte, mk_pte(pages[i], prot));
++ put_page(page);
++ NV_PTE_UNMAP(pte);
++ }
++ nv_flush_caches();
++
++ return (unsigned long)virt_addr;
++
++failed:
++ NV_VFREE(virt_addr, size);
++
++ return 0;
++}
++
++static void
++nv_vunmap_vmalloc(
++ void *address,
++ int count
++)
++{
++ NV_VFREE(address, count * PAGE_SIZE);
++}
++#endif /* NV_VMAP_PRESENT && KERNEL_2_4 && NVCPU_X86 */
++
++void *nv_vmap(
++ struct page **pages,
++ int count,
++ pgprot_t prot
++)
++{
++ unsigned long virt_addr = 0;
++#if defined(NV_VMAP_PRESENT)
++#if defined(KERNEL_2_4) && defined(NVCPU_X86)
++ /*
++ * XXX Linux 2.4's vmap() checks the requested mapping's size against
++ * the value of (max_mapnr << PAGESHIFT); since 'map_nr' is a 32-bit
++ * symbol, the checks fails given enough physical memory. We can't solve
++ * this problem by adjusting the value of 'map_nr', but we can avoid
++ * vmap() by going through vmalloc().
++ */
++ if (max_mapnr >= 0x100000)
++ virt_addr = nv_vmap_vmalloc(count, pages, prot);
++ else
++#endif
++ NV_VMAP_KERNEL(virt_addr, pages, count, prot);
++#if defined(KERNEL_2_4)
++ if (virt_addr)
++ {
++ int i;
++ /*
++ * XXX Linux 2.4's vmap() increments the pages' reference counts
++ * in preparation for vfree(); the latter skips the calls to
++ * __free_page() if the pages are marked reserved, however, so
++ * that the underlying memory is effectively leaked when we free
++ * it later. Decrement the count here to avoid this leak.
++ */
++ for (i = 0; i < count; i++)
++ {
++ if (PageReserved(pages[i]))
++ atomic_dec(&pages[i]->count);
++ }
+ }
++#endif
++#endif /* NV_VMAP_PRESENT */
++ return (void *)virt_addr;
++}
++
++void nv_vunmap(
++ void *address,
++ int count
++)
++{
++#if defined(NV_VMAP_PRESENT)
++#if defined(KERNEL_2_4) && defined(NVCPU_X86)
++ /*
++ * XXX Linux 2.4's vmap() checks the requested mapping's size against
++ * the value of (max_mapnr << PAGESHIFT); since 'map_nr' is a 32-bit
++ * symbol, the check fails given enough physical memory. We can't solve
++ * this problem by adjusting the value of 'map_nr', but we can avoid
++ * vmap() by going through vmalloc().
++ */
++ if (max_mapnr >= 0x100000)
++ nv_vunmap_vmalloc(address, count);
++ else
++#endif
++ NV_VUNMAP_KERNEL(address, count);
++#endif /* NV_VMAP_PRESENT */
+ }
+diff -ru usr/src/nv/nv-vm.h usr/src/nv.2305230/nv-vm.h
+--- usr/src/nv/nv-vm.h 2008-01-21 11:48:11.000000000 -0800
++++ usr/src/nv.2305230/nv-vm.h 2008-03-18 12:55:54.643519951 -0700
+@@ -11,6 +11,9 @@
+ #ifndef _NV_VM_H_
+ #define _NV_VM_H_
+
++void * nv_vmap(struct page **, int, pgprot_t);
++void nv_vunmap(void *, int);
++
+ int nv_vm_malloc_pages(nv_state_t *, nv_alloc_t *);
+ void nv_vm_free_pages(nv_state_t *, nv_alloc_t *);
+
+diff -ru usr/src/nv/nv.c usr/src/nv.2305230/nv.c
+--- usr/src/nv/nv.c 2008-01-21 11:48:11.000000000 -0800
++++ usr/src/nv.2305230/nv.c 2008-03-18 12:56:01.123889244 -0700
+@@ -97,9 +97,7 @@
+ unsigned int nv_remap_limit;
+ #endif
+
+-#if defined(NV_CHANGE_PAGE_ATTR_PRESENT)
+-int nv_use_cpa = 1;
+-#endif
++int nv_update_memory_types = 1;
+
+ static void *nv_pte_t_cache = NULL;
+
+@@ -850,16 +848,13 @@
+
+ for (i = 0; i < 4; i++)
+ {
+- if (NV_PAT_ENTRY(pat1, i) == 1)
++ if ((i != 1) && NV_PAT_ENTRY(pat1, i) == 1)
+ {
+ nv_printf(NV_DBG_ERRORS, "NVRM: PAT index %d already configured for Write-Combining!\n", i);
+ nv_printf(NV_DBG_ERRORS, "NVRM: Aborting, due to PAT already being configured\n");
+ return 0;
+ }
+- }
+
+- for (i = 0; i < 4; i++)
+- {
+ if (NV_PAT_ENTRY(pat2, i) == 1)
+ {
+ nv_printf(NV_DBG_ERRORS, "NVRM: PAT index %d already configured for Write-Combining!\n", i + 4);
+@@ -969,30 +964,26 @@
+ #endif /* defined(NV_BUILD_NV_PAT_SUPPORT) */
+ }
+
+-
+ #if defined(NV_CHANGE_PAGE_ATTR_BUG_PRESENT)
+-
+-/* nv_verify_cpa_interface - determine if the change_page_attr bug is fixed
+- * in this kernel.
++/*
++ * nv_verify_cpa_interface() - determine if the change_page_attr() large page
++ * management accounting bug known to exist in early Linux/x86-64 kernels
++ * is present in this kernel.
+ *
+- * there's really not a good way to determine if change_page_attr is fixed.
+- * we can't really use cpa on 2.6 x86_64 kernels < 2.6.11, as if we run into
+- * the accounting bug, the kernel will throw a BUG. this isn't 100% accurate,
+- * as it doesn't throw a bug until we try to restore the caching attributes
+- * of the page. so if we can track down a 4M allocation, we can mark it
+- * uncached and see if the accounting was done correctly.
+- *
+- * this is a little ugly, but the most accurate approach to determining if
+- * this kernel is good.
++ * There's really no good way to determine if change_page_attr() is working
++ * correctly. We can't reliably use change_page_attr() on Linux/x86-64 2.6
++ * kernels < 2.6.11: if we run into the accounting bug, the Linux kernel will
++ * trigger a BUG() if we attempt to restore the WB memory type of a page
++ * originally part of a large page.
+ *
+- * why do we even bother? some distributions have back-ported the cpa fix to
+- * kernels < 2.6.11. we want to use change_page_attr to avoid random corruption
+- * and hangs, but need to make sure it's safe to do so.
++ * So if we can successfully allocate such a page, change its memory type to
++ * UC and check if the accounting was done correctly, we can determine if
++ * the change_page_attr() interface can be used safely.
+ *
+- * return values:
+- * 0 - test passed, interface works
+- * 1 - test failed, status unclear
+- * -1 - test failed, interface broken
++ * Return values:
++ * 0 - test passed, the change_page_attr() interface works
++ * 1 - test failed, the status is unclear
++ * -1 - test failed, the change_page_attr() interface is broken
+ */
+
+ static inline pte_t *check_large_page(unsigned long vaddr)
+@@ -1000,7 +991,7 @@
+ pgd_t *pgd = NULL;
+ pmd_t *pmd = NULL;
+
+- pgd = NV_PGD_OFFSET(vaddr, 1, &init_mm);
++ pgd = NV_PGD_OFFSET(vaddr, 1, NULL);
+ if (!NV_PGD_PRESENT(pgd))
+ return NULL;
+
+@@ -1110,35 +1101,42 @@
+
+ return 1;
+ }
+-
+ #endif /* defined(NV_CHANGE_PAGE_ATTR_BUG_PRESENT) */
+
+-
+-// verify that the kernel's mapping matches the requested type
+-// this is to protect against accidental cache aliasing problems
++/*
++ * nv_verify_page_mappings() - verify that the kernel mapping of the specified
++ * page matches the specified type. This is to help detect bugs in the Linux
++ * kernel's change_page_attr() interface, early.
++ *
++ * This function relies on the ability to perform kernel virtual address to PFN
++ * translations and therefore on 'init_mm'. Unfortunately, the latter is no
++ * longer exported in recent Linux/x86 2.6 kernels. The export was removed at
++ * roughly the same time as the set_pages_{uc,wb}() change_page_attr()
++ * replacement interfaces were introduced; hopefully, it will be sufficient to
++ * check for their presence.
++ */
+ int nv_verify_page_mappings(
+ nv_pte_t *page_ptr,
+ unsigned int cachetype
+ )
+ {
+- struct mm_struct *mm;
++#if defined(NV_CHANGE_PAGE_ATTR_PRESENT) || \
++ (defined(NV_SET_PAGES_UC_PRESENT) && !defined(NVCPU_X86))
++ unsigned long retval = -1;
++#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
+ pgd_t *pgd = NULL;
+ pmd_t *pmd = NULL;
+ pte_t *pte = NULL;
+- unsigned long retval = -1;
+ unsigned int flags, expected;
+ unsigned long address;
+ static int count = 0;
+
+-#if defined(NV_CHANGE_PAGE_ATTR_PRESENT)
+- if (!nv_use_cpa)
++ if (!nv_update_memory_types)
+ return 0;
+-#endif
+
+ address = (unsigned long)__va(page_ptr->phys_addr);
+- mm = &init_mm; // always a kernel page
+
+- pgd = NV_PGD_OFFSET(address, 1, mm);
++ pgd = NV_PGD_OFFSET(address, 1, NULL);
+ if (!NV_PGD_PRESENT(pgd))
+ {
+ nv_printf(NV_DBG_ERRORS, "NVRM: pgd not present for addr 0x%lx\n", address);
+@@ -1204,7 +1202,11 @@
+ }
+
+ failed:
++#endif /* defined(NVCPU_X86) || defined(NVCPU_X86_64) */
+ return retval;
++#else
++ return 0;
++#endif
+ }
+
+ #if defined(NV_BUILD_NV_PAT_SUPPORT) && defined(CONFIG_HOTPLUG_CPU)
+@@ -1250,7 +1252,8 @@
+ static int __init nvidia_init_module(void)
+ {
+ int rc;
+- U032 i, count;
++ U032 i, count, data;
++ nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device);
+
+ #if defined(VM_CHECKER)
+ nv_init_lock(vm_lock);
+@@ -1266,11 +1269,18 @@
+ return -ENODEV;
+ }
+
++ if (!rm_init_rm())
++ {
++ nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_rm() failed!\n");
++ return -EIO;
++ }
++
+ memset(nv_linux_devices, 0, sizeof(nv_linux_devices));
+
+ if (pci_register_driver(&nv_pci_driver) < 0)
+ {
+ pci_unregister_driver(&nv_pci_driver); // XXX ???
++ rm_shutdown_rm();
+ nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA graphics adapter found!\n");
+ return -ENODEV;
+ }
+@@ -1293,6 +1303,7 @@
+ if (num_probed_nv_devices == 0)
+ {
+ pci_unregister_driver(&nv_pci_driver);
++ rm_shutdown_rm();
+ nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA graphics adapter probed!\n");
+ return -ENODEV;
+ }
+@@ -1307,6 +1318,7 @@
+ if (num_nv_devices == 0)
+ {
+ pci_unregister_driver(&nv_pci_driver);
++ rm_shutdown_rm();
+ nv_printf(NV_DBG_ERRORS,
+ "NVRM: None of the NVIDIA graphics adapters were initialized!\n");
+ return -ENODEV;
+@@ -1317,8 +1329,9 @@
+ rc = register_chrdev(nv_major, "nvidia", &nv_fops);
+ if (rc < 0)
+ {
+- nv_printf(NV_DBG_ERRORS, "NVRM: register chrdev failed\n");
+ pci_unregister_driver(&nv_pci_driver);
++ rm_shutdown_rm();
++ nv_printf(NV_DBG_ERRORS, "NVRM: register_chrdev() failed!\n");
+ return rc;
+ }
+
+@@ -1367,14 +1380,6 @@
+ }
+ #endif
+
+- // Init the resource manager
+- if (!rm_init_rm())
+- {
+- nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_rm() failed\n");
+- rc = -EIO;
+- goto failed;
+- }
+-
+ #if defined(NV_SG_MAP_BUFFERS)
+ rm_read_registry_dword(NV_STATE_PTR(&nv_ctl_device), "NVreg", "RemapLimit", &nv_remap_limit);
+
+@@ -1407,48 +1412,41 @@
+ /* create /proc/driver/nvidia */
+ nvos_proc_create();
+
+-#if defined(NV_CHANGE_PAGE_ATTR_PRESENT)
++ /*
++ * Give users an opportunity to disable the driver's use of
++ * the change_page_attr() and set_pages_{uc,wb}() kernel
++ * interfaces.
++ */
++ rc = rm_read_registry_dword(nv, "NVreg", "UpdateMemoryTypes", &data);
++ if ((rc == 0) && ((int)data != ~0))
+ {
+- int data;
+-
+- // allow the user to override us with a registry key
+- rc = rm_read_registry_dword(NV_STATE_PTR(&nv_ctl_device), "NVreg", "UseCPA", &data);
+- if ((rc == 0) && (data != -1))
+- {
+- nv_use_cpa = data;
+- }
++ nv_update_memory_types = data;
++ }
+ #if defined(NV_CHANGE_PAGE_ATTR_BUG_PRESENT)
+- else
++ /*
++     * Unless we explicitly detect that the change_page_attr()
++     * interface is fixed, disable usage of the interface on
++ * this kernel. Notify the user of this problem using the
++ * driver's /proc warnings interface (read by the installer
++ * and the bug report script).
++ */
++ else
++ {
++ rc = nv_verify_cpa_interface();
++ if (rc < 0)
+ {
+- /*
+- * Unless we explicitely detect that the change_page_attr()
+- * inteface is fixed, disable usage of the interface on
+- * this kernel. Notify the user of this problem using the
+- * driver's /proc warnings interface (read by the installer
+- * and the bug report script).
+- */
+- rc = nv_verify_cpa_interface();
+- if (rc < 0)
+- {
+- nv_prints(NV_DBG_ERRORS, __cpgattr_warning);
+- nvos_proc_add_warning_file("change_page_attr", __cpgattr_warning);
+- nv_use_cpa = 0;
+- }
+- else if (rc != 0)
+- {
+- nv_prints(NV_DBG_ERRORS, __cpgattr_warning_2);
+- nvos_proc_add_warning_file("change_page_attr", __cpgattr_warning_2);
+- nv_use_cpa = 0;
+- }
++ nv_prints(NV_DBG_ERRORS, __cpgattr_warning);
++ nvos_proc_add_warning_file("change_page_attr", __cpgattr_warning);
++ nv_update_memory_types = 0;
++ }
++ else if (rc != 0)
++ {
++ nv_prints(NV_DBG_ERRORS, __cpgattr_warning_2);
++ nvos_proc_add_warning_file("change_page_attr", __cpgattr_warning_2);
++ nv_update_memory_types = 0;
+ }
+-#endif
+ }
+-#endif
+-
+-
+-#if defined(DEBUG)
+- inter_module_register("nv_linux_devices", THIS_MODULE, nv_linux_devices);
+-#endif
++#endif /* defined(NV_CHANGE_PAGE_ATTR_BUG_PRESENT) */
+
+ #if defined(NVCPU_X86_64) && defined(CONFIG_IA32_EMULATION) && !defined(HAVE_COMPAT_IOCTL)
+ /* Register ioctl conversions for 32 bit clients */
+@@ -1501,6 +1499,8 @@
+ }
+
+ pci_unregister_driver(&nv_pci_driver);
++ rm_shutdown_rm();
++
+ return rc;
+ }
+
+@@ -1532,10 +1532,6 @@
+ /* remove /proc/driver/nvidia */
+ nvos_proc_remove();
+
+-#if defined(DEBUG)
+- inter_module_unregister("nv_linux_devices");
+-#endif
+-
+ #if defined(NV_PM_SUPPORT_OLD_STYLE_APM)
+ for (i = 0; i < num_nv_devices; i++)
+ {
+@@ -2112,6 +2108,8 @@
+ if (rm_validate_pfn_range(file, NV_VMA_PGOFF(vma),
+ NV_VMA_SIZE(vma)) == RM_OK)
+ {
++ if (vma->vm_flags & VM_WRITE)
++ return -EACCES;
+ vma->vm_flags |= (VM_IO | VM_LOCKED);
+ #if defined(NV_VM_INSERT_PAGE_PRESENT)
+ if (NV_VM_INSERT_PAGE(vma, vma->vm_start, pfn_to_page(NV_VMA_PGOFF(vma))))
+@@ -2240,6 +2238,8 @@
+ if (rm_validate_pfn_range(file, NV_VMA_PGOFF(vma),
+ NV_VMA_SIZE(vma)) == RM_OK)
+ {
++ if (vma->vm_flags & VM_WRITE)
++ return -EACCES;
+ vma->vm_flags |= (VM_IO | VM_LOCKED);
+ #if defined(NV_VM_INSERT_PAGE_PRESENT)
+ if (NV_VM_INSERT_PAGE(vma, vma->vm_start, pfn_to_page(NV_VMA_PGOFF(vma))))
+@@ -3266,8 +3266,21 @@
+ pte_t *pte = NULL;
+ unsigned long retval;
+
+- mm = (kern) ? &init_mm : current->mm;
+- if (!kern) down_read(&current->mm->mmap_sem);
++ if (!kern)
++ {
++ mm = current->mm;
++ down_read(&mm->mmap_sem);
++ }
++ else
++ {
++#if defined(NV_SET_PAGES_UC_PRESENT) && defined(NVCPU_X86)
++ /* nv_printf(NV_DBG_ERRORS,
++ "NVRM: can't translate KVA in nv_get_phys_address()!\n"); */
++ return 0;
++#else
++ mm = NULL;
++#endif
++ }
+
+ pgd = NV_PGD_OFFSET(address, kern, mm);
+ if (!NV_PGD_PRESENT(pgd))
+@@ -3288,11 +3301,13 @@
+ retval &= ~_PAGE_NX;
+ #endif
+
+- if (!kern) up_read(&current->mm->mmap_sem);
++ if (!kern)
++ up_read(&mm->mmap_sem);
+ return retval;
+
+ failed:
+- if (!kern) up_read(&current->mm->mmap_sem);
++ if (!kern)
++ up_read(&mm->mmap_sem);
+ return 0;
+ }
+
+@@ -3300,12 +3315,19 @@
+ unsigned long address
+ )
+ {
+- // make sure this address is a kernel pointer
++#if defined(NV_SET_PAGES_UC_PRESENT) && defined(NVCPU_X86)
++ nv_linux_state_t *nvl;
++ nv_alloc_t *at;
++ unsigned long virt_addr;
++ U032 i, j;
++#endif
++
++ /* make sure this address is a kernel virtual address */
+ #if defined(DEBUG) && !defined(CONFIG_X86_4G)
+ if (address < PAGE_OFFSET)
+ {
+ nv_printf(NV_DBG_WARNINGS,
+- "NVRM: user address passed to get_kern_phys_address: 0x%lx\n",
++ "NVRM: user address passed to get_kern_phys_address: 0x%llx!\n",
+ address);
+ return 0;
+ }
+@@ -3315,6 +3337,30 @@
+ if ((address > PAGE_OFFSET) && (address < VMALLOC_START))
+ return __pa(address);
+
++#if defined(NV_SET_PAGES_UC_PRESENT) && defined(NVCPU_X86)
++ for (i = 0; i < num_nv_devices; i++)
++ {
++ nvl = &nv_linux_devices[i];
++
++ for (at = nvl->alloc_queue; (at != NULL); at = at->next)
++ {
++ if (!NV_ALLOC_MAPPING_VMAP(at->flags))
++ continue;
++
++ for (j = 0; j < at->num_pages; j++)
++ {
++ virt_addr = at->page_table[j]->virt_addr;
++ if ((address >= virt_addr) &&
++ (address < (virt_addr + PAGE_SIZE)))
++ {
++ return (at->page_table[j]->phys_addr +
++ (address & ~PAGE_MASK));
++ }
++ }
++ }
++ }
++#endif
++
+ return _get_phys_address(address, 1);
+ }
+
+@@ -3322,12 +3368,12 @@
+ unsigned long address
+ )
+ {
+- // make sure this address is not a kernel pointer
++ /* make sure this address is not a kernel virtual address */
+ #if defined(DEBUG) && !defined(CONFIG_X86_4G)
+ if (address >= PAGE_OFFSET)
+ {
+ nv_printf(NV_DBG_WARNINGS,
+- "NVRM: kernel address passed to get_user_phys_address: 0x%lx\n",
++ "NVRM: kernel address passed to get_user_phys_address: 0x%llx!\n",
+ address);
+ return 0;
+ }
+@@ -3396,8 +3442,6 @@
+ if (nv_vm_malloc_pages(nv, at))
+ goto failed;
+
+- at->class = class;
+-
+ // set our 'key' to the page_table. rm_alloc_agp_pages will call
+ // nv_translate_address below, which will look up pages using
+ // the value of *pAddress as a key, then index into the page_table
+@@ -3952,7 +3996,8 @@
+
+ if (num_nv_devices == NV_MAX_DEVICES)
+ {
+- nv_printf(NV_DBG_ERRORS, "NVRM: maximum device number (%d) reached!\n", num_nv_devices);
++ nv_printf(NV_DBG_ERRORS, "NVRM: maximum device number (%d) exceeded!\n",
++ (NV_MAX_DEVICES - 1));
+ return -1;
+ }
+
+@@ -4085,13 +4130,10 @@
+ return -1;
+ }
+
+-int NV_API_CALL nv_no_incoherent_mappings
+-(
+- void
+-)
++int NV_API_CALL nv_no_incoherent_mappings(void)
+ {
+-#if defined(NV_CHANGE_PAGE_ATTR_PRESENT)
+- return 1;
++#if defined(NV_CHANGE_PAGE_ATTR_PRESENT) || defined(NV_SET_PAGES_UC_PRESENT)
++ return (nv_update_memory_types);
+ #else
+ return 0;
+ #endif
+diff -ru usr/src/nv/nv.h usr/src/nv.2305230/nv.h
+--- usr/src/nv/nv.h 2008-01-21 11:48:11.000000000 -0800
++++ usr/src/nv.2305230/nv.h 2008-03-18 12:55:56.919649651 -0700
+@@ -335,15 +332,39 @@
+
+ #define NV_DEVICE_NAME_LENGTH 40
+
++#define NV_MAX_ISR_DELAY_US 20000
++#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000)
++
++#define NV_TIMERCMP(a, b, CMP) \
++ (((a)->tv_sec == (b)->tv_sec) ? \
++ ((a)->tv_usec CMP (b)->tv_usec) : ((a)->tv_sec CMP (b)->tv_sec))
++
++#define NV_TIMERADD(a, b, result) \
++ { \
++ (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
++ (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
++ if ((result)->tv_usec >= 1000000) \
++ { \
++ ++(result)->tv_sec; \
++ (result)->tv_usec -= 1000000; \
++ } \
++ }
++
++#define NV_TIMERSUB(a, b, result) \
++ { \
++ (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
++ (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
++ if ((result)->tv_usec < 0) \
++ { \
++ --(result)->tv_sec; \
++ (result)->tv_usec += 1000000; \
++ } \
++ }
+
+ /*
+ * driver internal interfaces
+ */
+
+-/* need a fake device number for control device; just to flag it for msgs */
+-#define NV_CONTROL_DEVICE_NUMBER 100
+-
+-
+ #ifndef NVWATCH
+
+ /*
+diff -ru usr/src/nv/os-interface.c usr/src/nv.2305230/os-interface.c
+--- usr/src/nv/os-interface.c 2008-01-21 11:48:11.000000000 -0800
++++ usr/src/nv.2305230/os-interface.c 2008-03-18 12:56:04.288069559 -0700
+@@ -25,7 +25,6 @@
+ #include "os-interface.h"
+ #include "nv-linux.h"
+
+-
+ static volatile int os_block_on_smp_barrier;
+
+ #ifdef CONFIG_SMP
+@@ -63,7 +62,7 @@
+ //
+ typedef struct os_sema_s
+ {
+- struct semaphore wait;
++ struct completion completion;
+ spinlock_t lock;
+ S032 count;
+ } os_sema_t;
+@@ -86,7 +85,7 @@
+ return rmStatus;
+
+ os_sema = (os_sema_t *)*ppSema;
+- sema_init(&os_sema->wait, 0);
++ init_completion(&os_sema->completion);
+ spin_lock_init(&os_sema->lock);
+ os_sema->count = 1;
+
+@@ -131,7 +130,7 @@
+ {
+ os_sema->count--;
+ spin_unlock_irqrestore(&os_sema->lock, old_irq);
+- down(&os_sema->wait);
++ wait_for_completion(&os_sema->completion);
+ }
+ else
+ {
+@@ -208,7 +207,7 @@
+ spin_unlock_irqrestore(&os_sema->lock, old_irq);
+
+ if (doWakeup)
+- up(&os_sema->wait);
++ complete(&os_sema->completion);
+
+ return RM_OK;
+ }
+@@ -448,8 +447,6 @@
+ //
+ //---------------------------------------------------------------------------
+
+-#define NV_MAX_ISR_UDELAY 20000
+-#define NV_MAX_ISR_MDELAY (NV_MAX_ISR_UDELAY / 1000)
+ #define NV_MSECS_PER_JIFFIE (1000 / HZ)
+ #define NV_MSECS_TO_JIFFIES(msec) ((msec) * HZ / 1000)
+ #define NV_USECS_PER_JIFFIE (1000000 / HZ)
+@@ -474,7 +471,7 @@
+ do_gettimeofday(&tm1);
+ #endif
+
+- if (in_irq() && MicroSeconds > NV_MAX_ISR_UDELAY)
++ if (in_irq() && (MicroSeconds > NV_MAX_ISR_DELAY_US))
+ return RM_ERROR;
+
+ mdelay_safe_msec = MicroSeconds / 1000;
+@@ -494,36 +491,6 @@
+ return RM_OK;
+ }
+
+-#ifndef timercmp
+-# define timercmp(a, b, CMP) \
+- (((a)->tv_sec == (b)->tv_sec) ? \
+- ((a)->tv_usec CMP (b)->tv_usec) : \
+- ((a)->tv_sec CMP (b)->tv_sec))
+-#endif
+-#ifndef timeradd
+-# define timeradd(a, b, result) \
+- do { \
+- (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
+- (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
+- if ((result)->tv_usec >= 1000000) \
+- { \
+- ++(result)->tv_sec; \
+- (result)->tv_usec -= 1000000; \
+- } \
+- } while (0)
+-#endif
+-#ifndef timersub
+-# define timersub(a, b, result) \
+- do { \
+- (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
+- (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
+- if ((result)->tv_usec < 0) { \
+- --(result)->tv_sec; \
+- (result)->tv_usec += 1000000; \
+- } \
+- } while (0)
+-#endif
+-
+ /*
+ * On Linux, a jiffie represents the time passed in between two timer
+ * interrupts. The number of jiffies per second (HZ) varies across the
+@@ -549,7 +516,7 @@
+ tm_start = tm_aux;
+ #endif
+
+- if (in_irq() && MilliSeconds > NV_MAX_ISR_MDELAY)
++ if (in_irq() && (MilliSeconds > NV_MAX_ISR_DELAY_MS))
+ return RM_ERROR;
+
+ if (!NV_MAY_SLEEP())
+@@ -561,7 +528,7 @@
+ MicroSeconds = MilliSeconds * 1000;
+ tm_end.tv_usec = MicroSeconds;
+ tm_end.tv_sec = 0;
+- timeradd(&tm_aux, &tm_end, &tm_end);
++ NV_TIMERADD(&tm_aux, &tm_end, &tm_end);
+
+ /* do we have a full jiffie to wait? */
+ jiffies = NV_USECS_TO_JIFFIES(MicroSeconds);
+@@ -590,9 +557,9 @@
+ schedule_timeout(jiffies);
+ /* catch the remainder, if any */
+ do_gettimeofday(&tm_aux);
+- if (timercmp(&tm_aux, &tm_end, <))
++ if (NV_TIMERCMP(&tm_aux, &tm_end, <))
+ {
+- timersub(&tm_end, &tm_aux, &tm_aux); // tm_aux = tm_end - tm_aux
++ NV_TIMERSUB(&tm_end, &tm_aux, &tm_aux); // tm_aux = tm_end - tm_aux
+ MicroSeconds = tm_aux.tv_usec + tm_aux.tv_sec * 1000000;
+ } else
+ MicroSeconds = 0;
+@@ -1109,6 +1076,18 @@
+ {
+ void *vaddr;
+
++ if (start == 0)
++ {
++ if (mode != NV_MEMORY_CACHED)
++ {
++ nv_printf(NV_DBG_ERRORS,
++ "NVRM: os_map_kernel_space: won't map address 0x%0llx UC!\n", start);
++ return NULL;
++ }
++ else
++ return (void *)PAGE_OFFSET;
++ }
++
+ if (!NV_MAY_SLEEP())
+ {
+ nv_printf(NV_DBG_ERRORS,
+@@ -1131,6 +1110,9 @@
+ U032 size_bytes
+ )
+ {
++ if (addr == (void *)PAGE_OFFSET)
++ return;
++
+ NV_IOUNMAP(addr, size_bytes);
+ }
+
+@@ -1176,7 +1158,11 @@
+ #if defined(CONFIG_X86_REMOTE_DEBUG)
+ __asm__ __volatile__ ("int $3");
+ #elif defined(CONFIG_KGDB)
++#if defined(NV_OLD_MM_KGDB_BREAKPOINT_PRESENT)
++ BREAKPOINT;
++#else
+ BREAKPOINT();
++#endif
+ #elif defined(CONFIG_KDB)
+ KDB_ENTER();
+ #else
+diff -ru usr/src/nv/os-registry.c usr/src/nv.2305230/os-registry.c
+--- usr/src/nv/os-registry.c 2008-01-21 11:48:11.000000000 -0800
++++ usr/src/nv.2305230/os-registry.c 2008-03-18 12:56:07.492252151 -0700
+@@ -48,24 +48,6 @@
+ * This could be changed to work on a per-device basis.
+ */
+
+-/*
+- * The 2nd argument to MODULE_PARM is used to verify parameters passed
+- * to the module at load time. It should be a string in the following
+- * format:
+- *
+- * [min[-max]]{b,h,i,l,s}
+- *
+- * The MIN and MAX specifiers delimit the length of the array. If MAX
+- * is omitted, it defaults to MIN; if both are omitted, the default is
+- * 1. The final character is a type specifier.
+- *
+- * b byte
+- * h short
+- * i int
+- * l long
+- * s string
+- */
+-
+ /*
+ * Option: VideoMemoryTypeOverride
+ *
+@@ -360,9 +342,6 @@
+ static int NVreg_DevicesConnected = 0;
+ NV_MODULE_PARAMETER(NVreg_DevicesConnected);
+
+-static int NVreg_VideoEnhancement = 0;
+-NV_MODULE_PARAMETER(NVreg_VideoEnhancement);
+-
+ static int NVreg_RmLogonRC = 1;
+ NV_MODULE_PARAMETER(NVreg_RmLogonRC);
+
+@@ -402,34 +381,36 @@
+ NV_MODULE_PARAMETER(NVreg_RemapLimit);
+
+ /*
+- * Option: UseCPA
++ * Option: UpdateMemoryTypes
+ *
+ * Description:
+ *
+- * Many kernels have a broken implementation of change_page_attr that leads
+- * to cache aliasing problems. x86_64 kernels between 2.6.0 and 2.6.10 will
+- * force a kernel BUG_ON() when this condition is encountered. For this
+- * reason, the NVIDIA driver is very careful about not using the CPA kernel
+- * interface on these kernels.
++ * Many kernels have broken implementations of the change_page_attr()
++ * kernel interface that may cause cache aliasing problems. Linux/x86-64
++ * kernels between 2.6.0 and 2.6.10 may prompt kernel BUG()s due to
++ * improper accounting in the interface's large page management code, for
++ * example. For this reason, the NVIDIA Linux driver is very careful about
++ * not using the change_page_attr() kernel interface on these kernels.
++ *
++ * Due to the serious nature of the problems that can arise from bugs in
++ * the change_page_attr(), set_pages_{uc,wb}() and other kernel interfaces
++ * used to modify memory types, the NVIDIA driver implements a manual
++ * registry key override to allow forcibly enabling or disabling use of
++ * these APIs.
+ *
+- * Some distributions have backported this fix to kernel versions that fall
+- * within this version range. The NVIDIA driver attempts to automatically
+- * detect these fixes and reenable usage of the change_page_attr interface.
++ * Possible values:
+ *
+- * Due to the serious nature of the problems that can arise from this, the
+- * NVIDIA driver implements a manual registry key to force usage of this API
+- * to be enabled or disabled. This registry key can be used to force usage
+- * of the API on a known fixed kernel if the NVIDIA driver fails to detect
+- * the kernel as fixed. This registry key can also be used to disable usage
+- * of the API on a bad kernel that is misdetected as a fixed kernel.
++ * ~0 = use the NVIDIA driver's default logic (default)
++ *   0 = disable use of change_page_attr(), etc.
++ *   1 = enable use of change_page_attr(), etc.
+ *
+- * The default value is '-1' (use NVIDIA driver default logic)
+- * A value of '0' will forcibly disable change_page_attr calls.
+- * A value of '1' will forcibly enable change_page_attr calls.
++ * By default, the NVIDIA driver will attempt to auto-detect if it can
++ * safely use the change_page_attr() and other kernel interfaces to modify
++ * the memory types of kernel mappings.
+ */
+
+-static int NVreg_UseCPA = -1;
+-NV_MODULE_PARAMETER(NVreg_UseCPA);
++static int NVreg_UpdateMemoryTypes = ~0;
++NV_MODULE_PARAMETER(NVreg_UpdateMemoryTypes);
+
+ // 1 - Force sourcing vbios from ROM
+ // 0 - business as usual
+@@ -477,15 +458,14 @@
+ { "NVreg", "ResmanDebugLevel", &NVreg_ResmanDebugLevel, 1 },
+ { "NVreg", "FlatPanelMode", &NVreg_FlatPanelMode, 1 },
+ { "NVreg", "DevicesConnected", &NVreg_DevicesConnected, 1 },
+- { "NVreg", "VideoEnhancement", &NVreg_VideoEnhancement, 1 },
+ { "NVreg", "RmLogonRC", &NVreg_RmLogonRC, 1 },
++ { "NVreg", "VbiosFromROM", &NVreg_VbiosFromROM, 1 },
+ { "NVreg", "ModifyDeviceFiles", &NVreg_ModifyDeviceFiles, 1 },
+ { "NVreg", "DeviceFileUID", &NVreg_DeviceFileUID, 1 },
+ { "NVreg", "DeviceFileGID", &NVreg_DeviceFileGID, 1 },
+ { "NVreg", "DeviceFileMode", &NVreg_DeviceFileMode, 1 },
+- { "NVreg", "VbiosFromROM", &NVreg_VbiosFromROM, 1 },
+ { "NVreg", "RemapLimit", &NVreg_RemapLimit, 1 },
+- { "NVreg", "UseCPA", &NVreg_UseCPA, 1 },
++ { "NVreg", "UpdateMemoryTypes", &NVreg_UpdateMemoryTypes, 1 },
+ { "NVreg", "RMEdgeIntrCheck", &NVreg_RMEdgeIntrCheck, 1 },
+ { NULL, NULL, NULL, 0 }
+ };