From 03d72aa18f15df9987fe5837284e15b9ccf6e3f8 Mon Sep 17 00:00:00 2001
From: "venkatesh.pallipadi@intel.com"
Date: Tue, 18 Mar 2008 17:00:19 -0700
Subject: x86: PAT use reserve free memtype in pci_mmap_page_range

Add reserve_memtype and free_memtype wrapper for pci_mmap_page_range.

Free is called on unmap, but identity map continues to be mapped as per
pci_mmap_page_range request, until next request for the same region calls
ioremap_change_attr(), which will go through without conflict. This way of
mapping is identical to one used in ioremap/iounmap.

Signed-off-by: Venkatesh Pallipadi
Signed-off-by: Suresh Siddha
Signed-off-by: Ingo Molnar
---
 arch/x86/pci/i386.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 60 insertions(+), 8 deletions(-)

(limited to 'arch/x86/pci/i386.c')

diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 103b9dff121..4ebf52f6b1f 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -30,6 +30,9 @@
 #include
 #include
 #include
+#include
+
+#include

 #include "pci.h"

@@ -297,10 +300,34 @@ void pcibios_set_master(struct pci_dev *dev)
 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 }

+static void pci_unmap_page_range(struct vm_area_struct *vma)
+{
+	u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+	free_memtype(addr, addr + vma->vm_end - vma->vm_start);
+}
+
+static void pci_track_mmap_page_range(struct vm_area_struct *vma)
+{
+	u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long flags = pgprot_val(vma->vm_page_prot)
+						& _PAGE_CACHE_MASK;
+
+	reserve_memtype(addr, addr + vma->vm_end - vma->vm_start, flags, NULL);
+}
+
+static struct vm_operations_struct pci_mmap_ops = {
+	.open  = pci_track_mmap_page_range,
+	.close = pci_unmap_page_range,
+};
+
 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	unsigned long prot;
+	u64 addr = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long len = vma->vm_end - vma->vm_start;
+	unsigned long flags;
+	unsigned long new_flags;

 	/* I/O space cannot be accessed via normal processor loads and
 	 * stores on this platform.
@@ -308,21 +335,46 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	if (mmap_state == pci_mmap_io)
 		return -EINVAL;

-	/* Leave vm_pgoff as-is, the PCI space address is the physical
-	 * address on this platform.
-	 */
 	prot = pgprot_val(vma->vm_page_prot);
-	if (boot_cpu_data.x86 > 3)
-		prot |= _PAGE_PCD | _PAGE_PWT;
+	if (pat_wc_enabled && write_combine)
+		prot |= _PAGE_CACHE_WC;
+	else if (boot_cpu_data.x86 > 3)
+		prot |= _PAGE_CACHE_UC;
+
 	vma->vm_page_prot = __pgprot(prot);

-	/* Write-combine setting is ignored, it is changed via the mtrr
-	 * interfaces on this platform.
-	 */
+	flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK;
+	if (reserve_memtype(addr, addr + len, flags, &new_flags)) {
+		/*
+		 * Do not fallback to certain memory types with certain
+		 * requested type:
+		 * - request is uncached, return cannot be write-back
+		 * - request is uncached, return cannot be write-combine
+		 * - request is write-combine, return cannot be write-back
+		 */
+		if ((flags == _PAGE_CACHE_UC &&
+		     (new_flags == _PAGE_CACHE_WB ||
+		      new_flags == _PAGE_CACHE_WC)) ||
+		    (flags == _PAGE_CACHE_WC &&
+		     new_flags == _PAGE_CACHE_WB)) {
+			free_memtype(addr, addr+len);
+			return -EINVAL;
+		}
+		flags = new_flags;
+	}
+
+	if (vma->vm_pgoff <= max_pfn_mapped &&
+	    ioremap_change_attr((unsigned long)__va(addr), len, flags)) {
+		free_memtype(addr, addr + len);
+		return -EINVAL;
+	}
+
 	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 			       vma->vm_end - vma->vm_start,
 			       vma->vm_page_prot))
 		return -EAGAIN;

+	vma->vm_ops = &pci_mmap_ops;
+
 	return 0;
 }
--
cgit v1.2.3-70-g09d2

From dee7cbb210fdd266ad81af4689bcbac3649f38ff Mon Sep 17 00:00:00 2001
From: Venki Pallipadi
Date: Mon, 24 Mar 2008 14:39:55 -0700
Subject: x86: PAT bug fix for attribute type check after reserve_memtype

Bug fixes for reserve_memtype() call in __ioremap and pci_mmap_page_range().

If reserve_memtype returns non-zero, then it is an error and subsequent free
is not required. Requested and returned prot value check should be done when
reserve_memtype returns success.

Signed-off-by: Venkatesh Pallipadi
Signed-off-by: Suresh Siddha
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/ioremap.c | 11 +++++++++--
 arch/x86/pci/i386.c   |  7 ++++++-
 2 files changed, 15 insertions(+), 3 deletions(-)

(limited to 'arch/x86/pci/i386.c')

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 6cd3418afe7..3f7f05e2c43 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -124,6 +124,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 	struct vm_struct *area;
 	unsigned long new_prot_val;
 	pgprot_t prot;
+	int retval;

 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -163,8 +164,14 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;

-	if (reserve_memtype(phys_addr, phys_addr + size,
-				prot_val, &new_prot_val)) {
+	retval = reserve_memtype(phys_addr, phys_addr + size,
+						prot_val, &new_prot_val);
+	if (retval) {
+		printk("reserve_memtype returned %d\n", retval);
+		return NULL;
+	}
+
+	if (prot_val != new_prot_val) {
 		/*
 		 * Do not fallback to certain memory types with certain
 		 * requested type:
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 4ebf52f6b1f..2ead7236307 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -328,6 +328,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	unsigned long len = vma->vm_end - vma->vm_start;
 	unsigned long flags;
 	unsigned long new_flags;
+	int retval;

 	/* I/O space cannot be accessed via normal processor loads and
 	 * stores on this platform.
@@ -344,7 +345,11 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	vma->vm_page_prot = __pgprot(prot);

 	flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK;
-	if (reserve_memtype(addr, addr + len, flags, &new_flags)) {
+	retval = reserve_memtype(addr, addr + len, flags, &new_flags);
+	if (retval)
+		return retval;
+
+	if (flags != new_flags) {
 		/*
 		 * Do not fallback to certain memory types with certain
 		 * requested type:
--
cgit v1.2.3-70-g09d2
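
A note on how these paths get exercised: pci_mmap_page_range() backs mmap() of
the PCI BAR resource files under sysfs, so the reserve_memtype() done at map
time and the free_memtype() done in the new vm_ops .close handler bracket an
ordinary userspace mapping. The sketch below is not part of the patches above;
it is a minimal userspace illustration using a hypothetical device address
(0000:00:02.0) and assuming the kernel exposes the write-combining
resource0_wc file (use resource0 for an uncached mapping otherwise).

/*
 * Minimal userspace sketch (illustration only, not from the patches above).
 * Mapping a PCI BAR via sysfs ends up in pci_mmap_page_range(); the memory
 * type is reserved at mmap() time and released again when the VMA is closed.
 * The device path is hypothetical; resource0_wc assumes write-combining
 * resource files are available, otherwise use resource0 (uncached).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:00:02.0/resource0_wc";
	size_t len = 4096;		/* must not exceed the BAR size */
	void *bar;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Kernel side: reserve_memtype() + io_remap_pfn_range() */
	bar = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (bar == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* ... access device registers / framebuffer through 'bar' ... */

	/* Kernel side: vm_ops->close -> pci_unmap_page_range() -> free_memtype() */
	munmap(bar, len);
	close(fd);
	return 0;
}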