author	David Woodhouse <dwmw@amazon.co.uk>	2017-04-07 13:01:00 +0300
committer	Bjorn Helgaas <bhelgaas@google.com>	2017-04-20 16:47:47 +0300
commit	fcdb10d6b179fde41ca94d032afda8f4ed796b8e (patch)
tree	c298e32733f546d057fb4af9c2c36736ea86b8fc /arch/ia64
parent	61eee41ae135517392d05cbf1177523a02f12727 (diff)
download	linux-fcdb10d6b179fde41ca94d032afda8f4ed796b8e.tar.xz
ia64: Remove redundant checks for WC in pci_mmap_page_range()
For a PCI MMIO BAR, phys_mem_access_prot() should always return UC or WC. And while a mixture of cached and uncached mappings is forbidden, we were already mixing WC and UC, which is OK. Just do as we're asked.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Tested-by: Tony Luck <tony.luck@intel.com>
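For context, the write_combine argument this patch now honours is driven from user space, typically by mapping a BAR through the sysfs "resourceN_wc" file (or via the PCIIOC_WRITE_COMBINE ioctl on /proc/bus/pci). A minimal user-space sketch follows; the device address and mapping length are made-up examples, not part of this patch:

/*
 * Illustrative only: map BAR 0 of a PCI device write-combined via sysfs.
 * Opening "resource0_wc" instead of "resource0" is what ultimately sets
 * write_combine when pci_mmap_page_range() is reached.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:00:1f.0/resource0_wc";
	size_t len = 4096;			/* assumed BAR size */
	int fd = open(path, O_RDWR | O_SYNC);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	void *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	volatile uint32_t *bar = map;
	bar[0] = 0x1;				/* example MMIO write */

	munmap(map, len);
	close(fd);
	return 0;
}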
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/pci/pci.c	18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 27020f30caa6..7438e8c84cdd 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -423,9 +423,6 @@ pci_mmap_page_range (struct pci_dev *dev, int bar,
 		     struct vm_area_struct *vma,
 		     enum pci_mmap_state mmap_state, int write_combine)
 {
-	unsigned long size = vma->vm_end - vma->vm_start;
-	pgprot_t prot;
-
 	/*
 	 * I/O space cannot be accessed via normal processor loads and
 	 * stores on this platform.
@@ -439,21 +436,10 @@ pci_mmap_page_range (struct pci_dev *dev, int bar,
 		 */
 		return -EINVAL;
 
-	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
-				    vma->vm_page_prot);
-
-	/*
-	 * If the user requested WC, the kernel uses UC or WC for this region,
-	 * and the chipset supports WC, we can use WC. Otherwise, we have to
-	 * use the same attribute the kernel uses.
-	 */
-	if (write_combine &&
-	    ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
-	     (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
-	    efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
+	if (write_combine)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 	else
-		vma->vm_page_prot = prot;
+		vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
 
 	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
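For reference, a sketch of where the two helpers used above come from on ia64. This is paraphrased from asm/pgtable.h and the asm-generic fallback as they stood around this change, not part of the patch; the in-tree headers are authoritative. Both helpers rewrite the memory-attribute bits of the existing protection outright, which is why the prot value previously obtained from phys_mem_access_prot() no longer needs to be consulted:

/* ia64 <asm/pgtable.h> (paraphrased): the helpers replace the
 * memory-attribute field of the page protection. */
#define pgprot_writecombine(prot) \
	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
#define pgprot_noncached(prot) \
	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)

/* asm-generic fallback (paraphrased): an arch without its own
 * pgprot_device() gets the uncached variant, which is what the
 * else-branch above relies on. */
#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif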