@@ -355,36 +355,6 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
 	return NULL;
 }
 
-/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
-				       pgprot_t protection,
-				       enum pci_mmap_state mmap_state,
-				       int write_combine)
-{
-
-	/* Write combine is always 0 on non-memory space mappings. On
-	 * memory space, if the user didn't pass 1, we check for a
-	 * "prefetchable" resource. This is a bit hackish, but we use
-	 * this to workaround the inability of /sysfs to provide a write
-	 * combine bit
-	 */
-	if (mmap_state != pci_mmap_mem)
-		write_combine = 0;
-	else if (write_combine == 0) {
-		if (rp->flags & IORESOURCE_PREFETCH)
-			write_combine = 1;
-	}
-
-	/* XXX would be nice to have a way to ask for write-through */
-	if (write_combine)
-		return pgprot_noncached_wc(protection);
-	else
-		return pgprot_noncached(protection);
-}
-
 /*
  * This one is used by /dev/mem and fbdev who have no clue about the
  * PCI device, it tries to find the PCI device first and calls the
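
A side effect worth noting: the removed helper silently upgraded
write_combine for prefetchable memory BARs, while pci_mmap_page_range()
(see the next hunk) now honours exactly what the caller passes. A
minimal sketch, assuming the pci_mmap_page_range() signature shown in
the next hunk, of how a caller could reproduce the old
IORESOURCE_PREFETCH fallback; example_mmap_legacy() is hypothetical:

static int example_mmap_legacy(struct pci_dev *pdev, struct resource *rp,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	int write_combine = 0;

	/* old heuristic: memory space + prefetchable BAR => write-combine */
	if (mmap_state == pci_mmap_mem && (rp->flags & IORESOURCE_PREFETCH))
		write_combine = 1;

	return pci_mmap_page_range(pdev, vma, mmap_state, write_combine);
}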
@@ -458,9 +428,10 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 		return -EINVAL;
 
 	vma->vm_pgoff = offset >> PAGE_SHIFT;
-	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
-						  vma->vm_page_prot,
-						  mmap_state, write_combine);
+	if (write_combine)
+		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
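
A hedged usage sketch of the new behaviour: write_combine is applied
exactly as passed, nothing is inferred from the resource any more;
example_mmap_wc() is hypothetical:

static int example_mmap_wc(struct pci_dev *pdev, struct vm_area_struct *vma)
{
	/* write_combine = 1: the BAR is mapped with pgprot_noncached_wc() */
	return pci_mmap_page_range(pdev, vma, pci_mmap_mem, 1);
}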
@@ -610,39 +581,25 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
 			  const struct resource *rsrc,
 			  resource_size_t *start, resource_size_t *end)
 {
-	struct pci_controller *hose = pci_bus_to_host(dev->bus);
-	resource_size_t offset = 0;
+	struct pci_bus_region region;
 
-	if (hose == NULL)
+	if (rsrc->flags & IORESOURCE_IO) {
+		pcibios_resource_to_bus(dev->bus, &region,
+					(struct resource *) rsrc);
+		*start = region.start;
+		*end = region.end;
 		return;
+	}
 
-	if (rsrc->flags & IORESOURCE_IO)
-		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
-
-	/* We pass a fully fixed up address to userland for MMIO instead of
-	 * a BAR value because X is lame and expects to be able to use that
-	 * to pass to /dev/mem !
-	 *
-	 * That means that we'll have potentially 64 bits values where some
-	 * userland apps only expect 32 (like X itself since it thinks only
-	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
-	 * 32 bits CHRPs :-(
-	 *
-	 * Hopefully, the sysfs insterface is immune to that gunk. Once X
-	 * has been fixed (and the fix spread enough), we can re-enable the
-	 * 2 lines below and pass down a BAR value to userland. In that case
-	 * we'll also have to re-enable the matching code in
-	 * __pci_mmap_make_offset().
+	/* We pass a CPU physical address to userland for MMIO instead of a
+	 * BAR value because X is lame and expects to be able to use that
+	 * to pass to /dev/mem!
 	 *
-	 * BenH.
+	 * That means we may have 64-bit values where some apps only expect
+	 * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
 	 */
-#if 0
-	else if (rsrc->flags & IORESOURCE_MEM)
-		offset = hose->pci_mem_offset;
-#endif
-
-	*start = rsrc->start - offset;
-	*end = rsrc->end - offset;
+	*start = rsrc->start;
+	*end = rsrc->end;
 }
 
 /**
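
For context, a sketch of a userland consumer of these values, not part
of the patch: after this change, pci_resource_to_user() reports I/O
ports as bus-relative port numbers and MMIO as CPU physical addresses
that can be handed straight to /dev/mem, as the comment above says.
The device path below is an example:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:01.0/resource", "r");
	uint64_t start, end, flags;

	if (!f)
		return 1;
	/* each line of "resource" is: start end flags, in 0x-prefixed hex */
	if (fscanf(f, "%" SCNx64 " %" SCNx64 " %" SCNx64,
		   &start, &end, &flags) == 3)
		printf("BAR0: %#" PRIx64 "-%#" PRIx64 " flags %#" PRIx64 "\n",
		       start, end, flags);
	fclose(f);
	return 0;
}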