@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/pci_regs.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 
 /* Max address size we deal with */
@@ -601,12 +603,119 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
 }
 EXPORT_SYMBOL(of_get_address);
 
+#ifdef PCI_IOBASE
+struct io_range {
+	struct list_head list;
+	phys_addr_t start;
+	resource_size_t size;
+};
+
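+/*
+ * The logical PCI I/O space is the concatenation of the recorded ranges in
+ * list order: a range's first port number equals the sum of the sizes of
+ * all ranges registered before it. The lookup helpers below rely on this.
+ */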
+static LIST_HEAD(io_range_list);
+static DEFINE_SPINLOCK(io_range_lock);
+#endif
+
+/*
+ * Record the PCI IO range (expressed as CPU physical address + size).
+ * Return a negative value if an error has occurred, zero otherwise.
+ */
+int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
+{
+	int err = 0;
+
+#ifdef PCI_IOBASE
+	struct io_range *range;
+	resource_size_t allocated_size = 0;
+
+	/* check if the range hasn't been previously recorded */
+	spin_lock(&io_range_lock);
+	list_for_each_entry(range, &io_range_list, list) {
+		if (addr >= range->start &&
+		    addr + size <= range->start + range->size) {
+			/* range already registered, bail out */
+			goto end_register;
+		}
+		allocated_size += range->size;
+	}
+
+	/* range not registered yet, check for available space */
+	if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
+		/* if it's too big, check if 64K space can be reserved */
+		if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
+			err = -E2BIG;
+			goto end_register;
+		}
+
+		size = SZ_64K;
+		pr_warn("Requested IO range too big, new size set to 64K\n");
+	}
+
+	/* add the range to the list; io_range_lock is held, so no sleeping */
+	range = kzalloc(sizeof(*range), GFP_ATOMIC);
+	if (!range) {
+		err = -ENOMEM;
+		goto end_register;
+	}
+
+	range->start = addr;
+	range->size = size;
+
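+	/* append at the tail so earlier ranges keep their port offsets */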
+	list_add_tail(&range->list, &io_range_list);
+
+end_register:
+	spin_unlock(&io_range_lock);
+#endif
+
+	return err;
+}
+
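+/*
+ * Translate a logical port number back to the CPU physical address it was
+ * registered with. Returns OF_BAD_ADDR if the port is out of range or was
+ * never registered.
+ */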
+phys_addr_t pci_pio_to_address(unsigned long pio)
+{
+	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
+
+#ifdef PCI_IOBASE
+	struct io_range *range;
+	resource_size_t allocated_size = 0;
+
+	if (pio > IO_SPACE_LIMIT)
+		return address;
+
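+	/* allocated_size accumulates into the port offset of each range */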
+	spin_lock(&io_range_lock);
+	list_for_each_entry(range, &io_range_list, list) {
+		if (pio >= allocated_size && pio < allocated_size + range->size) {
+			address = range->start + pio - allocated_size;
+			break;
+		}
+		allocated_size += range->size;
+	}
+	spin_unlock(&io_range_lock);
+#endif
+
+	return address;
+}
+
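+/*
+ * Inverse of pci_pio_to_address(): map a CPU physical address back to its
+ * logical port number. Without PCI_IOBASE the mapping is 1:1, bounded by
+ * IO_SPACE_LIMIT.
+ */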
 unsigned long __weak pci_address_to_pio(phys_addr_t address)
 {
+#ifdef PCI_IOBASE
+	struct io_range *res;
+	resource_size_t offset = 0;
+	unsigned long addr = -1;
+
+	spin_lock(&io_range_lock);
+	list_for_each_entry(res, &io_range_list, list) {
+		if (address >= res->start && address < res->start + res->size) {
+			addr = address - res->start + offset;
+			break;
+		}
+		offset += res->size;
+	}
+	spin_unlock(&io_range_lock);
+
+	return addr;
+#else
 	if (address > IO_SPACE_LIMIT)
 		return (unsigned long)-1;
 
 	return (unsigned long) address;
+#endif
 }
 
 static int __of_address_to_resource(struct device_node *dev,