@@ -550,15 +550,23 @@ error_attrs:
 	return ret;
 }
 
-static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
+static struct msi_desc *
+msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
 {
-	u16 control;
+	struct cpumask *masks = NULL;
 	struct msi_desc *entry;
+	u16 control;
+
+	if (affinity) {
+		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+		if (!masks)
+			pr_err("Unable to allocate affinity masks, ignoring\n");
+	}
 
 	/* MSI Entry Initialization */
-	entry = alloc_msi_entry(&dev->dev);
+	entry = alloc_msi_entry(&dev->dev, nvec, masks);
 	if (!entry)
-		return NULL;
+		goto out;
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
 
@@ -569,8 +577,6 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
 	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
 	entry->msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));
-	entry->nvec_used		= nvec;
-	entry->affinity			= dev->irq_affinity;
 
 	if (control & PCI_MSI_FLAGS_64BIT)
 		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -581,6 +587,8 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
 	if (entry->msi_attrib.maskbit)
 		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
 
+out:
+	kfree(masks);
 	return entry;
 }
 
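Note: msi_setup_entry() now frees the mask array on both the success and the
failure path. That is only safe because the three-argument alloc_msi_entry()
is expected to duplicate the per-vector masks into the descriptor; the actual
helper is changed in a companion patch of this series, so the following is an
assumed sketch of its shape, not the authoritative implementation:

	#include <linux/msi.h>
	#include <linux/slab.h>

	struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
					 const struct cpumask *affinity)
	{
		struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);

		if (!desc)
			return NULL;

		INIT_LIST_HEAD(&desc->list);
		desc->dev = dev;
		desc->nvec_used = nvec;
		if (affinity) {
			/* Private copy: the caller may free its array at once. */
			desc->affinity = kmemdup(affinity,
					nvec * sizeof(*desc->affinity),
					GFP_KERNEL);
			if (!desc->affinity) {
				kfree(desc);
				return NULL;
			}
		}
		return desc;
	}
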
@@ -609,7 +617,7 @@ static int msi_verify_entries(struct pci_dev *dev)
  * an error, and a positive return value indicates the number of interrupts
  * which could have been allocated.
  */
-static int msi_capability_init(struct pci_dev *dev, int nvec)
+static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
 {
 	struct msi_desc *entry;
 	int ret;
@@ -617,7 +625,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 
 	pci_msi_set_enable(dev, 0);	/* Disable MSI during set up */
 
-	entry = msi_setup_entry(dev, nvec);
+	entry = msi_setup_entry(dev, nvec, affinity);
 	if (!entry)
 		return -ENOMEM;
 
@@ -680,28 +688,29 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
 }
 
 static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
-			      struct msix_entry *entries, int nvec)
+			      struct msix_entry *entries, int nvec,
+			      bool affinity)
 {
-	const struct cpumask *mask = NULL;
+	struct cpumask *curmsk, *masks = NULL;
 	struct msi_desc *entry;
-	int cpu = -1, i;
-
-	for (i = 0; i < nvec; i++) {
-		if (dev->irq_affinity) {
-			cpu = cpumask_next(cpu, dev->irq_affinity);
-			if (cpu >= nr_cpu_ids)
-				cpu = cpumask_first(dev->irq_affinity);
-			mask = cpumask_of(cpu);
-		}
+	int ret, i;
+
+	if (affinity) {
+		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+		if (!masks)
+			pr_err("Unable to allocate affinity masks, ignoring\n");
+	}
 
-		entry = alloc_msi_entry(&dev->dev);
+	for (i = 0, curmsk = masks; i < nvec; i++) {
+		entry = alloc_msi_entry(&dev->dev, 1, curmsk);
 		if (!entry) {
 			if (!i)
 				iounmap(base);
 			else
 				free_msi_irqs(dev);
 			/* No enough memory. Don't try again */
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto out;
 		}
 
 		entry->msi_attrib.is_msix	= 1;
@@ -712,12 +721,14 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 			entry->msi_attrib.entry_nr = i;
 		entry->msi_attrib.default_irq	= dev->irq;
 		entry->mask_base		= base;
-		entry->nvec_used		= 1;
-		entry->affinity			= mask;
 
 		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
+		if (masks)
+			curmsk++;
 	}
-
-	return 0;
+	ret = 0;
+out:
+	kfree(masks);
+	return ret;
 }
 
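Note: when irq_create_affinity_masks() fails (or affinity is false), masks
stays NULL, curmsk therefore stays NULL on every iteration, and each
descriptor is simply created without an affinity hint; since kfree(NULL) is a
no-op, the shared exit label is safe in all cases. The walking pattern in
isolation, as a sketch (setup_one() is a hypothetical stand-in for the
descriptor setup in the loop above):

	/* Illustrative only. */
	struct cpumask *curmsk = masks;		/* may be NULL */
	int i;

	for (i = 0; i < nvec; i++) {
		setup_one(i, curmsk);		/* NULL means no affinity hint */
		if (masks)
			curmsk++;		/* exactly one mask per vector */
	}
	kfree(masks);				/* kfree(NULL) is a no-op */
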
@@ -746,8 +757,8 @@ static void msix_program_entries(struct pci_dev *dev,
  * single MSI-X irq. A return of zero indicates the successful setup of
  * requested MSI-X entries with allocated irqs or non-zero for otherwise.
  **/
-static int msix_capability_init(struct pci_dev *dev,
-				struct msix_entry *entries, int nvec)
+static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
+				int nvec, bool affinity)
 {
 	int ret;
 	u16 control;
@@ -762,7 +773,7 @@ static int msix_capability_init(struct pci_dev *dev,
 	if (!base)
 		return -ENOMEM;
 
-	ret = msix_setup_entries(dev, base, entries, nvec);
+	ret = msix_setup_entries(dev, base, entries, nvec, affinity);
 	if (ret)
 		return ret;
 
@@ -942,22 +953,8 @@ int pci_msix_vec_count(struct pci_dev *dev)
 }
 EXPORT_SYMBOL(pci_msix_vec_count);
 
-/**
- * pci_enable_msix - configure device's MSI-X capability structure
- * @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of MSI-X entries (optional)
- * @nvec: number of MSI-X irqs requested for allocation by device driver
- *
- * Setup the MSI-X capability structure of device function with the number
- * of requested irqs upon its software driver call to request for
- * MSI-X mode enabled on its hardware device function. A return of zero
- * indicates the successful configuration of MSI-X capability structure
- * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
- * Or a return of > 0 indicates that driver request is exceeding the number
- * of irqs or MSI-X vectors available. Driver should use the returned value to
- * re-send its request.
- **/
-int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
+static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
+			     int nvec, bool affinity)
 {
 	int nr_entries;
 	int i, j;
@@ -989,7 +986,27 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
 		dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
 		return -EINVAL;
 	}
-	return msix_capability_init(dev, entries, nvec);
+	return msix_capability_init(dev, entries, nvec, affinity);
+}
+
+/**
+ * pci_enable_msix - configure device's MSI-X capability structure
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of MSI-X entries (optional)
+ * @nvec: number of MSI-X irqs requested for allocation by device driver
+ *
+ * Setup the MSI-X capability structure of device function with the number
+ * of requested irqs upon its software driver call to request for
+ * MSI-X mode enabled on its hardware device function. A return of zero
+ * indicates the successful configuration of MSI-X capability structure
+ * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
+ * Or a return of > 0 indicates that driver request is exceeding the number
+ * of irqs or MSI-X vectors available. Driver should use the returned value to
+ * re-send its request.
+ **/
+int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
+{
+	return __pci_enable_msix(dev, entries, nvec, false);
 }
 EXPORT_SYMBOL(pci_enable_msix);
 
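Note: the exported pci_enable_msix() keeps its historical semantics by
passing affinity = false; automatic spreading is only reachable through the
range helpers below. A hypothetical caller (names made up) is unaffected by
this patch:

	#include <linux/pci.h>

	static int mydev_setup_msix(struct pci_dev *pdev,
				    struct msix_entry *entries, int nvec)
	{
		int rc = pci_enable_msix(pdev, entries, nvec);

		if (rc > 0)
			return -ENOSPC;	/* only rc vectors were available */
		return rc;		/* 0 on success, negative errno on error */
	}
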
@@ -1042,6 +1059,7 @@ EXPORT_SYMBOL(pci_msi_enabled);
 static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 				  unsigned int flags)
 {
+	bool affinity = flags & PCI_IRQ_AFFINITY;
 	int nvec;
 	int rc;
 
@@ -1070,19 +1088,17 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 		nvec = maxvec;
 
 	for (;;) {
-		if (flags & PCI_IRQ_AFFINITY) {
-			dev->irq_affinity = irq_create_affinity_mask(&nvec);
+		if (affinity) {
+			nvec = irq_calc_affinity_vectors(dev->irq_affinity,
+					nvec);
 			if (nvec < minvec)
 				return -ENOSPC;
 		}
 
-		rc = msi_capability_init(dev, nvec);
+		rc = msi_capability_init(dev, nvec, affinity);
 		if (rc == 0)
 			return nvec;
 
-		kfree(dev->irq_affinity);
-		dev->irq_affinity = NULL;
-
 		if (rc < 0)
 			return rc;
 		if (rc < minvec)
@@ -1114,26 +1130,24 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
 		struct msix_entry *entries, int minvec, int maxvec,
 		unsigned int flags)
 {
-	int nvec = maxvec;
-	int rc;
+	bool affinity = flags & PCI_IRQ_AFFINITY;
+	int rc, nvec = maxvec;
 
 	if (maxvec < minvec)
 		return -ERANGE;
 
 	for (;;) {
-		if (flags & PCI_IRQ_AFFINITY) {
-			dev->irq_affinity = irq_create_affinity_mask(&nvec);
+		if (affinity) {
+			nvec = irq_calc_affinity_vectors(dev->irq_affinity,
+					nvec);
 			if (nvec < minvec)
 				return -ENOSPC;
 		}
 
-		rc = pci_enable_msix(dev, entries, nvec);
+		rc = __pci_enable_msix(dev, entries, nvec, affinity);
 		if (rc == 0)
 			return nvec;
 
-		kfree(dev->irq_affinity);
-		dev->irq_affinity = NULL;
-
 		if (rc < 0)
 			return rc;
 		if (rc < minvec)
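Note: with irq_calc_affinity_vectors() the retry loops no longer allocate
dev->irq_affinity themselves; they only ask how many vectors the affinity
constraints allow, and the per-vector masks are built later in
msi_setup_entry()/msix_setup_entries(). Drivers are expected to reach these
paths through pci_alloc_irq_vectors(); a hypothetical consumer (the vector
limits are made up):

	#include <linux/pci.h>

	static int mydev_init_irqs(struct pci_dev *pdev)
	{
		int nvec = pci_alloc_irq_vectors(pdev, 1, 32,
				PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_AFFINITY);

		if (nvec < 0)
			return nvec;	/* negative errno on failure */

		/* nvec vectors were granted, pre-spread across CPUs. */
		return 0;
	}
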
@@ -1257,6 +1271,37 @@ int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
 }
 EXPORT_SYMBOL(pci_irq_vector);
 
+/**
+ * pci_irq_get_affinity - return the affinity of a particular msi vector
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ */
+const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
+{
+	if (dev->msix_enabled) {
+		struct msi_desc *entry;
+		int i = 0;
+
+		for_each_pci_msi_entry(entry, dev) {
+			if (i == nr)
+				return entry->affinity;
+			i++;
+		}
+		WARN_ON_ONCE(1);
+		return NULL;
+	} else if (dev->msi_enabled) {
+		struct msi_desc *entry = first_pci_msi_entry(dev);
+
+		if (WARN_ON_ONCE(!entry || nr >= entry->nvec_used))
+			return NULL;
+
+		return &entry->affinity[nr];
+	} else {
+		return cpu_possible_mask;
+	}
+}
+EXPORT_SYMBOL(pci_irq_get_affinity);
+
 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
 {
 	return to_pci_dev(desc->dev);
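Note: for legacy interrupts pci_irq_get_affinity() reports cpu_possible_mask,
since nothing was spread. It pairs naturally with pci_irq_vector() above; a
hypothetical debugging helper:

	#include <linux/pci.h>

	static void mydev_report_vector(struct pci_dev *pdev, int nr)
	{
		const struct cpumask *mask = pci_irq_get_affinity(pdev, nr);
		int irq = pci_irq_vector(pdev, nr);

		if (irq >= 0 && mask)
			dev_info(&pdev->dev, "vector %d -> irq %d, cpus %*pbl\n",
				 nr, irq, cpumask_pr_args(mask));
	}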