@@ -1513,13 +1513,14 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
 					struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int max_irqs, num_irqs, i, ret;
+	int max_irqs, num_irqs, i, ret, nr_online_cpus;
 	u16 pci_cmd;
 
 	if (!trans->cfg->mq_rx_supported)
 		goto enable_msi;
 
-	max_irqs = min_t(u32, num_possible_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
+	nr_online_cpus = num_online_cpus();
+	max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
 	for (i = 0; i < max_irqs; i++)
 		trans_pcie->msix_entries[i].entry = i;
 
@@ -1545,11 +1546,11 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
 	 * Two interrupts less: non rx causes shared with FBQ and RSS.
 	 * More than two interrupts: we will use fewer RSS queues.
 	 */
-	if (num_irqs <= num_online_cpus()) {
+	if (num_irqs <= nr_online_cpus) {
 		trans_pcie->trans->num_rx_queues = num_irqs + 1;
 		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
 					      IWL_SHARED_IRQ_FIRST_RSS;
-	} else if (num_irqs == num_online_cpus() + 1) {
+	} else if (num_irqs == nr_online_cpus + 1) {
 		trans_pcie->trans->num_rx_queues = num_irqs;
 		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
 	} else {
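
For reference, below is a minimal standalone sketch of the vector-to-RX-queue mapping that the second hunk implements; it is not part of the patch. The branch conditions and the IWL_SHARED_IRQ_* flag names come from the diff above, while the flag values, the map_vectors() helper, main() and the body of the final else branch (which lies outside the hunk) are assumptions for illustration only.

/*
 * Illustrative sketch only, not driver code: mirrors how the granted
 * MSI-X vector count is turned into an RX queue count plus flags that
 * say what the first vector is shared with.
 */
#include <stdio.h>

#define IWL_SHARED_IRQ_NON_RX    0x1	/* values assumed for illustration */
#define IWL_SHARED_IRQ_FIRST_RSS 0x2

struct vec_layout {
	int num_rx_queues;
	unsigned int shared_vec_mask;
};

static struct vec_layout map_vectors(int num_irqs, int nr_online_cpus)
{
	struct vec_layout l = { 0, 0 };

	if (num_irqs <= nr_online_cpus) {
		/* first vector shared by non-RX causes, FBQ and first RSS queue */
		l.num_rx_queues = num_irqs + 1;
		l.shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
				    IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == nr_online_cpus + 1) {
		/* first vector shared by non-RX causes and FBQ only */
		l.num_rx_queues = num_irqs;
		l.shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		/* this branch's body is outside the hunk; a dedicated non-RX
		 * vector leaving num_irqs - 1 for RX is assumed here */
		l.num_rx_queues = num_irqs - 1;
	}
	return l;
}

int main(void)
{
	/* e.g. 4 online CPUs but only 4 granted vectors: first vector is shared */
	struct vec_layout l = map_vectors(4, 4);

	printf("rx queues: %d, shared mask: 0x%x\n",
	       l.num_rx_queues, l.shared_vec_mask);
	return 0;
}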