
Merge branch 'mlx4'

Amir Vadai says:

====================
net/mlx4: Mellanox driver update 01-01-2014

This small patchset fixes a bogus usage of
netif_get_num_default_rss_queues() in the mlx4_en driver.

Changes from V1:
- Removed the affinity_hint patch, to make it generic instead of mlx4 specific

Changes from V0:
- Instead of reverting the use of netif_get_num_default_rss_queues() in mlx4_en,
  fix it to limit the actual number of receive queues rather than the
  number of IRQs.

Patchset was applied and tested against commit: cb6e926 "ipv6:fix checkpatch
errors with assignment in if condition"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 11 years ago
commit a1991c749a

+ 2 - 13
drivers/net/ethernet/mellanox/mlx4/en_main.c

@@ -274,19 +274,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
 		mlx4_en_init_timestamp(mdev);
 
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-		if (!dev->caps.comp_pool) {
-			mdev->profile.prof[i].rx_ring_num =
-				rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
-							   min_t(int,
-								 dev->caps.num_comp_vectors,
-								 DEF_RX_RINGS)));
-		} else {
-			mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
-				min_t(int, dev->caps.comp_pool/
-				      dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
-		}
-	}
+	/* Set default number of RX rings*/
+	mlx4_en_set_num_rx_rings(mdev);
 
 	/* Create our own workqueue for reset/multicast tasks
 	 * Note: we cannot use the shared workqueue because of deadlocks caused

+ 25 - 0
drivers/net/ethernet/mellanox/mlx4/en_rx.c

@@ -318,6 +318,31 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 	}
 }
 
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
+{
+	int i;
+	int num_of_eqs;
+	int num_rx_rings;
+	struct mlx4_dev *dev = mdev->dev;
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+		if (!dev->caps.comp_pool)
+			num_of_eqs = max_t(int, MIN_RX_RINGS,
+					   min_t(int,
+						 dev->caps.num_comp_vectors,
+						 DEF_RX_RINGS));
+		else
+			num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
+					   dev->caps.comp_pool/
+					   dev->caps.num_ports) - 1;
+
+		num_rx_rings = min_t(int, num_of_eqs,
+				     netif_get_num_default_rss_queues());
+		mdev->profile.prof[i].rx_ring_num =
+			rounddown_pow_of_two(num_rx_rings);
+	}
+}
+
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring **pring,
 			   u32 size, u16 stride, int node)
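
For illustration only, below is a minimal userspace sketch of the sizing logic that the new mlx4_en_set_num_rx_rings() applies. The constants and the RSS-queue helper are stand-ins with assumed values (the real MIN_RX_RINGS, DEF_RX_RINGS and MAX_MSIX_P_PORT live in the mlx4 headers, and netif_get_num_default_rss_queues() in that era returned roughly min(8, online CPUs)); it is not the driver code itself.

/*
 * Userspace sketch of the RX-ring sizing introduced by this patch.
 * Constants and default_rss_queues() are illustrative stand-ins.
 */
#include <stdio.h>

#define MIN_RX_RINGS      4   /* illustrative */
#define DEF_RX_RINGS      16  /* illustrative */
#define MAX_MSIX_P_PORT   17  /* illustrative */

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

/* Largest power of two <= n, like the kernel's rounddown_pow_of_two(). */
static int rounddown_pow_of_two(int n)
{
	int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

/* Stand-in for netif_get_num_default_rss_queues(): min(8, online CPUs). */
static int default_rss_queues(int online_cpus)
{
	return min_int(8, online_cpus);
}

static int rx_rings(int comp_pool, int num_comp_vectors, int num_ports,
		    int online_cpus)
{
	int num_of_eqs;

	if (!comp_pool)
		num_of_eqs = max_int(MIN_RX_RINGS,
				     min_int(num_comp_vectors, DEF_RX_RINGS));
	else
		num_of_eqs = min_int(MAX_MSIX_P_PORT,
				     comp_pool / num_ports) - 1;

	/* The RSS default now caps the ring count, not the IRQ count. */
	return rounddown_pow_of_two(min_int(num_of_eqs,
					    default_rss_queues(online_cpus)));
}

int main(void)
{
	/* Example: 2-port NIC, 32 pooled completion vectors, 24 CPUs -> 8. */
	printf("rx_ring_num = %d\n", rx_rings(32, 0, 2, 24));
	return 0;
}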

+ 1 - 2
drivers/net/ethernet/mellanox/mlx4/main.c

@@ -41,7 +41,6 @@
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
 #include <linux/delay.h>
-#include <linux/netdevice.h>
 #include <linux/kmod.h>
 
 #include <linux/mlx4/device.h>
@@ -1974,7 +1973,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct msix_entry *entries;
 	int nreq = min_t(int, dev->caps.num_ports *
-			 min_t(int, netif_get_num_default_rss_queues() + 1,
+			 min_t(int, num_online_cpus() + 1,
 			       MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
 	int i;
 

+ 1 - 1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h

@@ -737,7 +737,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     int cq, int user_prio);
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 				struct mlx4_en_tx_ring *ring);
-
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring **pring,
 			   u32 size, u16 stride, int node);