
Merge branch 'bcmgenet-next'

Florian Fainelli says:

====================
net: bcmgenet: checkpatch fixes

This patch series contains cleanups for the CHECKs and WARNINGs reported by
checkpatch.pl. I removed one patch from the series since Joe reported it was
a false positive caused by my not using the latest version of the script.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller (commit 8942a6d5f2)
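
Most of the diff below is whitespace-only: wrapped arguments are re-indented so
that continuation lines start in the column just after the opening parenthesis,
which is what checkpatch.pl's "Alignment should match open parenthesis" check
expects. A minimal standalone sketch of that convention (hypothetical code, not
taken from the driver):

	#include <stdio.h>

	/* Hypothetical helper, used only to illustrate the checkpatch style:
	 * when a call or declaration wraps, the continuation lines begin in
	 * the column immediately after the opening parenthesis.
	 */
	static void report_ring_state(const char *name, unsigned int ring,
				      unsigned int prod, unsigned int cons)
	{
		printf("%s: ring=%u prod=%u cons=%u\n",
		       name, ring, prod, cons);
	}

	int main(void)
	{
		report_ring_state("demo", 0, 12, 7);
		return 0;
	}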

drivers/net/ethernet/broadcom/genet/bcmgenet.c | +135 -150

@@ -6,15 +6,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
 #define pr_fmt(fmt)				"bcmgenet: " fmt
@@ -79,13 +70,13 @@
 				TOTAL_DESC * DMA_DESC_SIZE)
 
 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
-						void __iomem *d, u32 value)
+					     void __iomem *d, u32 value)
 {
 	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
 }
 
 static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
-						void __iomem *d)
+					    void __iomem *d)
 {
 	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
 }
@@ -108,7 +99,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
 
 /* Combined address + length/status setter */
 static inline void dmadesc_set(struct bcmgenet_priv *priv,
-				void __iomem *d, dma_addr_t addr, u32 val)
+			       void __iomem *d, dma_addr_t addr, u32 val)
 {
 	dmadesc_set_length_status(priv, d, val);
 	dmadesc_set_addr(priv, d, addr);
@@ -242,7 +233,7 @@ static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
 }
 
 static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
-					enum dma_reg r)
+				      enum dma_reg r)
 {
 	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
 			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
@@ -256,7 +247,7 @@ static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
 }
 
 static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
-					enum dma_reg r)
+				      enum dma_reg r)
 {
 	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
 			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
@@ -333,8 +324,8 @@ static const u8 genet_dma_ring_regs_v123[] = {
 static const u8 *genet_dma_ring_regs;
 
 static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
-						unsigned int ring,
-						enum dma_ring_reg r)
+					   unsigned int ring,
+					   enum dma_ring_reg r)
 {
 	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
 			(DMA_RING_SIZE * ring) +
@@ -342,9 +333,8 @@ static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
 }
 
 static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
-						unsigned int ring,
-						u32 val,
-						enum dma_ring_reg r)
+					     unsigned int ring, u32 val,
+					     enum dma_ring_reg r)
 {
 	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
 			(DMA_RING_SIZE * ring) +
@@ -352,8 +342,8 @@ static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
 }
 
 static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
-						unsigned int ring,
-						enum dma_ring_reg r)
+					   unsigned int ring,
+					   enum dma_ring_reg r)
 {
 	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
 			(DMA_RING_SIZE * ring) +
@@ -361,9 +351,8 @@ static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
 }
 
 static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
-						unsigned int ring,
-						u32 val,
-						enum dma_ring_reg r)
+					     unsigned int ring, u32 val,
+					     enum dma_ring_reg r)
 {
 	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
 			(DMA_RING_SIZE * ring) +
@@ -371,7 +360,7 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
 }
 
 static int bcmgenet_get_settings(struct net_device *dev,
-		struct ethtool_cmd *cmd)
+				 struct ethtool_cmd *cmd)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 
@@ -385,7 +374,7 @@ static int bcmgenet_get_settings(struct net_device *dev,
 }
 
 static int bcmgenet_set_settings(struct net_device *dev,
-		struct ethtool_cmd *cmd)
+				 struct ethtool_cmd *cmd)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 
@@ -458,7 +447,7 @@ static int bcmgenet_set_tx_csum(struct net_device *dev,
 }
 
 static int bcmgenet_set_features(struct net_device *dev,
-		netdev_features_t features)
+				 netdev_features_t features)
 {
 	netdev_features_t changed = features ^ dev->features;
 	netdev_features_t wanted = dev->wanted_features;
@@ -625,12 +614,11 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
 #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
 
 static void bcmgenet_get_drvinfo(struct net_device *dev,
-		struct ethtool_drvinfo *info)
+				 struct ethtool_drvinfo *info)
 {
 	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
 	strlcpy(info->version, "v2.0", sizeof(info->version));
 	info->n_stats = BCMGENET_STATS_LEN;
-
 }
 
 static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -643,8 +631,8 @@ static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
 	}
 }
 
-static void bcmgenet_get_strings(struct net_device *dev,
-				u32 stringset, u8 *data)
+static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
+				 u8 *data)
 {
 	int i;
 
@@ -652,8 +640,8 @@ static void bcmgenet_get_strings(struct net_device *dev,
 	case ETH_SS_STATS:
 		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
 			memcpy(data + i * ETH_GSTRING_LEN,
-				bcmgenet_gstrings_stats[i].stat_string,
-				ETH_GSTRING_LEN);
+			       bcmgenet_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
 		}
 		break;
 	}
@@ -678,8 +666,8 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 		case BCMGENET_STAT_RUNT:
 			if (s->type != BCMGENET_STAT_MIB_RX)
 				offset = BCMGENET_STAT_OFFSET;
-			val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
-								j + offset);
+			val = bcmgenet_umac_readl(priv,
+						  UMAC_MIB_START + j + offset);
 			break;
 		case BCMGENET_STAT_MISC:
 			val = bcmgenet_umac_readl(priv, s->reg_offset);
@@ -696,8 +684,8 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 }
 
 static void bcmgenet_get_ethtool_stats(struct net_device *dev,
-					struct ethtool_stats *stats,
-					u64 *data)
+				       struct ethtool_stats *stats,
+				       u64 *data)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	int i;
@@ -765,7 +753,7 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv,
 }
 
 static void bcmgenet_power_up(struct bcmgenet_priv *priv,
-				enum bcmgenet_power_mode mode)
+			      enum bcmgenet_power_mode mode)
 {
 	u32 reg;
 
@@ -850,37 +838,37 @@ static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
 						  struct bcmgenet_tx_ring *ring)
 {
 	bcmgenet_intrl2_0_writel(priv,
-			UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
-			INTRL2_CPU_MASK_SET);
+				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+				 INTRL2_CPU_MASK_SET);
 }
 
 static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
 						 struct bcmgenet_tx_ring *ring)
 {
 	bcmgenet_intrl2_0_writel(priv,
-			UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
-			INTRL2_CPU_MASK_CLEAR);
+				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+				 INTRL2_CPU_MASK_CLEAR);
 }
 
 static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
-						struct bcmgenet_tx_ring *ring)
+					       struct bcmgenet_tx_ring *ring)
 {
-	bcmgenet_intrl2_1_writel(priv,
-			(1 << ring->index), INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+				 INTRL2_CPU_MASK_CLEAR);
 	priv->int1_mask &= ~(1 << ring->index);
 }
 
 static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
 						struct bcmgenet_tx_ring *ring)
 {
-	bcmgenet_intrl2_1_writel(priv,
-			(1 << ring->index), INTRL2_CPU_MASK_SET);
+	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+				 INTRL2_CPU_MASK_SET);
 	priv->int1_mask |= (1 << ring->index);
 }
 
 /* Unlocked version of the reclaim routine */
 static void __bcmgenet_tx_reclaim(struct net_device *dev,
-				struct bcmgenet_tx_ring *ring)
+				  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	int last_tx_cn, last_c_index, num_tx_bds;
@@ -903,9 +891,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 		last_tx_cn = num_tx_bds - last_c_index + c_index;
 
 	netif_dbg(priv, tx_done, dev,
-			"%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
-			__func__, ring->index,
-			c_index, last_tx_cn, last_c_index);
+		  "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
+		  __func__, ring->index,
+		  c_index, last_tx_cn, last_c_index);
 
 	/* Reclaim transmitted buffers */
 	while (last_tx_cn-- > 0) {
@@ -913,17 +901,17 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 		if (tx_cb_ptr->skb) {
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
 			dma_unmap_single(&dev->dev,
-					dma_unmap_addr(tx_cb_ptr, dma_addr),
-					tx_cb_ptr->skb->len,
-					DMA_TO_DEVICE);
+					 dma_unmap_addr(tx_cb_ptr, dma_addr),
+					 tx_cb_ptr->skb->len,
+					 DMA_TO_DEVICE);
 			bcmgenet_free_cb(tx_cb_ptr);
 		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
 			dev->stats.tx_bytes +=
 				dma_unmap_len(tx_cb_ptr, dma_len);
 			dma_unmap_page(&dev->dev,
-					dma_unmap_addr(tx_cb_ptr, dma_addr),
-					dma_unmap_len(tx_cb_ptr, dma_len),
-					DMA_TO_DEVICE);
+				       dma_unmap_addr(tx_cb_ptr, dma_addr),
+				       dma_unmap_len(tx_cb_ptr, dma_len),
+				       DMA_TO_DEVICE);
 			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
 		}
 		dev->stats.tx_packets++;
@@ -943,7 +931,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 }
 
 static void bcmgenet_tx_reclaim(struct net_device *dev,
-		struct bcmgenet_tx_ring *ring)
+				struct bcmgenet_tx_ring *ring)
 {
 	unsigned long flags;
 
@@ -1019,9 +1007,9 @@ static int bcmgenet_xmit_single(struct net_device *dev,
 
 /* Transmit a SKB fragement */
 static int bcmgenet_xmit_frag(struct net_device *dev,
-				skb_frag_t *frag,
-				u16 dma_desc_flags,
-				struct bcmgenet_tx_ring *ring)
+			      skb_frag_t *frag,
+			      u16 dma_desc_flags,
+			      struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct device *kdev = &priv->pdev->dev;
@@ -1036,11 +1024,11 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
 	tx_cb_ptr->skb = NULL;
 
 	mapping = skb_frag_dma_map(kdev, frag, 0,
-		skb_frag_size(frag), DMA_TO_DEVICE);
+				   skb_frag_size(frag), DMA_TO_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
 		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
-				__func__);
+			  __func__);
 		return ret;
 	}
 
@@ -1048,8 +1036,8 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
 	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
 
 	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
-			(frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
-			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
+		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
 
 
 	ring->free_bds -= 1;
@@ -1112,8 +1100,9 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
 			tx_csum_info |= STATUS_TX_CSUM_LV;
 			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
 				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
-		} else
+		} else {
 			tx_csum_info = 0;
+		}
 
 		status->tx_csum_info = tx_csum_info;
 	}
@@ -1153,7 +1142,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (ring->free_bds <= nr_frags + 1) {
 		netif_tx_stop_queue(txq);
 		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
-				__func__, index, ring->queue);
+			   __func__, index, ring->queue);
 		ret = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -1181,8 +1170,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* xmit fragment */
 	for (i = 0; i < nr_frags; i++) {
 		ret = bcmgenet_xmit_frag(dev,
-				&skb_shinfo(skb)->frags[i],
-				(i == nr_frags - 1) ? DMA_EOP : 0, ring);
+					 &skb_shinfo(skb)->frags[i],
+					 (i == nr_frags - 1) ? DMA_EOP : 0,
+					 ring);
 		if (ret) {
 			ret = NETDEV_TX_OK;
 			goto out;
@@ -1195,7 +1185,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * producer index, now write it down to the hardware
 	 */
 	bcmgenet_tdma_ring_writel(priv, ring->index,
-			ring->prod_index, TDMA_PROD_INDEX);
+				  ring->prod_index, TDMA_PROD_INDEX);
 
 	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
 		netif_tx_stop_queue(txq);
@@ -1209,16 +1199,14 @@ out:
 }
 
 
-static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
-				struct enet_cb *cb)
+static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
 {
 	struct device *kdev = &priv->pdev->dev;
 	struct sk_buff *skb;
 	dma_addr_t mapping;
 	int ret;
 
-	skb = netdev_alloc_skb(priv->dev,
-				priv->rx_buf_len + SKB_ALIGNMENT);
+	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
 	if (!skb)
 		return -ENOMEM;
 
@@ -1226,12 +1214,12 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
 	WARN_ON(cb->skb != NULL);
 	cb->skb = skb;
 	mapping = dma_map_single(kdev, skb->data,
-			priv->rx_buf_len, DMA_FROM_DEVICE);
+				 priv->rx_buf_len, DMA_FROM_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
 		bcmgenet_free_cb(cb);
 		netif_err(priv, rx_err, priv->dev,
-				"%s DMA map failed\n", __func__);
+			  "%s DMA map failed\n", __func__);
 		return ret;
 	}
 
@@ -1266,8 +1254,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 	unsigned int p_index;
 	unsigned int chksum_ok = 0;
 
-	p_index = bcmgenet_rdma_ring_readl(priv,
-			DESC_INDEX, RDMA_PROD_INDEX);
+	p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
 	p_index &= DMA_P_INDEX_MASK;
 
 	if (p_index < priv->rx_c_index)
@@ -1277,11 +1264,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		rxpkttoprocess = p_index - priv->rx_c_index;
 
 	netif_dbg(priv, rx_status, dev,
-		"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
+		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
 
 	while ((rxpktprocessed < rxpkttoprocess) &&
-			(rxpktprocessed < budget)) {
-
+	       (rxpktprocessed < budget)) {
 		/* Unmap the packet contents such that we can use the
 		 * RSV from the 64 bytes descriptor when enabled and save
 		 * a 32-bits register read
@@ -1289,15 +1275,17 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		cb = &priv->rx_cbs[priv->rx_read_ptr];
 		skb = cb->skb;
 		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
-				priv->rx_buf_len, DMA_FROM_DEVICE);
+				 priv->rx_buf_len, DMA_FROM_DEVICE);
 
 		if (!priv->desc_64b_en) {
-			dma_length_status = dmadesc_get_length_status(priv,
-							priv->rx_bds +
-							(priv->rx_read_ptr *
-							 DMA_DESC_SIZE));
+			dma_length_status =
+				dmadesc_get_length_status(priv,
+							  priv->rx_bds +
+							  (priv->rx_read_ptr *
+							   DMA_DESC_SIZE));
 		} else {
 			struct status_64 *status;
+
 			status = (struct status_64 *)skb->data;
 			dma_length_status = status->length_status;
 		}
@@ -1309,9 +1297,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
 
 		netif_dbg(priv, rx_status, dev,
-			"%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
-			__func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
-			dma_length_status);
+			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
+			  __func__, p_index, priv->rx_c_index,
+			  priv->rx_read_ptr, dma_length_status);
 
 		rxpktprocessed++;
 
@@ -1327,7 +1315,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 
 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
 			netif_err(priv, rx_status, dev,
-					"Droping fragmented packet!\n");
+				  "dropping fragmented packet!\n");
 			dev->stats.rx_dropped++;
 			dev->stats.rx_errors++;
 			dev_kfree_skb_any(cb->skb);
@@ -1341,7 +1329,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 						DMA_RX_LG |
 						DMA_RX_RXER))) {
 			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
-						(unsigned int)dma_flag);
+				  (unsigned int)dma_flag);
 			if (dma_flag & DMA_RX_CRC_ERROR)
 				dev->stats.rx_crc_errors++;
 			if (dma_flag & DMA_RX_OV)
@@ -1360,7 +1348,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		} /* error packet */
 
 		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
-				priv->desc_rxchk_en;
+			     priv->desc_rxchk_en;
 
 		skb_put(skb, len);
 		if (priv->desc_64b_en) {
@@ -1420,7 +1408,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
 		ret = bcmgenet_rx_refill(priv, cb);
 		if (ret)
 			break;
-
 	}
 
 	return ret;
@@ -1436,8 +1423,8 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 
 		if (dma_unmap_addr(cb, dma_addr)) {
 			dma_unmap_single(&priv->dev->dev,
-					dma_unmap_addr(cb, dma_addr),
-					priv->rx_buf_len, DMA_FROM_DEVICE);
+					 dma_unmap_addr(cb, dma_addr),
+					 priv->rx_buf_len, DMA_FROM_DEVICE);
 			dma_unmap_addr_set(cb, dma_addr, 0);
 		}
 
@@ -1446,8 +1433,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 	}
 }
 
-static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask,
-				bool enable)
+static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
 {
 	u32 reg;
 
@@ -1523,7 +1509,8 @@ static int init_umac(struct bcmgenet_priv *priv)
 	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
 	/* clear tx/rx counter */
 	bcmgenet_umac_writel(priv,
-		MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
+			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
+			     UMAC_MIB_CTRL);
 	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
 
 	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
@@ -1543,11 +1530,11 @@ static int init_umac(struct bcmgenet_priv *priv)
 	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
 
 	/* Monitor cable plug/unpluged event for internal PHY */
-	if (phy_is_internal(priv->phydev))
+	if (phy_is_internal(priv->phydev)) {
 		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
-	else if (priv->ext_phy)
+	} else if (priv->ext_phy) {
 		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
-	else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
 		reg = bcmgenet_bp_mc_get(priv);
 		reg |= BIT(priv->hw_params->bp_in_en_shift);
 
@@ -1563,8 +1550,7 @@ static int init_umac(struct bcmgenet_priv *priv)
 	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
 		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
 
-	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
-		INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
 
 	/* Enable rx/tx engine.*/
 	dev_dbg(kdev, "done init umac\n");
@@ -1613,28 +1599,28 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
 	/* Disable rate control for now */
 	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
-			TDMA_FLOW_PERIOD);
+				  TDMA_FLOW_PERIOD);
 	/* Unclassified traffic goes to ring 16 */
 	bcmgenet_tdma_ring_writel(priv, index,
-			((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
-			DMA_RING_BUF_SIZE);
+				  ((size << DMA_RING_SIZE_SHIFT) |
+				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
 
 	first_bd = write_ptr;
 
 	/* Set start and end address, read and write pointers */
 	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
-			DMA_START_ADDR);
+				  DMA_START_ADDR);
 	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
-			TDMA_READ_PTR);
+				  TDMA_READ_PTR);
 	bcmgenet_tdma_ring_writel(priv, index, first_bd,
-			TDMA_WRITE_PTR);
+				  TDMA_WRITE_PTR);
 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
-			DMA_END_ADDR);
+				  DMA_END_ADDR);
 }
 
 /* Initialize a RDMA ring */
 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
-				  unsigned int index, unsigned int size)
+				 unsigned int index, unsigned int size)
 {
 	u32 words_per_bd = WORDS_PER_BD(priv);
 	int ret;
@@ -1645,8 +1631,8 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
 	priv->rx_bd_assign_index = 0;
 	priv->rx_c_index = 0;
 	priv->rx_read_ptr = 0;
-	priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
-				GFP_KERNEL);
+	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
+			       GFP_KERNEL);
 	if (!priv->rx_cbs)
 		return -ENOMEM;
 
@@ -1660,14 +1646,15 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
 	bcmgenet_rdma_ring_writel(priv, index,
-		((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
-		DMA_RING_BUF_SIZE);
+				  ((size << DMA_RING_SIZE_SHIFT) |
+				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
 	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
 	bcmgenet_rdma_ring_writel(priv, index,
-		words_per_bd * size - 1, DMA_END_ADDR);
+				  words_per_bd * size - 1, DMA_END_ADDR);
 	bcmgenet_rdma_ring_writel(priv, index,
-			(DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
-			DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+				  (DMA_FC_THRESH_LO <<
+				   DMA_XOFF_THRESHOLD_SHIFT) |
+				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
 
 	return ret;
@@ -1714,8 +1701,8 @@ static void bcmgenet_init_multiq(struct net_device *dev)
 		 * (ring 16)
 		 */
 		bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
-					i * priv->hw_params->bds_cnt,
-					(i + 1) * priv->hw_params->bds_cnt);
+				      i * priv->hw_params->bds_cnt,
+				      (i + 1) * priv->hw_params->bds_cnt);
 
 		/* Configure ring as decriptor ring and setup priority */
 		ring_cfg |= 1 << i;
@@ -1786,8 +1773,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 	/* Initialize commont TX ring structures */
 	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
 	priv->num_tx_bds = TOTAL_DESC;
-	priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
-				GFP_KERNEL);
+	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
+			       GFP_KERNEL);
 	if (!priv->tx_cbs) {
 		bcmgenet_fini_dma(priv);
 		return -ENOMEM;
@@ -1798,8 +1785,9 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 
 	/* initialize special ring 16 */
 	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
-			priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
-			TOTAL_DESC);
+			      priv->hw_params->tx_queues *
+			      priv->hw_params->bds_cnt,
+			      TOTAL_DESC);
 
 	return 0;
 }
@@ -1820,11 +1808,11 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
 	priv->rx_c_index += work_done;
 	priv->rx_c_index &= DMA_C_INDEX_MASK;
 	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
-				priv->rx_c_index, RDMA_CONS_INDEX);
+				  priv->rx_c_index, RDMA_CONS_INDEX);
 	if (work_done < budget) {
 		napi_complete(napi);
-		bcmgenet_intrl2_0_writel(priv,
-			UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
+		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
+					 INTRL2_CPU_MASK_CLEAR);
 	}
 
 	return work_done;
@@ -1847,9 +1835,9 @@ static void bcmgenet_irq_task(struct work_struct *work)
 
 	/* Link UP/DOWN event */
 	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-		(priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+	    (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
 		phy_mac_interrupt(priv->phydev,
-			priv->irq0_stat & UMAC_IRQ_LINK_UP);
+				  priv->irq0_stat & UMAC_IRQ_LINK_UP);
 		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
 	}
 }
@@ -1868,7 +1856,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
-		"%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
 	/* Check the MBDONE interrupts.
 	 * packet is done, reclaim descriptors
 	 */
@@ -1877,7 +1865,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 		for (index = 0; index < 16; index++) {
 			if (priv->irq1_stat & (1 << index))
 				bcmgenet_tx_reclaim(priv->dev,
-						&priv->tx_rings[index]);
+						    &priv->tx_rings[index]);
 		}
 	}
 	return IRQ_HANDLED;
@@ -1896,7 +1884,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
-		"IRQ=0x%x\n", priv->irq0_stat);
+		  "IRQ=0x%x\n", priv->irq0_stat);
 
 	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
 		/* We use NAPI(software interrupt throttling, if
@@ -1904,8 +1892,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 		 * Disable interrupt, will be enabled in the poll method.
 		 */
 		if (likely(napi_schedule_prep(&priv->napi))) {
-			bcmgenet_intrl2_0_writel(priv,
-				UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
+			bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
+						 INTRL2_CPU_MASK_SET);
 			__napi_schedule(&priv->napi);
 		}
 	}
@@ -1926,7 +1914,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 	}
 
 	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-		priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
 		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
 		wake_up(&priv->wq);
 	}
@@ -1958,7 +1946,7 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
 }
 
 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
-				  unsigned char *addr)
+				 unsigned char *addr)
 {
 	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
 			(addr[2] << 8) | addr[3], UMAC_MAC0);
@@ -2079,14 +2067,14 @@ static int bcmgenet_open(struct net_device *dev)
 	bcmgenet_enable_dma(priv, dma_ctrl);
 
 	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
-			dev->name, priv);
+			  dev->name, priv);
 	if (ret < 0) {
 		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
 		goto err_fini_dma;
 	}
 
 	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
-				dev->name, priv);
+			  dev->name, priv);
 	if (ret < 0) {
 		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
 		goto err_irq0;
@@ -2127,8 +2115,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
 	}
 
 	if (timeout == DMA_TIMEOUT_VAL) {
-		netdev_warn(priv->dev,
-			"Timed out while disabling TX DMA\n");
+		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
 		ret = -ETIMEDOUT;
 	}
 
@@ -2151,9 +2138,8 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
 	}
 
 	if (timeout == DMA_TIMEOUT_VAL) {
-		netdev_warn(priv->dev,
-			"Timed out while disabling RX DMA\n");
-			ret = -ETIMEDOUT;
+		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+		ret = -ETIMEDOUT;
 	}
 
 	return ret;
@@ -2232,12 +2218,11 @@ static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
 {
 	u32 reg;
 
-	bcmgenet_umac_writel(priv,
-			addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
-	bcmgenet_umac_writel(priv,
-			addr[2] << 24 | addr[3] << 16 |
-			addr[4] << 8 | addr[5],
-			UMAC_MDF_ADDR + ((*i + 1) * 4));
+	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
+			     UMAC_MDF_ADDR + (*i * 4));
+	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
+			     addr[4] << 8 | addr[5],
+			     UMAC_MDF_ADDR + ((*i + 1) * 4));
 	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
 	reg |= (1 << (MAX_MC_COUNT - *mc));
 	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
@@ -2434,7 +2419,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 
 	/* Print the GENET core version */
 	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
-		major, (reg >> 16) & 0x0f, reg & 0xffff);
+		 major, (reg >> 16) & 0x0f, reg & 0xffff);
 
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	if (!(params->flags & GENET_HAS_40BITS))

drivers/net/ethernet/broadcom/genet/bcmgenet.h | +4 -14

@@ -4,18 +4,8 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- *
-*/
+ */
+
 #ifndef __BCMGENET_H__
 #define __BCMGENET_H__
 
@@ -514,9 +504,9 @@ struct bcmgenet_tx_ring {
 	unsigned int	cb_ptr;		/* Tx ring initial CB ptr */
 	unsigned int	end_ptr;	/* Tx ring end CB ptr */
 	void (*int_enable)(struct bcmgenet_priv *priv,
-				struct bcmgenet_tx_ring *);
+			   struct bcmgenet_tx_ring *);
 	void (*int_disable)(struct bcmgenet_priv *priv,
-				struct bcmgenet_tx_ring *);
+			    struct bcmgenet_tx_ring *);
 };
 
 /* device context */

drivers/net/ethernet/broadcom/genet/bcmmii.c | +15 -24

@@ -6,15 +6,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
 
@@ -44,15 +35,15 @@ static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
 	u32 reg;
 
 	bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
-			(location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
+			     (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
 	/* Start MDIO transaction*/
 	reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
 	reg |= MDIO_START_BUSY;
 	bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
 	wait_event_timeout(priv->wq,
-			!(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
-				& MDIO_START_BUSY),
-			HZ / 100);
+			   !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
+			   & MDIO_START_BUSY),
+			   HZ / 100);
 	ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
 
 	if (ret & MDIO_READ_FAIL)
@@ -63,22 +54,22 @@ static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
 
 /* write a value to the MII */
 static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
-			int location, u16 val)
+			      int location, u16 val)
 {
 	struct net_device *dev = bus->priv;
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	u32 reg;
 
 	bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
-			(location << MDIO_REG_SHIFT) | (0xffff & val)),
-			UMAC_MDIO_CMD);
+			     (location << MDIO_REG_SHIFT) | (0xffff & val)),
+			     UMAC_MDIO_CMD);
 	reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
 	reg |= MDIO_START_BUSY;
 	bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
 	wait_event_timeout(priv->wq,
-			!(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
-				MDIO_START_BUSY),
-			HZ / 100);
+			   !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
+			   MDIO_START_BUSY),
+			   HZ / 100);
 
 	return 0;
 }
@@ -248,7 +239,7 @@ int bcmgenet_mii_config(struct net_device *dev)
 		phy_name = "external MII";
 		phydev->supported &= PHY_BASIC_FEATURES;
 		bcmgenet_sys_writel(priv,
-				PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+				    PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
 		break;
 
 	case PHY_INTERFACE_MODE_REVMII:
@@ -284,7 +275,7 @@ int bcmgenet_mii_config(struct net_device *dev)
 		reg |= RGMII_MODE_EN | id_mode_dis;
 		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 		bcmgenet_sys_writel(priv,
-				PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+				    PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
 		break;
 	default:
 		dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
@@ -363,7 +354,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
 		priv->mii_bus->irq[phydev->addr] = PHY_POLL;
 
 	pr_info("attached PHY at address %d [%s]\n",
-			phydev->addr, phydev->drv->name);
+		phydev->addr, phydev->drv->name);
 
 	return 0;
 }
@@ -388,9 +379,9 @@ static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
 	bus->read = bcmgenet_mii_read;
 	bus->write = bcmgenet_mii_write;
 	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
-			priv->pdev->name, priv->pdev->id);
+		 priv->pdev->name, priv->pdev->id);
 
-	bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
 	if (!bus->irq) {
 		mdiobus_free(priv->mii_bus);
 		return -ENOMEM;