@@ -549,6 +549,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
 	if (can_dropped_invalid_skb(dev, skb))
 		return NETDEV_TX_OK;
 
+	spin_lock_bh(&priv->xmit_lock);
 	msg_obj_no = get_tx_next_msg_obj(priv);
 
 	/* prepare message object for transmission */
@@ -563,6 +564,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
 	if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
 	    (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
 		netif_stop_queue(dev);
+	spin_unlock_bh(&priv->xmit_lock);
 
 	return NETDEV_TX_OK;
 }
@@ -787,7 +789,9 @@ static void c_can_do_tx(struct net_device *dev)
 	struct c_can_priv *priv = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
 
-	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+	spin_lock_bh(&priv->xmit_lock);
+
+	for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
 		msg_obj_no = get_tx_echo_msg_obj(priv);
 		val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
 		if (!(val & (1 << (msg_obj_no - 1)))) {
@@ -809,6 +813,8 @@ static void c_can_do_tx(struct net_device *dev)
 	if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
 	    ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
 		netif_wake_queue(dev);
+
+	spin_unlock_bh(&priv->xmit_lock);
 }
 
 /*
@@ -1262,6 +1268,7 @@ struct net_device *alloc_c_can_dev(void)
 		return NULL;
 
 	priv = netdev_priv(dev);
+	spin_lock_init(&priv->xmit_lock);
 	netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
 
 	priv->dev = dev;
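For readers unfamiliar with the driver, the scheme this patch serializes is a single-producer/single-consumer ring: c_can_start_xmit() advances the free-running producer index tx_next, c_can_do_tx() (reached from the NAPI poll path) advances the consumer index tx_echo, and both the queue-stop test and the completion loop depend on reading the two indices consistently, which is what xmit_lock now guarantees. Below is a minimal user-space sketch of that pattern, with a pthread mutex standing in for the spinlock; every name and constant in it is illustrative, not the driver's own code, and it deliberately skips the real c_can_do_tx()'s check of the hardware TXRQST bits.

/*
 * Minimal user-space analogue of the locking scheme above, assuming a
 * power-of-two ring of TX message objects. A pthread mutex plays the
 * role of priv->xmit_lock; all identifiers here are hypothetical.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSG_OBJ_TX_NUM		16	/* ring size, power of two */
#define NEXT_MSG_OBJ_MASK	(MSG_OBJ_TX_NUM - 1)

struct xmit_state {
	pthread_mutex_t lock;	/* stands in for xmit_lock */
	uint32_t tx_next;	/* producer index, advanced only by start_xmit() */
	uint32_t tx_echo;	/* consumer index, advanced only by do_tx() */
	bool queue_stopped;
};

/* Producer path: claim the next message object; stop once the ring wraps. */
static void start_xmit(struct xmit_state *s)
{
	pthread_mutex_lock(&s->lock);
	uint32_t obj = s->tx_next & NEXT_MSG_OBJ_MASK;
	s->tx_next++;
	if ((s->tx_next & NEXT_MSG_OBJ_MASK) == 0)	/* all objects in flight */
		s->queue_stopped = true;		/* netif_stop_queue() analogue */
	pthread_mutex_unlock(&s->lock);
	printf("xmit: queued obj %u\n", obj);
}

/* Consumer path: retire completed objects, then wake the queue. */
static void do_tx(struct xmit_state *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->tx_next - s->tx_echo > 0)	/* unsigned math survives wraparound */
		s->tx_echo++;			/* object confirmed transmitted */
	if (s->queue_stopped) {
		s->queue_stopped = false;	/* netif_wake_queue() analogue */
		printf("do_tx: queue woken\n");
	}
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct xmit_state s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < MSG_OBJ_TX_NUM; i++)
		start_xmit(&s);		/* fills the ring, stops the queue */
	do_tx(&s);			/* drains it and wakes the queue */
	return 0;
}

One note on the _bh variants the patch uses: since c_can_do_tx() runs in softirq context via NAPI, any other path sharing the lock has to keep bottom halves disabled while holding it, otherwise the softirq could spin on a lock held by the very task it interrupted on the same CPU.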