@@ -282,27 +282,32 @@ static int
 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
-	int mtu, order;
+	int order;
 
 	bnad_rxq_alloc_uninit(bnad, rcb);
 
-	mtu = bna_enet_mtu_get(&bnad->bna.enet);
-	order = get_order(mtu);
+	order = get_order(rcb->rxq->buffer_size);
+
+	unmap_q->type = BNAD_RXBUF_PAGE;
 
 	if (bna_is_small_rxq(rcb->id)) {
 		unmap_q->alloc_order = 0;
 		unmap_q->map_size = rcb->rxq->buffer_size;
 	} else {
-		unmap_q->alloc_order = order;
-		unmap_q->map_size =
-			(rcb->rxq->buffer_size > 2048) ?
-			PAGE_SIZE << order : 2048;
+		if (rcb->rxq->multi_buffer) {
+			unmap_q->alloc_order = 0;
+			unmap_q->map_size = rcb->rxq->buffer_size;
+			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
+		} else {
+			unmap_q->alloc_order = order;
+			unmap_q->map_size =
+				(rcb->rxq->buffer_size > 2048) ?
+				PAGE_SIZE << order : 2048;
+		}
 	}
 
 	BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
 
-	unmap_q->type = BNAD_RXBUF_PAGE;
-
 	return 0;
 }
 
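The sizing rules in this hunk reduce to a little arithmetic: small queues and multi-buffer queues map one buffer_size slice per vector with no compound-page allocation (alloc_order 0), while the legacy single-buffer path carves 2048-byte or page-order slices out of a PAGE_SIZE << order allocation. A minimal userspace sketch of just that arithmetic, with PAGE_SIZE, get_order() and the multi_buffer flag as stand-ins for the kernel definitions:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* stand-in for the kernel's get_order(): power-of-two pages covering size */
static unsigned int get_order(unsigned int size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned int buffer_size = 2048;	/* per-vector slice (multi-buffer) */
	unsigned int order = get_order(buffer_size);
	unsigned int alloc_order, map_size;
	int multi_buffer = 1;

	if (multi_buffer) {
		alloc_order = 0;		/* single page per allocation */
		map_size = buffer_size;		/* one 2048-byte slice per vector */
	} else {
		alloc_order = order;
		map_size = buffer_size > 2048 ? PAGE_SIZE << order : 2048;
	}

	/* the BUG_ON invariant: slices must tile the allocation exactly */
	assert((PAGE_SIZE << order) % map_size == 0);
	printf("alloc_order=%u map_size=%u\n", alloc_order, map_size);
	return 0;
}

Setting unmap_q->type to BNAD_RXBUF_PAGE up front also lets the multi_buffer branch override it with BNAD_RXBUF_MULTI_BUFF.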
@@ -345,10 +350,10 @@ bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 	for (i = 0; i < rcb->q_depth; i++) {
 		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
 
-		if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-			bnad_rxq_cleanup_page(bnad, unmap);
-		else
+		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 			bnad_rxq_cleanup_skb(bnad, unmap);
+		else
+			bnad_rxq_cleanup_page(bnad, unmap);
 	}
 	bnad_rxq_alloc_uninit(bnad, rcb);
 }
@@ -480,10 +485,10 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
 		return;
 
-	if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-		bnad_rxq_refill_page(bnad, rcb, to_alloc);
-	else
+	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
+	else
+		bnad_rxq_refill_page(bnad, rcb, to_alloc);
 }
 
 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
@@ -500,62 +505,91 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
 				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
 
-static inline struct sk_buff *
-bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
-		    struct bnad_rx_unmap_q *unmap_q,
-		    struct bnad_rx_unmap *unmap,
-		    u32 length, u32 flags)
+static void
+bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
+		    u32 sop_ci, u32 nvecs)
 {
-	struct bnad *bnad = rx_ctrl->bnad;
-	struct sk_buff *skb;
+	struct bnad_rx_unmap_q *unmap_q;
+	struct bnad_rx_unmap *unmap;
+	u32 ci, vec;
 
-	if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
-		skb = napi_get_frags(&rx_ctrl->napi);
-		if (unlikely(!skb))
-			return NULL;
+	unmap_q = rcb->unmap_q;
+	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
+		unmap = &unmap_q->unmap[ci];
+		BNA_QE_INDX_INC(ci, rcb->q_depth);
+
+		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+			bnad_rxq_cleanup_skb(bnad, unmap);
+		else
+			bnad_rxq_cleanup_page(bnad, unmap);
+	}
+}
+
+static void
+bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
+			u32 sop_ci, u32 nvecs, u32 last_fraglen)
+{
+	struct bnad *bnad;
+	u32 ci, vec, len, totlen = 0;
+	struct bnad_rx_unmap_q *unmap_q;
+	struct bnad_rx_unmap *unmap;
+
+	unmap_q = rcb->unmap_q;
+	bnad = rcb->bnad;
+	for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
+		unmap = &unmap_q->unmap[ci];
+		BNA_QE_INDX_INC(ci, rcb->q_depth);
 
 		dma_unmap_page(&bnad->pcidev->dev,
 			       dma_unmap_addr(&unmap->vector, dma_addr),
 			       unmap->vector.len, DMA_FROM_DEVICE);
+
+		len = (vec == nvecs) ?
+			last_fraglen : unmap->vector.len;
+		totlen += len;
+
 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-				   unmap->page, unmap->page_offset, length);
-		skb->len += length;
-		skb->data_len += length;
-		skb->truesize += length;
+				   unmap->page, unmap->page_offset, len);
 
 		unmap->page = NULL;
 		unmap->vector.len = 0;
-
-		return skb;
 	}
 
-	skb = unmap->skb;
-	BUG_ON(!skb);
+	skb->len += totlen;
+	skb->data_len += totlen;
+	skb->truesize += totlen;
+}
+
+static inline void
+bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
+		  struct bnad_rx_unmap *unmap, u32 len)
+{
+	prefetch(skb->data);
 
 	dma_unmap_single(&bnad->pcidev->dev,
 			 dma_unmap_addr(&unmap->vector, dma_addr),
 			 unmap->vector.len, DMA_FROM_DEVICE);
 
-	skb_put(skb, length);
-
+	skb_put(skb, len);
 	skb->protocol = eth_type_trans(skb, bnad->netdev);
 
 	unmap->skb = NULL;
 	unmap->vector.len = 0;
-	return skb;
 }
 
 static u32
 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 {
-	struct bna_cq_entry *cq, *cmpl;
+	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
 	struct bna_rcb *rcb = NULL;
 	struct bnad_rx_unmap_q *unmap_q;
-	struct bnad_rx_unmap *unmap;
-	struct sk_buff *skb;
+	struct bnad_rx_unmap *unmap = NULL;
+	struct sk_buff *skb = NULL;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
-	u32 packets = 0, length = 0, flags, masked_flags;
+	u32 packets = 0, len = 0, totlen = 0;
+	u32 pi, vec, sop_ci = 0, nvecs = 0;
+	u32 flags, masked_flags;
 
 	prefetch(bnad->netdev);
 
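bnad_cq_setup_skb_frags() attaches one page fragment per received vector; every vector holds its full mapped length (unmap->vector.len, the queue's map_size) except the last, whose valid byte count arrives in last_fraglen, and skb->len/data_len/truesize are now bumped once with the running total instead of per fragment. A self-contained sketch of that bookkeeping, with the lengths invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int map_size = 2048;	/* each vector's DMA mapping size */
	unsigned int nvecs = 3;		/* vectors used by this frame */
	unsigned int last_fraglen = 500;/* valid bytes in the final vector */
	unsigned int vec, len, totlen = 0;

	for (vec = 1; vec <= nvecs; vec++) {
		/* every fragment is full except the last one */
		len = (vec == nvecs) ? last_fraglen : map_size;
		totlen += len;
		printf("frag %u: %u bytes\n", vec - 1, len);
	}

	/* skb->len, data_len and truesize are bumped once, by the total */
	printf("totlen=%u\n", totlen);	/* 2048 + 2048 + 500 = 4596 */
	return 0;
}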
@@ -563,9 +597,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	cmpl = &cq[ccb->producer_index];
 
 	while (cmpl->valid && (packets < budget)) {
-		packets++;
-		flags = ntohl(cmpl->flags);
-		length = ntohs(cmpl->length);
 		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
 
 		if (bna_is_small_rxq(cmpl->rxq_id))
@@ -574,25 +605,68 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 			rcb = ccb->rcb[0];
 
 		unmap_q = rcb->unmap_q;
-		unmap = &unmap_q->unmap[rcb->consumer_index];
 
-		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
-				      BNA_CQ_EF_FCS_ERROR |
-				      BNA_CQ_EF_TOO_LONG))) {
-			if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-				bnad_rxq_cleanup_page(bnad, unmap);
-			else
-				bnad_rxq_cleanup_skb(bnad, unmap);
+		/* start of packet ci */
+		sop_ci = rcb->consumer_index;
+
+		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
+			unmap = &unmap_q->unmap[sop_ci];
+			skb = unmap->skb;
+		} else {
+			skb = napi_get_frags(&rx_ctrl->napi);
+			if (unlikely(!skb))
+				break;
+		}
+		prefetch(skb);
+
+		flags = ntohl(cmpl->flags);
+		len = ntohs(cmpl->length);
+		totlen = len;
+		nvecs = 1;
+
+		/* Check all the completions for this frame.
+		 * busy-wait doesn't help much, break here.
+		 */
+		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
+		    (flags & BNA_CQ_EF_EOP) == 0) {
+			pi = ccb->producer_index;
+			do {
+				BNA_QE_INDX_INC(pi, ccb->q_depth);
+				next_cmpl = &cq[pi];
+
+				if (!next_cmpl->valid)
+					break;
+
+				len = ntohs(next_cmpl->length);
+				flags = ntohl(next_cmpl->flags);
+
+				nvecs++;
+				totlen += len;
+			} while ((flags & BNA_CQ_EF_EOP) == 0);
+
+			if (!next_cmpl->valid)
+				break;
+		}
+
+		/* TODO: BNA_CQ_EF_LOCAL ? */
+		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+				      BNA_CQ_EF_FCS_ERROR |
+				      BNA_CQ_EF_TOO_LONG))) {
+			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
 			rcb->rxq->rx_packets_with_error++;
+
 			goto next;
 		}
 
-		skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
-					  length, flags);
+		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+			bnad_cq_setup_skb(bnad, skb, unmap, len);
+		else
+			bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
 
-		if (unlikely(!skb))
-			break;
+		packets++;
+		rcb->rxq->rx_packets++;
+		rcb->rxq->rx_bytes += totlen;
+		ccb->bytes_per_intr += totlen;
 
 		masked_flags = flags & flags_cksum_prot_mask;
 
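For a multi-buffer queue the first completion no longer describes the whole frame: the loop above walks subsequent CQ entries until it sees BNA_CQ_EF_EOP, counting vectors and bytes, and bails out of the poll if a later entry is not yet valid. A standalone model of that scan, assuming BNA_QE_INDX_INC is a wrap-around increment on a power-of-two ring and with the completion entries faked:

#include <stdio.h>

#define Q_DEPTH 8	/* power of two, like the real CQ depth */
#define QE_INDX_INC(i)	((i) = ((i) + 1) & (Q_DEPTH - 1))
#define EF_EOP 0x1

struct cmpl { int valid; unsigned int len; unsigned int flags; };

int main(void)
{
	/* a 3-vector frame that wraps the ring: entries 6, 7 and 0 */
	struct cmpl cq[Q_DEPTH] = {
		[6] = { 1, 2048, 0 },
		[7] = { 1, 2048, 0 },
		[0] = { 1,  500, EF_EOP },
	};
	unsigned int pi = 6, nvecs = 1;
	unsigned int flags = cq[pi].flags, totlen = cq[pi].len;

	while ((flags & EF_EOP) == 0) {
		QE_INDX_INC(pi);
		if (!cq[pi].valid)
			break;	/* rest of the frame not posted yet */
		flags = cq[pi].flags;
		totlen += cq[pi].len;
		nvecs++;
	}

	printf("nvecs=%u totlen=%u\n", nvecs, totlen);	/* 3, 4596 */
	return 0;
}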
@@ -606,21 +680,21 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 		else
 			skb_checksum_none_assert(skb);
 
-		rcb->rxq->rx_packets++;
-		rcb->rxq->rx_bytes += length;
-
 		if (flags & BNA_CQ_EF_VLAN)
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
 
-		if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-			napi_gro_frags(&rx_ctrl->napi);
-		else
+		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 			netif_receive_skb(skb);
+		else
+			napi_gro_frags(&rx_ctrl->napi);
 
 next:
-		cmpl->valid = 0;
-		BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
-		BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
+		for (vec = 0; vec < nvecs; vec++) {
+			cmpl = &cq[ccb->producer_index];
+			cmpl->valid = 0;
+			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+		}
 		cmpl = &cq[ccb->producer_index];
 	}
 
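Retirement at next: changes to match: the Rx consumer index jumps by nvecs in one step, while the CQ producer index walks forward clearing the valid flag on each completion the frame consumed. A tiny check of the assumed BNA_QE_INDX_ADD semantics (a wrap-around add on a power-of-two ring):

#include <stdio.h>

#define Q_DEPTH 8	/* power of two */
#define QE_INDX_ADD(i, n)	((i) = ((i) + (n)) & (Q_DEPTH - 1))

int main(void)
{
	unsigned int consumer_index = 6, nvecs = 3;

	QE_INDX_ADD(consumer_index, nvecs);	/* 6 + 3 wraps to 1 */
	printf("consumer_index=%u\n", consumer_index);
	return 0;
}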
@@ -1930,6 +2004,7 @@ err_return:
 static void
 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
 {
+	memset(rx_config, 0, sizeof(*rx_config));
 	rx_config->rx_type = BNA_RX_T_REGULAR;
 	rx_config->num_paths = bnad->num_rxp_per_rx;
 	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
@@ -1950,10 +2025,39 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
 		memset(&rx_config->rss_config, 0,
 		       sizeof(rx_config->rss_config));
 	}
+
+	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
+	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
+
+	/* BNA_RXP_SINGLE - one data-buffer queue
+	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
+	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
+	 */
+	/* TODO: configurable param for queue type */
 	rx_config->rxp_type = BNA_RXP_SLR;
-	rx_config->q_depth = bnad->rxq_depth;
 
-	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+	    rx_config->frame_size > 4096) {
+		/* though size_routing_enable is set in SLR,
+		 * small packets may get routed to same rxq.
+		 * set buf_size to 2048 instead of PAGE_SIZE.
+		 */
+		rx_config->q0_buf_size = 2048;
+		/* this should be in multiples of 2 */
+		rx_config->q0_num_vecs = 4;
+		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
+		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
+	} else {
+		rx_config->q0_buf_size = rx_config->frame_size;
+		rx_config->q0_num_vecs = 1;
+		rx_config->q0_depth = bnad->rxq_depth;
+	}
+
+	/* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
+	if (rx_config->rxp_type == BNA_RXP_SLR) {
+		rx_config->q1_depth = bnad->rxq_depth;
+		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
+	}
 
 	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
 }
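For CAT-2 adapters the large queue (q0) switches to multi-buffer only when the wire frame exceeds 4096 bytes: four 2048-byte vectors per frame, with the unmap depth scaled to match. A sketch of how those q0 parameters fall out of the MTU, assuming BNAD_FRAME_SIZE() adds the Ethernet, VLAN and FCS overhead (which is what the removed open-coded expression in bnad_open() computed):

#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_FCS_LEN	4
#define FRAME_SIZE(mtu)	(ETH_HLEN + VLAN_HLEN + (mtu) + ETH_FCS_LEN)

int main(void)
{
	unsigned int mtu = 9000, rxq_depth = 2048;
	unsigned int frame = FRAME_SIZE(mtu);	/* 9022 */
	unsigned int buf_size, num_vecs, depth;

	if (frame > 4096) {
		buf_size = 2048;	/* several small slices per frame */
		num_vecs = 4;
		depth = rxq_depth * num_vecs;
	} else {
		buf_size = frame;	/* whole frame in one buffer */
		num_vecs = 1;
		depth = rxq_depth;
	}
	printf("frame=%u buf=%u vecs=%u depth=%u\n",
	       frame, buf_size, num_vecs, depth);
	return 0;
}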
@@ -1969,6 +2073,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
 }
 
 /* Called with mutex_lock(&bnad->conf_mutex) held */
+u32
+bnad_reinit_rx(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	u32 err = 0, current_err = 0;
+	u32 rx_id = 0, count = 0;
+	unsigned long flags;
+
+	/* destroy and create new rx objects */
+	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+		if (!bnad->rx_info[rx_id].rx)
+			continue;
+		bnad_destroy_rx(bnad, rx_id);
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bna_enet_mtu_set(&bnad->bna.enet,
+			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+		count++;
+		current_err = bnad_setup_rx(bnad, rx_id);
+		if (current_err && !err) {
+			err = current_err;
+			pr_err("RXQ:%u setup failed\n", rx_id);
+		}
+	}
+
+	/* restore rx configuration */
+	if (bnad->rx_info[0].rx && !err) {
+		bnad_restore_vlans(bnad, 0);
+		bnad_enable_default_bcast(bnad);
+		spin_lock_irqsave(&bnad->bna_lock, flags);
+		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+		bnad_set_rx_mode(netdev);
+	}
+
+	return count;
+}
+
+/* Called with bnad_conf_lock() held */
 void
 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
 {
@@ -2047,13 +2194,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	/* Fill Unmap Q memory requirements */
-	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
-			rx_config->num_paths +
-			((rx_config->rxp_type == BNA_RXP_SINGLE) ?
-			 0 : rx_config->num_paths),
-			((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
-			 sizeof(struct bnad_rx_unmap_q)));
-
+	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
+				 rx_config->num_paths,
+				 (rx_config->q0_depth *
+				  sizeof(struct bnad_rx_unmap)) +
+				  sizeof(struct bnad_rx_unmap_q));
+
+	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
+		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
+					 rx_config->num_paths,
+					 (rx_config->q1_depth *
+					  sizeof(struct bnad_rx_unmap) +
+					  sizeof(struct bnad_rx_unmap_q)));
+	}
 	/* Allocate resource */
 	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
 	if (err)
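The unmap-queue memory requirement is now computed per queue from its own depth (q0_depth, q1_depth) rather than from the shared rxq_depth. Illustrative arithmetic only, with simplified stand-ins for the driver's bnad_rx_unmap structures:

#include <stdio.h>

/* simplified stand-ins for struct bnad_rx_unmap / bnad_rx_unmap_q */
struct rx_unmap { void *page; unsigned long dma_addr; unsigned int len; };
struct rx_unmap_q { unsigned int alloc_order, map_size, type; };

int main(void)
{
	unsigned long rxq_depth = 2048, q0_num_vecs = 4;
	unsigned long q0_depth = rxq_depth * q0_num_vecs; /* deeper data queue */
	unsigned long q0_bytes = q0_depth * sizeof(struct rx_unmap) +
				 sizeof(struct rx_unmap_q);
	unsigned long q1_bytes = rxq_depth * sizeof(struct rx_unmap) +
				 sizeof(struct rx_unmap_q); /* small/header queue */

	printf("per path: q0=%lu bytes, q1=%lu bytes\n", q0_bytes, q1_bytes);
	return 0;
}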
@@ -2548,7 +2701,6 @@ bnad_open(struct net_device *netdev)
 	int err;
 	struct bnad *bnad = netdev_priv(netdev);
 	struct bna_pause_config pause_config;
-	int mtu;
 	unsigned long flags;
 
 	mutex_lock(&bnad->conf_mutex);
@@ -2567,10 +2719,9 @@ bnad_open(struct net_device *netdev)
 	pause_config.tx_pause = 0;
 	pause_config.rx_pause = 0;
 
-	mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
-
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+	bna_enet_mtu_set(&bnad->bna.enet,
+			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
 	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
 	bna_enet_enable(&bnad->bna.enet);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3092,14 +3243,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
 }
 
 static int
-bnad_mtu_set(struct bnad *bnad, int mtu)
+bnad_mtu_set(struct bnad *bnad, int frame_size)
 {
 	unsigned long flags;
 
 	init_completion(&bnad->bnad_completions.mtu_comp);
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	wait_for_completion(&bnad->bnad_completions.mtu_comp);
@@ -3110,18 +3261,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu)
 static int
 bnad_change_mtu(struct net_device *netdev, int new_mtu)
 {
-	int err, mtu = netdev->mtu;
+	int err, mtu;
 	struct bnad *bnad = netdev_priv(netdev);
+	u32 rx_count = 0, frame, new_frame;
 
 	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
 		return -EINVAL;
 
 	mutex_lock(&bnad->conf_mutex);
 
+	mtu = netdev->mtu;
 	netdev->mtu = new_mtu;
 
-	mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
-	err = bnad_mtu_set(bnad, mtu);
+	frame = BNAD_FRAME_SIZE(mtu);
+	new_frame = BNAD_FRAME_SIZE(new_mtu);
+
+	/* check if multi-buffer needs to be enabled */
+	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+	    netif_running(bnad->netdev)) {
+		/* only when transition is over 4K */
+		if ((frame <= 4096 && new_frame > 4096) ||
+		    (frame > 4096 && new_frame <= 4096))
+			rx_count = bnad_reinit_rx(bnad);
+	}
+
+	/* rx_count > 0 - new rx created
+	 *	- Linux set err = 0 and return
+	 */
+	err = bnad_mtu_set(bnad, new_frame);
 	if (err)
 		err = -EBUSY;
 
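Finally, bnad_change_mtu() tears down and rebuilds the Rx path only when the frame size crosses the 4096-byte boundary in either direction; resizing within the same regime just reprograms the MTU. The transition test in isolation, again assuming BNAD_FRAME_SIZE() is header + VLAN tag + MTU + FCS:

#include <stdio.h>

/* assumed BNAD_FRAME_SIZE: Ethernet header + VLAN tag + MTU + FCS */
#define FRAME_SIZE(mtu)	(14 + 4 + (mtu) + 4)

static int needs_reinit(int old_mtu, int new_mtu)
{
	unsigned int frame = FRAME_SIZE(old_mtu);
	unsigned int new_frame = FRAME_SIZE(new_mtu);

	return (frame <= 4096 && new_frame > 4096) ||
	       (frame > 4096 && new_frame <= 4096);
}

int main(void)
{
	printf("1500 -> 9000: %d\n", needs_reinit(1500, 9000));	/* 1 */
	printf("9000 -> 8000: %d\n", needs_reinit(9000, 8000));	/* 0 */
	printf("1500 -> 1400: %d\n", needs_reinit(1500, 1400));	/* 0 */
	return 0;
}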