@@ -530,6 +530,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
 			val = PIDX_T5_V(q->pend_cred / 8) |
 			      DBTYPE_F;
 		val |= DBPRIO_F;
+
+		/* Make sure all memory writes to the Free List queue are
+		 * committed before we tell the hardware about them.
+		 */
 		wmb();
 
 		/* If we don't have access to the new User Doorbell (T5+), use
@@ -920,7 +924,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
  */
 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 {
-	wmb();            /* write descriptors before telling HW */
+	/* Make sure that all writes to the TX Descriptors are committed
+	 * before we tell the hardware about them.
+	 */
+	wmb();
 
 	/* If we don't have access to the new User Doorbell (T5+), use the old
 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
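
Both hunks above replace a bare (or tersely commented) wmb() with a documented barrier: the TX descriptors and Free List entries are written to coherent DMA memory, and the write barrier must order those stores ahead of the doorbell write that tells the hardware to fetch them. A minimal sketch of the pattern the new comments describe follows; the function and register names are placeholders invented for illustration, and only wmb() and writel() are real kernel APIs.

	#include <linux/types.h>
	#include <linux/io.h>
	#include <asm/barrier.h>

	/* Sketch only: "publish descriptors, then ring the doorbell".
	 * my_db_reg and pidx_val are invented names, not cxgb4 ones.
	 */
	static void example_ring_db(void __iomem *my_db_reg, u32 pidx_val)
	{
		/* 1. Descriptor/Free List writes to DMA memory happen earlier. */

		/* 2. Order those writes ahead of the MMIO doorbell write. */
		wmb();

		/* 3. Only now tell the hardware new descriptors are ready. */
		writel(pidx_val, my_db_reg);
	}
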
@@ -1037,7 +1044,7 @@ nocsum: /*
 			 * unknown protocol, disable HW csum
 			 * and hope a bad packet is detected
 			 */
-			return TXPKT_L4CSUM_DIS;
+			return TXPKT_L4CSUM_DIS_F;
 		}
 	} else {
 		/*
@@ -1054,14 +1061,15 @@ nocsum: /*
 	}
 
 	if (likely(csum_type >= TX_CSUM_TCPIP))
-		return TXPKT_CSUM_TYPE(csum_type) |
-			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
-			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+		return TXPKT_CSUM_TYPE_V(csum_type) |
+		       TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) |
+		       TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN);
 	else {
 		int start = skb_transport_offset(skb);
 
-		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
-			TXPKT_CSUM_LOC(start + skb->csum_offset);
+		return TXPKT_CSUM_TYPE_V(csum_type) |
+		       TXPKT_CSUM_START_V(start) |
+		       TXPKT_CSUM_LOC_V(start + skb->csum_offset);
 	}
 }
 
@@ -1102,11 +1110,11 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
 		return -ENOTSUPP;
 
 	/* FC CRC offload */
-	*cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
-		     TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
-		     TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
-		     TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
-		     TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
+	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
+		 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
+		 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
+		 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
+		 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
 	return 0;
 }
 #endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1159,7 +1167,7 @@ out_free: dev_kfree_skb_any(skb);
 	q = &adap->sge.ethtxq[qidx + pi->first_qset];
 
 	reclaim_completed_tx(adap, &q->q, true);
-	cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
 
 #ifdef CONFIG_CHELSIO_T4_FCOE
 	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
@@ -1210,23 +1218,23 @@ out_free: dev_kfree_skb_any(skb);
 		len += sizeof(*lso);
 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
 				       FW_WR_IMMDLEN_V(len));
-		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
-					LSO_FIRST_SLICE | LSO_LAST_SLICE |
-					LSO_IPV6(v6) |
-					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
-					LSO_IPHDR_LEN(l3hdr_len / 4) |
-					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+		lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+					LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+					LSO_IPV6_V(v6) |
+					LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+					LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
 		lso->c.ipid_ofst = htons(0);
 		lso->c.mss = htons(ssi->gso_size);
 		lso->c.seqno_offset = htonl(0);
 		if (is_t4(adap->params.chip))
 			lso->c.len = htonl(skb->len);
 		else
-			lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
+			lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
 		cpl = (void *)(lso + 1);
-		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
-			TXPKT_IPHDR_LEN(l3hdr_len) |
-			TXPKT_ETHHDR_LEN(eth_xtra_len);
+		cntrl = TXPKT_CSUM_TYPE_V(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+			TXPKT_IPHDR_LEN_V(l3hdr_len) |
+			TXPKT_ETHHDR_LEN_V(eth_xtra_len);
 		q->tso++;
 		q->tx_cso += ssi->gso_segs;
 	} else {
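
A note on the LSO hunk above: the header-length fields in the cpl_tx_pkt_lso control word appear to be counted in 32-bit words, which is why the extra Ethernet header length and the IP header length are divided by 4 while tcp_hdr(skb)->doff (already a count of 32-bit words) is passed through unchanged. A hedged sketch of just that arithmetic, outside the driver's structures:

	#include <linux/skbuff.h>
	#include <linux/tcp.h>
	#include <linux/if_ether.h>

	/* Sketch: compute the LSO header lengths in 32-bit words, mirroring
	 * the inline arithmetic in the hunk above.  Not a driver function.
	 */
	static void example_lso_hdr_words(const struct sk_buff *skb,
					  unsigned int *eth_xtra_words,
					  unsigned int *ip_words,
					  unsigned int *tcp_words)
	{
		/* bytes of Ethernet header beyond the basic 14-byte header */
		unsigned int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		*eth_xtra_words = eth_xtra_len / 4;
		*ip_words = skb_network_header_len(skb) / 4;
		*tcp_words = tcp_hdr(skb)->doff;	/* already in words */
	}
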
@@ -1235,23 +1243,24 @@ out_free: dev_kfree_skb_any(skb);
 				       FW_WR_IMMDLEN_V(len));
 		cpl = (void *)(wr + 1);
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F;
 			q->tx_cso++;
 		}
 	}
 
 	if (skb_vlan_tag_present(skb)) {
 		q->vlan_ins++;
-		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
 #ifdef CONFIG_CHELSIO_T4_FCOE
 		if (skb->protocol == htons(ETH_P_FCOE))
-			cntrl |= TXPKT_VLAN(
+			cntrl |= TXPKT_VLAN_V(
 				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 	}
 
-	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
-			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
+	cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+			   TXPKT_INTF_V(pi->tx_chan) |
+			   TXPKT_PF_V(adap->fn));
 	cpl->pack = htons(0);
 	cpl->len = htons(skb->len);
 	cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1961,7 +1970,7 @@ static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
 static inline bool is_new_response(const struct rsp_ctrl *r,
 				   const struct sge_rspq *q)
 {
-	return RSPD_GEN(r->type_gen) == q->gen;
+	return (r->type_gen >> RSPD_GEN_S) == q->gen;
 }
 
 /**
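
The renames throughout this patch follow the cxgb4 field-macro convention: for a field FOO the headers typically define FOO_S (bit shift), FOO_M (mask), FOO_V(x) (place a value into the field), FOO_G(x) (extract it), FOO_F (a single-bit flag), and, separately, FOO_X names for symbolic field values. A sketch of how such a family is usually laid out; the EXAMPLE_GEN field below is invented for illustration and is not one of the driver's definitions:

	/* Invented field, mirroring the _S/_M/_V/_G/_F style used above */
	#define EXAMPLE_GEN_S		7
	#define EXAMPLE_GEN_M		0x1U
	#define EXAMPLE_GEN_V(x)	((x) << EXAMPLE_GEN_S)
	#define EXAMPLE_GEN_G(x)	(((x) >> EXAMPLE_GEN_S) & EXAMPLE_GEN_M)
	#define EXAMPLE_GEN_F		EXAMPLE_GEN_V(1U)

With that layout, an old accessor such as RSPD_GEN(r->type_gen) becomes either RSPD_GEN_G(r->type_gen) or, as in the is_new_response() hunk above, an open-coded shift by RSPD_GEN_S when the field occupies the top bits and no mask is needed.
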
@@ -2008,19 +2017,19 @@ static int process_responses(struct sge_rspq *q, int budget)
 			break;
 
 		dma_rmb();
-		rsp_type = RSPD_TYPE(rc->type_gen);
-		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+		rsp_type = RSPD_TYPE_G(rc->type_gen);
+		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
 			struct page_frag *fp;
 			struct pkt_gl si;
 			const struct rx_sw_desc *rsd;
 			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
 
-			if (len & RSPD_NEWBUF) {
+			if (len & RSPD_NEWBUF_F) {
 				if (likely(q->offset > 0)) {
 					free_rx_bufs(q->adap, &rxq->fl, 1);
 					q->offset = 0;
 				}
-				len = RSPD_LEN(len);
+				len = RSPD_LEN_G(len);
 			}
 			si.tot_len = len;
 
@@ -2055,7 +2064,7 @@ static int process_responses(struct sge_rspq *q, int budget)
 				q->offset += ALIGN(fp->size, s->fl_align);
 			else
 				restore_rx_bufs(&si, &rxq->fl, frags);
-		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
+		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
 			ret = q->handler(q, q->cur_desc, NULL);
 		} else {
 			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
@@ -2063,7 +2072,7 @@ static int process_responses(struct sge_rspq *q, int budget)
 
 		if (unlikely(ret)) {
 			/* couldn't process descriptor, back off for recovery */
-			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+			q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
 			break;
 		}
 
@@ -2087,7 +2096,7 @@ int cxgb_busy_poll(struct napi_struct *napi)
 		return LL_FLUSH_BUSY;
 
 	work_done = process_responses(q, 4);
-	params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
+	params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
 	q->next_intr_params = params;
 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
 
@@ -2134,7 +2143,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
 		int timer_index;
 
 		napi_complete(napi);
-		timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
+		timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
 
 		if (q->adaptive_rx) {
 			if (work_done > max(timer_pkt_quota[timer_index],
@@ -2144,15 +2153,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
 				timer_index = timer_index - 1;
 
 			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
-			q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
-					      V_QINTR_CNT_EN;
+			q->next_intr_params =
+					QINTR_TIMER_IDX_V(timer_index) |
+					QINTR_CNT_EN_V(0);
 			params = q->next_intr_params;
 		} else {
 			params = q->next_intr_params;
 			q->next_intr_params = q->intr_params;
 		}
 	} else
-		params = QINTR_TIMER_IDX(7);
+		params = QINTR_TIMER_IDX_V(7);
 
 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
 
@@ -2200,7 +2210,7 @@ static unsigned int process_intrq(struct adapter *adap)
 			break;
 
 		dma_rmb();
-		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+		if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
 			unsigned int qid = ntohl(rc->pldbuflen_qid);
 
 			qid -= adap->sge.ingr_start;
@@ -2411,7 +2421,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 			    FW_LEN16(c));
 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
 		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
-		FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
+		FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
+		FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
 		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
 					 -intr_idx - 1));
 	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
@@ -2450,8 +2461,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 				htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
 				      FW_IQ_CMD_FL0CONGCIF_F |
 				      FW_IQ_CMD_FL0CONGEN_F);
-		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
-						     FW_IQ_CMD_FL0FBMAX_V(3));
+		c.fl0dcaen_to_fl0cidxfthresh =
+			htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+			      FW_IQ_CMD_FL0FBMAX_V(FETCHBURSTMAX_512B_X));
 		c.fl0size = htons(flsz);
 		c.fl0addr = cpu_to_be64(fl->addr);
 	}
@@ -2595,14 +2607,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 			    FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
 	c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
 			   FW_EQ_ETH_CMD_VIID_V(pi->viid));
-	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
-				   FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
-				   FW_EQ_ETH_CMD_FETCHRO_V(1) |
-				   FW_EQ_ETH_CMD_IQID_V(iqid));
-	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
-				  FW_EQ_ETH_CMD_FBMAX_V(3) |
-				  FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
-				  FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+	c.fetchszm_to_iqid =
+		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+	c.dcaen_to_eqsize =
+		htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+		      FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+		      FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+		      FW_EQ_ETH_CMD_EQSIZE_V(nentries));
 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -2649,14 +2662,15 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 			    FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
 	c.physeqid_pkd = htonl(0);
-	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
-				   FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
-				   FW_EQ_CTRL_CMD_FETCHRO_F |
-				   FW_EQ_CTRL_CMD_IQID_V(iqid));
-	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
-				  FW_EQ_CTRL_CMD_FBMAX_V(3) |
-				  FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
-				  FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
+	c.fetchszm_to_iqid =
+		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
+	c.dcaen_to_eqsize =
+		htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -2701,14 +2715,15 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 			    FW_EQ_OFLD_CMD_VFN_V(0));
 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
 				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
-	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
-				   FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
-				   FW_EQ_OFLD_CMD_FETCHRO_F |
-				   FW_EQ_OFLD_CMD_IQID_V(iqid));
-	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
-				  FW_EQ_OFLD_CMD_FBMAX_V(3) |
-				  FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
-				  FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
+	c.fetchszm_to_iqid =
+		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
+	c.dcaen_to_eqsize =
+		htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
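
The three TX-queue allocation hunks above (Ethernet, control, and offload queues) make the same substitution: bare magic numbers in the firmware EQ command fields are replaced with named selector values, and the earlier IQ command hunk gets the same treatment for its IQANUD field. Reading the one-for-one replacements in the diff itself gives the mapping below; the EX_ duplicates exist only to make that mapping explicit and are not the driver's real definitions (those live in the driver's value headers):

	/* Selector values implied by the substitutions in the hunks above. */
	#define EX_HOSTFCMODE_STATUS_PAGE_X	2	/* was FW_EQ_*_CMD_HOSTFCMODE_V(2)  */
	#define EX_FETCHBURSTMIN_64B_X		2	/* was FW_EQ_*_CMD_FBMIN_V(2)       */
	#define EX_FETCHBURSTMAX_512B_X		3	/* was FW_EQ_*_CMD_FBMAX_V(3)       */
	#define EX_CIDXFLUSHTHRESH_32_X		5	/* was FW_EQ_*_CMD_CIDXFTHRESH_V(5) */
	#define EX_UPDATEDELIVERY_INTERRUPT_X	1	/* was FW_IQ_CMD_IQANUD_V(1)        */

The point of the change is that these fields carry encoded selectors (for example, a fetch-burst-size code), not literal byte counts, so a symbolic name reads better than a bare 2, 3, or 5.
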
@@ -3023,7 +3038,11 @@ int t4_sge_init(struct adapter *adap)
 	 * Packing Boundary. T5 introduced the ability to specify these
 	 * separately. The actual Ingress Packet Data alignment boundary
 	 * within Packed Buffer Mode is the maximum of these two
-	 * specifications.
+	 * specifications. (Note that it makes no real practical sense to
+	 * have the Padding Boundary be larger than the Packing Boundary but
+	 * you could set the chip up that way and, in fact, legacy T4 code
+	 * would end up doing this because it would initialize the Padding
+	 * Boundary and leave the Packing Boundary initialized to 0 (16 bytes).)
 	 */
 	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
 			       INGPADBOUNDARY_SHIFT_X);
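
The expanded comment describes the rule the surrounding code implements: under Packed Buffer Mode the effective ingress data alignment is the larger of the Padding Boundary and the Packing Boundary, so a legacy configuration that only programs the Padding Boundary (leaving the Packing Boundary at its 16-byte default) still ends up with the padding value in force. A one-function sketch of that rule, not the driver's exact statement (which first decodes both boundaries from their registers):

	#include <linux/kernel.h>

	/* Sketch: effective free-list alignment is the max of the two
	 * boundaries, both already converted to byte counts.
	 */
	static unsigned int example_fl_align(unsigned int ingpadboundary,
					     unsigned int ingpackboundary)
	{
		return max(ingpadboundary, ingpackboundary);
	}
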
@@ -3069,6 +3088,9 @@ int t4_sge_init(struct adapter *adap)
 
 	t4_idma_monitor_init(adap, &s->idma_monitor);
 
+	/* Set up timers used for recurring callbacks to process RX and TX
+	 * administrative tasks.
+	 */
 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
 	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
 