@@ -553,6 +553,10 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
 		if (rc)
 			return rc;
 
+		rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb);
+		if (rc)
+			return rc;
+
 		memset(tx_pool->tx_buff, 0,
 		       adapter->req_tx_entries_per_subcrq *
 		       sizeof(struct ibmvnic_tx_buff));
@@ -562,6 +566,7 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
 
 		tx_pool->consumer_index = 0;
 		tx_pool->producer_index = 0;
+		tx_pool->tso_index = 0;
 	}
 
 	return 0;
@@ -581,6 +586,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
 		tx_pool = &adapter->tx_pool[i];
 		kfree(tx_pool->tx_buff);
 		free_long_term_buff(adapter, &tx_pool->long_term_buff);
+		free_long_term_buff(adapter, &tx_pool->tso_ltb);
 		kfree(tx_pool->free_map);
 	}
 
@@ -625,6 +631,16 @@ static int init_tx_pools(struct net_device *netdev)
 			return -1;
 		}
 
+		/* alloc TSO ltb */
+		if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb,
+					 IBMVNIC_TSO_BUFS *
+					 IBMVNIC_TSO_BUF_SZ)) {
+			release_tx_pools(adapter);
+			return -1;
+		}
+
+		tx_pool->tso_index = 0;
+
 		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
 					    sizeof(int), GFP_KERNEL);
 		if (!tx_pool->free_map) {
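Each TX pool now carries a second long-term buffer reserved for TSO frames, managed as a fixed ring of IBMVNIC_TSO_BUFS slots of IBMVNIC_TSO_BUF_SZ bytes each, with tso_index tracking the next slot to hand out. A minimal sketch of the slot arithmetic this allocation implies; the constant values below are assumptions for illustration, and the helper is hypothetical (the real definitions live in ibmvnic.h):

	/* Assumed values for illustration only; see ibmvnic.h for the
	 * definitions this patch actually relies on.
	 */
	#define IBMVNIC_TSO_BUFS	64
	#define IBMVNIC_TSO_BUF_SZ	65536	/* one slot per large send */

	/* Slot i of the TSO long-term buffer starts at a fixed offset,
	 * so no per-frame size bookkeeping is needed.
	 */
	static inline u8 *ibmvnic_tso_slot(struct ibmvnic_long_term_buff *ltb,
					   int i)
	{
		return (u8 *)ltb->buff + (size_t)i * IBMVNIC_TSO_BUF_SZ;
	}
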
@@ -1201,11 +1217,41 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
 
 	index = tx_pool->free_map[tx_pool->consumer_index];
-	offset = index * adapter->req_mtu;
-	dst = tx_pool->long_term_buff.buff + offset;
-	memset(dst, 0, adapter->req_mtu);
-	skb_copy_from_linear_data(skb, dst, skb->len);
-	data_dma_addr = tx_pool->long_term_buff.addr + offset;
+
+	if (skb_is_gso(skb)) {
+		offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ;
+		dst = tx_pool->tso_ltb.buff + offset;
+		memset(dst, 0, IBMVNIC_TSO_BUF_SZ);
+		data_dma_addr = tx_pool->tso_ltb.addr + offset;
+		tx_pool->tso_index++;
+		if (tx_pool->tso_index == IBMVNIC_TSO_BUFS)
+			tx_pool->tso_index = 0;
+	} else {
+		offset = index * adapter->req_mtu;
+		dst = tx_pool->long_term_buff.buff + offset;
+		memset(dst, 0, adapter->req_mtu);
+		data_dma_addr = tx_pool->long_term_buff.addr + offset;
+	}
+
+	if (skb_shinfo(skb)->nr_frags) {
+		int cur, i;
+
+		/* Copy the head */
+		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
+		cur = skb_headlen(skb);
+
+		/* Copy the frags */
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+			memcpy(dst + cur,
+			       page_address(skb_frag_page(frag)) +
+			       frag->page_offset, skb_frag_size(frag));
+			cur += skb_frag_size(frag);
+		}
+	} else {
+		skb_copy_from_linear_data(skb, dst, skb->len);
+	}
+
 	tx_pool->consumer_index =
 		(tx_pool->consumer_index + 1) %
 		adapter->req_tx_entries_per_subcrq;
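Because the descriptor still posts a single scatter-gather entry (n_sge = 1 in the next hunk), a nonlinear skb cannot be handed to firmware frag by frag; the linear head and every page fragment are copied back to back into one contiguous long-term-buffer slot. The same logic pulled out as a self-contained sketch; the helper name is hypothetical, not part of the driver, and like the hunk above it assumes frag pages are directly addressable via page_address():

	/* Flatten an skb (linear head plus page frags) into dst.
	 * Returns the number of bytes copied, which equals skb->len.
	 */
	static unsigned int ibmvnic_flatten_skb(struct sk_buff *skb, u8 *dst)
	{
		unsigned int cur = skb_headlen(skb);
		int i;

		skb_copy_from_linear_data(skb, dst, cur);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
		return cur;
	}
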
@@ -1226,7 +1272,10 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tx_crq.v1.n_sge = 1;
 	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
 	tx_crq.v1.correlator = cpu_to_be32(index);
-	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
+	if (skb_is_gso(skb))
+		tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id);
+	else
+		tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
 
@@ -1251,6 +1300,11 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
 		hdrs += 2;
 	}
+	if (skb_is_gso(skb)) {
+		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
+		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+		hdrs += 2;
+	}
 	/* determine if l2/3/4 headers are sent to firmware */
 	if ((*hdrs >> 7) & 1 &&
 	    (skb->protocol == htons(ETH_P_IP) ||
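The MSS handed to firmware is the stack's gso_size, i.e. the TCP payload bytes per resulting segment after headers. As a worked example (values illustrative): for TCP/IPv4 over a 1500-byte MTU with timestamps enabled, gso_size is typically 1500 - 20 (IP) - 32 (TCP plus options) = 1448, so firmware cuts each large send into 1448-byte payload segments.
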
@@ -2941,14 +2995,14 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
 	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
 	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
 	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
+	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
+	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
 
-	/* large_tx/rx disabled for now, additional features needed */
-	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
-	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
+	/* large_rx disabled for now, additional features needed */
 	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
 	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
 
-	adapter->netdev->features = NETIF_F_GSO;
+	adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
 
 	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
 		adapter->netdev->features |= NETIF_F_IP_CSUM;
@@ -2960,6 +3014,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
 	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
 		adapter->netdev->features |= NETIF_F_RXCSUM;
 
+	if (buf->large_tx_ipv4)
+		adapter->netdev->features |= NETIF_F_TSO;
+	if (buf->large_tx_ipv6)
+		adapter->netdev->features |= NETIF_F_TSO6;
+
+	adapter->netdev->hw_features |= adapter->netdev->features;
+
 	memset(&crq, 0, sizeof(crq));
 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
 	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
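NETIF_F_TSO and NETIF_F_TSO6 are advertised only when firmware reports the corresponding large_tx capability, and mirroring features into hw_features makes the offloads user-toggleable instead of fixed. Assuming an interface name of eth0 purely for illustration, the result can be inspected and toggled from userspace:

	ethtool -k eth0 | grep tcp-segmentation-offload
	ethtool -K eth0 tso off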