@@ -653,8 +653,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
 						cmd->hdr.return_code, card);
 				}
 				card->lan_online = 0;
-				if (card->dev && netif_carrier_ok(card->dev))
-					netif_carrier_off(card->dev);
+				netif_carrier_off(card->dev);
 				return NULL;
 			case IPA_CMD_STARTLAN:
 				dev_info(&card->gdev->dev,
@@ -1921,7 +1920,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
 		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
 		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
 	}
-	tmp = ((__u8)card->info.portno) | 0x80;
+	tmp = ((u8)card->dev->dev_port) | 0x80;
 	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
 	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
 	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
@@ -2279,19 +2278,42 @@ static int qeth_cm_setup(struct qeth_card *card)
 
 }
 
-static int qeth_get_initial_mtu_for_card(struct qeth_card *card)
+static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
 {
-	switch (card->info.type) {
-	case QETH_CARD_TYPE_IQD:
-		return card->info.max_mtu;
-	case QETH_CARD_TYPE_OSD:
-	case QETH_CARD_TYPE_OSX:
-		if (!card->options.layer2)
-			return ETH_DATA_LEN - 8; /* L3: allow for LLC + SNAP */
-		/* fall through */
-	default:
-		return ETH_DATA_LEN;
+	struct net_device *dev = card->dev;
+	unsigned int new_mtu;
+
+	if (!max_mtu) {
+		/* IQD needs accurate max MTU to set up its RX buffers: */
+		if (IS_IQD(card))
+			return -EINVAL;
+		/* tolerate quirky HW: */
+		max_mtu = ETH_MAX_MTU;
+	}
+
+	rtnl_lock();
+	if (IS_IQD(card)) {
+		/* move any device with default MTU to new max MTU: */
+		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
+
+		/* adjust RX buffer size to new max MTU: */
+		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
+		if (dev->max_mtu && dev->max_mtu != max_mtu)
+			qeth_free_qdio_buffers(card);
+	} else {
+		if (dev->mtu)
+			new_mtu = dev->mtu;
+		/* default MTUs for first setup: */
+		else if (card->options.layer2)
+			new_mtu = ETH_DATA_LEN;
+		else
+			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
 	}
+
+	dev->max_mtu = max_mtu;
+	dev->mtu = min(new_mtu, max_mtu);
+	rtnl_unlock();
+	return 0;
 }
 
 static int qeth_get_mtu_outof_framesize(int framesize)
@@ -2310,21 +2332,6 @@ static int qeth_get_mtu_outof_framesize(int framesize)
 	}
 }
 
-static int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
-{
-	switch (card->info.type) {
-	case QETH_CARD_TYPE_OSD:
-	case QETH_CARD_TYPE_OSM:
-	case QETH_CARD_TYPE_OSX:
-	case QETH_CARD_TYPE_IQD:
-		return ((mtu >= 576) &&
-			(mtu <= card->info.max_mtu));
-	case QETH_CARD_TYPE_OSN:
-	default:
-		return 1;
-	}
-}
-
 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
 		unsigned long data)
 {
@@ -2343,29 +2350,10 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
 	if (card->info.type == QETH_CARD_TYPE_IQD) {
 		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
 		mtu = qeth_get_mtu_outof_framesize(framesize);
-		if (!mtu) {
-			iob->rc = -EINVAL;
-			QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
-			return 0;
-		}
-		if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
-			/* frame size has changed */
-			if (card->dev &&
-			    ((card->dev->mtu == card->info.initial_mtu) ||
-			     (card->dev->mtu > mtu)))
-				card->dev->mtu = mtu;
-			qeth_free_qdio_buffers(card);
-		}
-		card->info.initial_mtu = mtu;
-		card->info.max_mtu = mtu;
-		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
 	} else {
-		card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
-			iob->data);
-		card->info.initial_mtu = min(card->info.max_mtu,
-				qeth_get_initial_mtu_for_card(card));
-		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
+		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
 	}
+	*(u16 *)reply->param = mtu;
 
 	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
 	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
@@ -2384,6 +2372,7 @@ static int qeth_ulp_enable(struct qeth_card *card)
 	int rc;
 	char prot_type;
 	struct qeth_cmd_buffer *iob;
+	u16 max_mtu;
 
 	/*FIXME: trace view callbacks*/
 	QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
@@ -2391,8 +2380,7 @@ static int qeth_ulp_enable(struct qeth_card *card)
 	iob = qeth_wait_for_buffer(&card->write);
 	memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
 
-	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
-		(__u8) card->info.portno;
+	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
 	if (card->options.layer2)
 		if (card->info.type == QETH_CARD_TYPE_OSN)
 			prot_type = QETH_PROT_OSN2;
@@ -2407,9 +2395,10 @@ static int qeth_ulp_enable(struct qeth_card *card)
 	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
 	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
 	rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
-				    qeth_ulp_enable_cb, NULL);
-	return rc;
-
+				    qeth_ulp_enable_cb, &max_mtu);
+	if (rc)
+		return rc;
+	return qeth_update_max_mtu(card, max_mtu);
 }
 
 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -2920,7 +2909,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
 	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
 	/* cmd->hdr.seqno is set by qeth_send_control_data() */
 	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
-	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
+	cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
 	if (card->options.layer2)
 		cmd->hdr.prim_version_no = 2;
 	else
@@ -3506,13 +3495,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
 	if (atomic_read(&queue->set_pci_flags_count))
 		qdio_flags |= QDIO_FLAG_PCI_OUT;
+	atomic_add(count, &queue->used_buffers);
+
 	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
 		     queue->queue_no, index, count);
 	if (queue->card->options.performance_stats)
 		queue->card->perf_stats.outbound_do_qdio_time +=
 			qeth_get_micros() -
 			queue->card->perf_stats.outbound_do_qdio_start_time;
-	atomic_add(count, &queue->used_buffers);
 	if (rc) {
 		queue->card->stats.tx_errors += count;
 		/* ignore temporary SIGA errors without busy condition */
@@ -3577,7 +3567,7 @@ static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
 {
 	struct qeth_card *card = (struct qeth_card *)card_ptr;
 
-	if (card->dev && (card->dev->flags & IFF_UP))
+	if (card->dev->flags & IFF_UP)
 		napi_schedule(&card->napi);
 }
 
@@ -3841,6 +3831,17 @@ int qeth_get_elements_for_frags(struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 
+static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
+{
+	unsigned int elements = qeth_get_elements_for_frags(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	addr_t start = (addr_t)skb->data + data_offset;
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
+	return elements;
+}
+
 /**
  * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags.
  * @card: qeth card structure, to check max. elems.
@@ -3856,12 +3857,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 int qeth_get_elements_no(struct qeth_card *card,
 		     struct sk_buff *skb, int extra_elems, int data_offset)
 {
-	addr_t end = (addr_t)skb->data + skb_headlen(skb);
-	int elements = qeth_get_elements_for_frags(skb);
-	addr_t start = (addr_t)skb->data + data_offset;
-
-	if (start != end)
-		elements += qeth_get_elements_for_range(start, end);
+	int elements = qeth_count_elements(skb, data_offset);
 
 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
@@ -3895,32 +3891,87 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
 EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
 
 /**
- * qeth_push_hdr() - push a qeth_hdr onto an skb.
- * @skb: skb that the qeth_hdr should be pushed onto.
+ * qeth_add_hw_header() - add a HW header to an skb.
+ * @skb: skb that the HW header should be added to.
  * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
  *	 it contains a valid pointer to a qeth_hdr.
- * @len: length of the hdr that needs to be pushed on.
+ * @hdr_len: length of the HW header.
+ * @proto_len: length of protocol headers that need to be in same page as the
+ *	       HW header.
  *
  * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
+ * The number of needed buffer elements is returned in @elements.
 * Error to create the hdr is indicated by returning with < 0.
 */
-int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len)
-{
-	if (skb_headroom(skb) >= len &&
-	    qeth_get_elements_for_range((addr_t)skb->data - len,
-					(addr_t)skb->data) == 1) {
-		*hdr = skb_push(skb, len);
-		return len;
+int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
+		       struct qeth_hdr **hdr, unsigned int hdr_len,
+		       unsigned int proto_len, unsigned int *elements)
+{
+	const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+	const unsigned int contiguous = proto_len ? proto_len : 1;
+	unsigned int __elements;
+	addr_t start, end;
+	bool push_ok;
+	int rc;
+
+check_layout:
+	start = (addr_t)skb->data - hdr_len;
+	end = (addr_t)skb->data;
+
+	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
+		/* Push HW header into same page as first protocol header. */
+		push_ok = true;
+		__elements = qeth_count_elements(skb, 0);
+	} else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
+		/* Push HW header into a new page. */
+		push_ok = true;
+		__elements = 1 + qeth_count_elements(skb, 0);
+	} else {
+		/* Use header cache, copy protocol headers up. */
+		push_ok = false;
+		__elements = 1 + qeth_count_elements(skb, proto_len);
+	}
+
+	/* Compress skb to fit into one IO buffer: */
+	if (__elements > max_elements) {
+		if (!skb_is_nonlinear(skb)) {
+			/* Drop it, no easy way of shrinking it further. */
+			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
+					 max_elements, __elements, skb->len);
+			return -E2BIG;
+		}
+
+		rc = skb_linearize(skb);
+		if (card->options.performance_stats) {
+			if (rc)
+				card->perf_stats.tx_linfail++;
+			else
+				card->perf_stats.tx_lin++;
+		}
+		if (rc)
+			return rc;
+
+		/* Linearization changed the layout, re-evaluate: */
+		goto check_layout;
+	}
+
+	*elements = __elements;
+	/* Add the header: */
+	if (push_ok) {
+		*hdr = skb_push(skb, hdr_len);
+		return hdr_len;
 	}
 	/* fall back */
 	*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
 	if (!*hdr)
 		return -ENOMEM;
+	/* Copy protocol headers behind HW header: */
+	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_push_hdr);
+EXPORT_SYMBOL_GPL(qeth_add_hw_header);
 
 static void __qeth_fill_buffer(struct sk_buff *skb,
 			       struct qeth_qdio_out_buffer *buf,
@@ -4200,24 +4251,6 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
 }
 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
 
-int qeth_change_mtu(struct net_device *dev, int new_mtu)
-{
-	struct qeth_card *card;
-	char dbf_text[15];
-
-	card = dev->ml_priv;
-
-	QETH_CARD_TEXT(card, 4, "chgmtu");
-	sprintf(dbf_text, "%8x", new_mtu);
-	QETH_CARD_TEXT(card, 4, dbf_text);
-
-	if (!qeth_mtu_is_valid(card, new_mtu))
-		return -EINVAL;
-	dev->mtu = new_mtu;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(qeth_change_mtu);
-
 struct net_device_stats *qeth_get_stats(struct net_device *dev)
 {
 	struct qeth_card *card;
@@ -4793,9 +4826,6 @@ int qeth_vm_request_mac(struct qeth_card *card)
 
 	QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
 
-	if (!card->dev)
-		return -ENODEV;
-
 	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
 	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
 	if (!request || !response) {
@@ -5675,6 +5705,53 @@ static void qeth_clear_dbf_list(void)
 	mutex_unlock(&qeth_dbf_list_mutex);
 }
 
+static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
+{
+	struct net_device *dev;
+
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_IQD:
+		dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
+		break;
+	case QETH_CARD_TYPE_OSN:
+		dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
+		break;
+	default:
+		dev = alloc_etherdev(0);
+	}
+
+	if (!dev)
+		return NULL;
+
+	dev->ml_priv = card;
+	dev->watchdog_timeo = QETH_TX_TIMEOUT;
+	dev->min_mtu = IS_OSN(card) ? 64 : 576;
+	/* initialized when device first goes online: */
+	dev->max_mtu = 0;
+	dev->mtu = 0;
+	SET_NETDEV_DEV(dev, &card->gdev->dev);
+	netif_carrier_off(dev);
+
+	if (!IS_OSN(card)) {
+		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+		dev->hw_features |= NETIF_F_SG;
+		dev->vlan_features |= NETIF_F_SG;
+	}
+
+	return dev;
+}
+
+struct net_device *qeth_clone_netdev(struct net_device *orig)
+{
+	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
+
+	if (!clone)
+		return NULL;
+
+	clone->dev_port = orig->dev_port;
+	return clone;
+}
+
 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card;
@@ -5724,6 +5801,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 			goto err_card;
 	}
 
+	card->dev = qeth_alloc_netdev(card);
+	if (!card->dev)
+		goto err_card;
+
 	qeth_determine_capabilities(card);
 	enforced_disc = qeth_enforce_discipline(card);
 	switch (enforced_disc) {
@@ -5734,7 +5815,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 		card->info.layer_enforced = true;
 		rc = qeth_core_load_discipline(card, enforced_disc);
 		if (rc)
-			goto err_card;
+			goto err_load;
 
 		gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
 					? card->discipline->devtype
@@ -5752,6 +5833,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 
 err_disc:
 	qeth_core_free_discipline(card);
+err_load:
+	free_netdev(card->dev);
 err_card:
 	qeth_core_free_card(card);
 err_dev:
@@ -5774,10 +5857,10 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
 	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
 	list_del(&card->list);
 	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+	free_netdev(card->dev);
 	qeth_core_free_card(card);
 	dev_set_drvdata(&gdev->dev, NULL);
 	put_device(&gdev->dev);
-	return;
 }
 
 static int qeth_core_set_online(struct ccwgroup_device *gdev)
@@ -5955,7 +6038,7 @@ static struct {
 	{"tx skbs packing"},
 	{"tx buffers packing"},
 	{"tx sg skbs"},
-	{"tx sg frags"},
+	{"tx buffer elements"},
 /* 10 */{"rx sg skbs"},
 	{"rx sg frags"},
 	{"rx sg page allocs"},
@@ -6014,7 +6097,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
 	data[6] = card->perf_stats.skbs_sent_pack;
 	data[7] = card->perf_stats.bufs_sent_pack;
 	data[8] = card->perf_stats.sg_skbs_sent;
-	data[9] = card->perf_stats.sg_frags_sent;
+	data[9] = card->perf_stats.buf_elements_sent;
 	data[10] = card->perf_stats.sg_skbs_rx;
 	data[11] = card->perf_stats.sg_frags_rx;
 	data[12] = card->perf_stats.sg_alloc_page_rx;