@@ -618,6 +618,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
 	case WMI_TLV_TDLS_PEER_EVENTID:
 		ath10k_wmi_event_tdls_peer(ar, skb);
 		break;
+	case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
+		ath10k_wmi_event_mgmt_tx_compl(ar, skb);
+		break;
 	default:
 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
 		break;
@@ -659,6 +662,31 @@ static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
 	return 0;
 }
 
+static int
+ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
+					struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_mgmt_tx_compl_ev *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+
+	arg->desc_id = ev->desc_id;
+	arg->status = ev->status;
+	arg->pdev_id = ev->pdev_id;
+
+	kfree(tb);
+	return 0;
+}
+
 static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_mgmt_rx_ev_arg *arg)
@@ -2612,6 +2640,30 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
 	return skb;
 }
 
+static int
+ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
+				 dma_addr_t paddr)
+{
+	struct ath10k_wmi *wmi = &ar->wmi;
+	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+	int ret;
+
+	pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
+	if (!pkt_addr)
+		return -ENOMEM;
+
+	pkt_addr->vaddr = skb;
+	pkt_addr->paddr = paddr;
+
+	spin_lock_bh(&ar->data_lock);
+	ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
+			wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
+	spin_unlock_bh(&ar->data_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
+	return ret;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
				   dma_addr_t paddr)
@@ -2623,9 +2675,9 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
 	u32 buf_len = msdu->len;
 	struct wmi_tlv *tlv;
 	struct sk_buff *skb;
+	int len, desc_id;
 	u32 vdev_id;
 	void *ptr;
-	int len;
 
 	if (!cb->vif)
 		return ERR_PTR(-EINVAL);
@@ -2656,13 +2708,17 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
+	desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
+	if (desc_id < 0)
+		goto err_free_skb;
+
 	ptr = (void *)skb->data;
 	tlv = ptr;
 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
 	tlv->len = __cpu_to_le16(sizeof(*cmd));
 	cmd = (void *)tlv->value;
 	cmd->vdev_id = __cpu_to_le32(vdev_id);
-	cmd->desc_id = 0;
+	cmd->desc_id = __cpu_to_le32(desc_id);
 	cmd->chanfreq = 0;
 	cmd->buf_len = __cpu_to_le32(buf_len);
 	cmd->frame_len = __cpu_to_le32(msdu->len);
@@ -2679,6 +2735,10 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
 	memcpy(ptr, msdu->data, buf_len);
 
 	return skb;
+
+err_free_skb:
+	dev_kfree_skb(skb);
+	return ERR_PTR(desc_id);
 }
 
 static struct sk_buff *
@@ -3843,6 +3903,7 @@ static const struct wmi_ops wmi_tlv_ops = {
 
 	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
 	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+	.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
 	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
 	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
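
For context: the desc_id allocated by ath10k_wmi_mgmt_tx_alloc_msdu_id() at transmit time is carried in the WMI mgmt tx command and echoed back by firmware in WMI_TLV_MGMT_TX_COMPLETION_EVENTID, which is what lets the completion path find the pending frame again. The event handler wired up in the first hunk (ath10k_wmi_event_mgmt_tx_compl) is not part of this diff; the sketch below is only an illustration of how such a handler could consume the parsed event, assuming the pending-tx IDR (ar->wmi.mgmt_pending_tx) and struct ath10k_mgmt_tx_pkt_addr fields used by ath10k_wmi_mgmt_tx_alloc_msdu_id() above. The helper name ath10k_wmi_process_mgmt_tx_comp() is hypothetical.

/* Illustrative sketch only -- not part of this patch. */
static int ath10k_wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
					   u32 status)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
	struct ieee80211_tx_info *info;
	struct sk_buff *msdu;
	int ret;

	spin_lock_bh(&ar->data_lock);

	/* Look up the frame that was registered under this desc_id at tx time. */
	pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
	if (!pkt_addr) {
		ath10k_warn(ar, "no pending mgmt tx frame for desc_id %u\n",
			    desc_id);
		ret = -ENOENT;
		goto out;
	}

	msdu = pkt_addr->vaddr;
	dma_unmap_single(ar->dev, pkt_addr->paddr, msdu->len, DMA_TO_DEVICE);

	/* Report the frame back to mac80211 with the ACK status taken from
	 * the firmware event (0 means the frame was acked).
	 */
	info = IEEE80211_SKB_CB(msdu);
	if (!status)
		info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	/* Release the descriptor id so it can be reused for a later frame. */
	idr_remove(&wmi->mgmt_pending_tx, desc_id);
	kfree(pkt_addr);
	ret = 0;

out:
	spin_unlock_bh(&ar->data_lock);
	return ret;
}

A caller would typically pull the event with the .pull_mgmt_tx_compl op added above and pass __le32_to_cpu(arg.desc_id) and __le32_to_cpu(arg.status) into a helper like this.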