@@ -111,12 +111,71 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	}
 }

+struct qed_mcp_cmd_elem {
+	struct list_head list;
+	struct qed_mcp_mb_params *p_mb_params;
+	u16 expected_seq_num;
+	bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *
+qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
+		     struct qed_mcp_mb_params *p_mb_params,
+		     u16 expected_seq_num)
+{
+	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
+	if (!p_cmd_elem)
+		goto out;
+
+	p_cmd_elem->p_mb_params = p_mb_params;
+	p_cmd_elem->expected_seq_num = expected_seq_num;
+	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+	return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
+				 struct qed_mcp_cmd_elem *p_cmd_elem)
+{
+	list_del(&p_cmd_elem->list);
+	kfree(p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
+						     u16 seq_num)
+{
+	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
+		if (p_cmd_elem->expected_seq_num == seq_num)
+			return p_cmd_elem;
+	}
+
+	return NULL;
+}
+
 int qed_mcp_free(struct qed_hwfn *p_hwfn)
 {
 	if (p_hwfn->mcp_info) {
+		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
+
 		kfree(p_hwfn->mcp_info->mfw_mb_cur);
 		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+
+		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+		list_for_each_entry_safe(p_cmd_elem,
+					 p_tmp,
+					 &p_hwfn->mcp_info->cmd_list, list) {
+			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+		}
+		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 	}
+
 	kfree(p_hwfn->mcp_info);

 	return 0;
@@ -160,7 +219,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

-	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

 	return 0;
 }
@@ -176,6 +235,12 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		goto err;
 	p_info = p_hwfn->mcp_info;

+	/* Initialize the MFW spinlock */
+	spin_lock_init(&p_info->cmd_lock);
+	spin_lock_init(&p_info->link_lock);
+
+	INIT_LIST_HEAD(&p_info->cmd_list);
+
 	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
 		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
 		/* Do not free mcp_info here, since public_base indicate that
@@ -190,10 +255,6 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
 		goto err;

-	/* Initialize the MFW spinlock */
-	spin_lock_init(&p_info->lock);
-	spin_lock_init(&p_info->link_lock);
-
 	return 0;

 err:
@@ -201,68 +262,39 @@ err:
 	return -ENOMEM;
 }

-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
+static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt)
 {
-	spin_lock_bh(&p_hwfn->mcp_info->lock);
-
-	/* The spinlock shouldn't be acquired when the mailbox command is
-	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
-	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
-	 * (i.e. interrupts are disabled) - can lead to a deadlock.
-	 * It is assumed that for a single PF, no other mailbox commands can be
-	 * sent from another context while sending LOAD_REQ, and that any
-	 * parallel commands to UNLOAD_REQ can be cancelled.
-	 */
-	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
-		p_hwfn->mcp_info->block_mb_sending = false;
+	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

-	if (p_hwfn->mcp_info->block_mb_sending) {
-		DP_NOTICE(p_hwfn,
-			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
-			  cmd);
-		spin_unlock_bh(&p_hwfn->mcp_info->lock);
-		return -EBUSY;
-	}
+	/* Use MCP history register to check if MCP reset occurred between init
+	 * time and now.
+	 */
+	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_SP,
+			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

-	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
-		p_hwfn->mcp_info->block_mb_sending = true;
-		spin_unlock_bh(&p_hwfn->mcp_info->lock);
+		qed_load_mcp_offsets(p_hwfn, p_ptt);
+		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
 	}
-
-	return 0;
-}
-
-static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
-{
-	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
-		spin_unlock_bh(&p_hwfn->mcp_info->lock);
 }

 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
-	u8 delay = CHIP_MCP_RESP_ITER_US;
-	u32 org_mcp_reset_seq, cnt = 0;
+	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
 	int rc = 0;

-	/* Ensure that only a single thread is accessing the mailbox at a
-	 * certain time.
-	 */
-	rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
-	if (rc != 0)
-		return rc;
+	/* Ensure that only a single thread is accessing the mailbox */
+	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

-	/* Set drv command along with the updated sequence */
 	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
-	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
-		  (DRV_MSG_CODE_MCP_RESET | seq));
+
+	/* Set drv command along with the updated sequence */
+	qed_mcp_reread_offsets(p_hwfn, p_ptt);
+	seq = ++p_hwfn->mcp_info->drv_mb_seq;
+	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

 	do {
 		/* Wait for MFW response */
@@ -281,72 +313,205 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		rc = -EAGAIN;
 	}

-	qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

 	return rc;
 }

-static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
-			  struct qed_ptt *p_ptt,
-			  u32 cmd,
-			  u32 param,
-			  u32 *o_mcp_resp,
-			  u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
 {
-	u8 delay = CHIP_MCP_RESP_ITER_US;
-	u32 seq, cnt = 1, actual_mb_seq;
-	int rc = 0;
-
-	/* Get actual driver mailbox sequence */
-	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
-			DRV_MSG_SEQ_NUMBER_MASK;
+	struct qed_mcp_cmd_elem *p_cmd_elem;

-	/* Use MCP history register to check if MCP reset occurred between
-	 * init time and now.
+	/* There is at most one pending command at a certain time, and if it
+	 * exists - it is placed at the HEAD of the list.
 	 */
-	if (p_hwfn->mcp_info->mcp_hist !=
-	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
-		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
-		qed_load_mcp_offsets(p_hwfn, p_ptt);
-		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
+		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
+					      struct qed_mcp_cmd_elem, list);
+		return !p_cmd_elem->b_is_completed;
 	}
-	seq = ++p_hwfn->mcp_info->drv_mb_seq;

-	/* Set drv param */
-	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+	return false;
+}

-	/* Set drv command along with the updated sequence */
-	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+/* Must be called while cmd_lock is acquired */
+static int
+qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	struct qed_mcp_mb_params *p_mb_params;
+	struct qed_mcp_cmd_elem *p_cmd_elem;
+	u32 mcp_resp;
+	u16 seq_num;
+
+	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+	/* Return if no new non-handled response has been received */
+	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+		return -EAGAIN;
+
+	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
+	if (!p_cmd_elem) {
+		DP_ERR(p_hwfn,
+		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+		       seq_num);
+		return -EINVAL;
+	}
+
+	p_mb_params = p_cmd_elem->p_mb_params;
+
+	/* Get the MFW response along with the sequence number */
+	p_mb_params->mcp_resp = mcp_resp;
+
+	/* Get the MFW param */
+	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+	/* Get the union data */
+	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
+		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+				      offsetof(struct public_drv_mb,
+					       union_data);
+		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+				union_data_addr, p_mb_params->data_dst_size);
+	}
+
+	p_cmd_elem->b_is_completed = true;
+
+	return 0;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+				    struct qed_ptt *p_ptt,
+				    struct qed_mcp_mb_params *p_mb_params,
+				    u16 seq_num)
+{
+	union drv_union_data union_data;
+	u32 union_data_addr;
+
+	/* Set the union data */
+	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+			  offsetof(struct public_drv_mb, union_data);
+	memset(&union_data, 0, sizeof(union_data));
+	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
+		memcpy(&union_data, p_mb_params->p_data_src,
+		       p_mb_params->data_src_size);
+	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+		      sizeof(union_data));
+
+	/* Set the drv param */
+	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+	/* Set the drv command along with the sequence number */
+	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
-		   "wrote command (%x) to MFW MB param 0x%08x\n",
-		   (cmd | seq), param);
+		   "MFW mailbox: command 0x%08x param 0x%08x\n",
+		   (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+
+static int
+_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       struct qed_mcp_mb_params *p_mb_params,
+		       u32 max_retries, u32 delay)
+{
+	struct qed_mcp_cmd_elem *p_cmd_elem;
+	u32 cnt = 0;
+	u16 seq_num;
+	int rc = 0;

+	/* Wait until the mailbox is non-occupied */
 	do {
-		/* Wait for MFW response */
+		/* Exit the loop if there is no pending command, or if the
+		 * pending command is completed during this iteration.
+		 * The spinlock stays locked until the command is sent.
+		 */
+
+		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+		if (!qed_mcp_has_pending_cmd(p_hwfn))
+			break;
+
+		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+		if (!rc)
+			break;
+		else if (rc != -EAGAIN)
+			goto err;
+
+		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 		udelay(delay);
-		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+	} while (++cnt < max_retries);
+
+	if (cnt >= max_retries) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+			  p_mb_params->cmd, p_mb_params->param);
+		return -EAGAIN;
+	}

-		/* Give the FW up to 5 second (500*10ms) */
-	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
-		 (cnt++ < QED_DRV_MB_MAX_RETRIES));
+	/* Send the mailbox command */
+	qed_mcp_reread_offsets(p_hwfn, p_ptt);
+	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+	if (!p_cmd_elem)
+		goto err;

-	DP_VERBOSE(p_hwfn, QED_MSG_SP,
-		   "[after %d ms] read (%x) seq is (%x) from FW MB\n",
-		   cnt * delay, *o_mcp_resp, seq);
-
-	/* Is this a reply to our command? */
-	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
-		*o_mcp_resp &= FW_MSG_CODE_MASK;
-		/* Get the MCP param */
-		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
-	} else {
-		/* FW BUG! */
-		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
-		       cmd, param);
-		*o_mcp_resp = 0;
-		rc = -EAGAIN;
+	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+	/* Wait for the MFW response */
+	do {
+		/* Exit the loop if the command is already completed, or if the
+		 * command is completed during this iteration.
+		 * The spinlock stays locked until the list element is removed.
+		 */
+
+		udelay(delay);
+		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+		if (p_cmd_elem->b_is_completed)
+			break;
+
+		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+		if (!rc)
+			break;
+		else if (rc != -EAGAIN)
+			goto err;
+
+		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+	} while (++cnt < max_retries);
+
+	if (cnt >= max_retries) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+			  p_mb_params->cmd, p_mb_params->param);
+
+		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+		return -EAGAIN;
 	}
+
+	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_SP,
+		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+		   p_mb_params->mcp_resp,
+		   p_mb_params->mcp_param,
+		   (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+	/* Clear the sequence number from the MFW response */
+	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+	return 0;
+
+err:
+	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 	return rc;
 }

@@ -354,9 +519,9 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
 {
-	u32 union_data_addr;
-
-	int rc;
+	size_t union_data_size = sizeof(union drv_union_data);
+	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
+	u32 delay = CHIP_MCP_RESP_ITER_US;

 	/* MCP not initialized */
 	if (!qed_mcp_is_init(p_hwfn)) {
@@ -364,33 +529,17 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EBUSY;
 	}

-	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
-			  offsetof(struct public_drv_mb, union_data);
-
-	/* Ensure that only a single thread is accessing the mailbox at a
-	 * certain time.
-	 */
-	rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
-	if (rc)
-		return rc;
-
-	if (p_mb_params->p_data_src != NULL)
-		qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
-			      p_mb_params->p_data_src,
-			      sizeof(*p_mb_params->p_data_src));
-
-	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
-			    p_mb_params->param, &p_mb_params->mcp_resp,
-			    &p_mb_params->mcp_param);
-
-	if (p_mb_params->p_data_dst != NULL)
-		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
-				union_data_addr,
-				sizeof(*p_mb_params->p_data_dst));
-
-	qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+	if (p_mb_params->data_src_size > union_data_size ||
+	    p_mb_params->data_dst_size > union_data_size) {
+		DP_ERR(p_hwfn,
+		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+		       p_mb_params->data_src_size,
+		       p_mb_params->data_dst_size, union_data_size);
+		return -EINVAL;
+	}

-	return rc;
+	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+				      delay);
 }

 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -401,11 +550,10 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		u32 *o_mcp_param)
 {
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data data_src;
+	struct mcp_mac wol_mac;
 	int rc;

 	memset(&mb_params, 0, sizeof(mb_params));
-	memset(&data_src, 0, sizeof(data_src));
 	mb_params.cmd = cmd;
 	mb_params.param = param;

@@ -414,17 +562,18 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
	    (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
 		u8 *p_mac = p_hwfn->cdev->wol_mac;

-		data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
-		data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
-					     p_mac[4] << 8 | p_mac[5];
+		memset(&wol_mac, 0, sizeof(wol_mac));
+		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
+		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
+				    p_mac[4] << 8 | p_mac[5];

 		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
-			   p_mac, data_src.wol_mac.mac_upper,
-			   data_src.wol_mac.mac_lower);
+			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

-		mb_params.p_data_src = &data_src;
+		mb_params.p_data_src = &wol_mac;
+		mb_params.data_src_size = sizeof(wol_mac);
 	}

 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
@@ -445,13 +594,17 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
 {
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
+	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
 	int rc;

 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = cmd;
 	mb_params.param = param;
-	mb_params.p_data_dst = &union_data;
+	mb_params.p_data_dst = raw_data;
+
+	/* Use the maximal value since the actual one is part of the response */
+	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc)
 		return rc;
@@ -460,7 +613,7 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
 	*o_mcp_param = mb_params.mcp_param;

 	*o_txn_size = *o_mcp_param;
-	memcpy(o_buf, &union_data.raw_data, *o_txn_size);
+	memcpy(o_buf, raw_data, *o_txn_size);

 	return 0;
 }
@@ -480,6 +633,7 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
			  cdev->drv_type;
 	memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
 	mb_params.p_data_src = &union_data;
+	mb_params.data_src_size = sizeof(union_data.ver_str);
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

 	/* if mcp fails to respond we must abort */
@@ -549,7 +703,6 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
 	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
 	int rc;
 	int i;

@@ -560,8 +713,8 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,

 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
-	memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
-	mb_params.p_data_src = &union_data;
+	mb_params.p_data_src = vfs_to_ack;
+	mb_params.data_src_size = VF_MAX_STATIC / 8;
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
@@ -744,33 +897,31 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 {
 	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
-	struct eth_phy_cfg *phy_cfg;
+	struct eth_phy_cfg phy_cfg;
 	int rc = 0;
 	u32 cmd;

 	/* Set the shmem configuration according to params */
-	phy_cfg = &union_data.drv_phy_cfg;
-	memset(phy_cfg, 0, sizeof(*phy_cfg));
+	memset(&phy_cfg, 0, sizeof(phy_cfg));
 	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
 	if (!params->speed.autoneg)
-		phy_cfg->speed = params->speed.forced_speed;
-	phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
-	phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
-	phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
-	phy_cfg->adv_speed = params->speed.advertised_speeds;
-	phy_cfg->loopback_mode = params->loopback_mode;
+		phy_cfg.speed = params->speed.forced_speed;
+	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+	phy_cfg.adv_speed = params->speed.advertised_speeds;
+	phy_cfg.loopback_mode = params->loopback_mode;

 	p_hwfn->b_drv_link_init = b_up;

 	if (b_up) {
 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
-			   phy_cfg->speed,
-			   phy_cfg->pause,
-			   phy_cfg->adv_speed,
-			   phy_cfg->loopback_mode,
-			   phy_cfg->feature_config_flags);
+			   phy_cfg.speed,
+			   phy_cfg.pause,
+			   phy_cfg.adv_speed,
+			   phy_cfg.loopback_mode,
+			   phy_cfg.feature_config_flags);
 	} else {
 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
@@ -778,7 +929,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)

 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = cmd;
-	mb_params.p_data_src = &union_data;
+	mb_params.p_data_src = &phy_cfg;
+	mb_params.data_src_size = sizeof(phy_cfg);
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

 	/* if mcp fails to respond we must abort */
@@ -805,7 +957,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
 	enum qed_mcp_protocol_type stats_type;
 	union qed_mcp_protocol_stats stats;
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
 	u32 hsi_param;

 	switch (type) {
@@ -835,8 +986,8 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
 	mb_params.param = hsi_param;
-	memcpy(&union_data, &stats, sizeof(stats));
-	mb_params.p_data_src = &union_data;
+	mb_params.p_data_src = &stats;
+	mb_params.data_src_size = sizeof(stats);
 	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 }

@@ -963,7 +1114,7 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
 		default:
-			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
 		}
 	}
@@ -1316,24 +1467,23 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
 {
-	struct drv_version_stc *p_drv_version;
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
+	struct drv_version_stc drv_version;
 	__be32 val;
 	u32 i;
 	int rc;

-	p_drv_version = &union_data.drv_version;
-	p_drv_version->version = p_ver->version;
-
+	memset(&drv_version, 0, sizeof(drv_version));
+	drv_version.version = p_ver->version;
 	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
 		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
-		*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
 	}

 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
-	mb_params.p_data_src = &union_data;
+	mb_params.p_data_src = &drv_version;
+	mb_params.data_src_size = sizeof(drv_version);
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc)
 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1450,7 +1600,7 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
 {
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
+	u32 mfw_mac[2];
 	int rc;

 	memset(&mb_params, 0, sizeof(mb_params));
@@ -1458,8 +1608,17 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
 	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
 	mb_params.param |= MCP_PF_ID(p_hwfn);
-	ether_addr_copy(&union_data.raw_data[0], mac);
-	mb_params.p_data_src = &union_data;
+
+	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
+	 * in 32-bit granularity.
+	 * So the MAC has to be set in native order [and not byte order],
+	 * otherwise it would be read incorrectly by MFW after swap.
+	 */
+	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
+	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
+
+	mb_params.p_data_src = (u8 *)mfw_mac;
+	mb_params.data_src_size = 8;
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc)
 		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
@@ -1737,27 +1896,21 @@ int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
			  u32 *p_mcp_resp, u32 *p_mcp_param)
 {
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
 	int rc;

 	memset(&mb_params, 0, sizeof(mb_params));
-	memset(&union_data, 0, sizeof(union_data));
 	mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
 	mb_params.param = QED_RESC_ALLOC_VERSION;

-	/* Need to have a sufficient large struct, as the cmd_and_union
-	 * is going to do memcpy from and to it.
-	 */
-	memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
-
-	mb_params.p_data_src = &union_data;
-	mb_params.p_data_dst = &union_data;
+	mb_params.p_data_src = p_resc_info;
+	mb_params.data_src_size = sizeof(*p_resc_info);
+	mb_params.p_data_dst = p_resc_info;
+	mb_params.data_dst_size = sizeof(*p_resc_info);
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc)
 		return rc;

 	/* Copy the data back */
-	memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info));
 	*p_mcp_resp = mb_params.mcp_resp;
 	*p_mcp_param = mb_params.mcp_param;