@@ -117,87 +117,7 @@ static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
return true;
}
-/* VFOP - VF slow-path operation support */
-
-#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
-
/* VFOP operations states */
-enum bnx2x_vfop_qctor_state {
- BNX2X_VFOP_QCTOR_INIT,
- BNX2X_VFOP_QCTOR_SETUP,
- BNX2X_VFOP_QCTOR_INT_EN
-};
-
-enum bnx2x_vfop_qdtor_state {
- BNX2X_VFOP_QDTOR_HALT,
- BNX2X_VFOP_QDTOR_TERMINATE,
- BNX2X_VFOP_QDTOR_CFCDEL,
- BNX2X_VFOP_QDTOR_DONE
-};
-
-enum bnx2x_vfop_vlan_mac_state {
- BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
- BNX2X_VFOP_VLAN_MAC_CLEAR,
- BNX2X_VFOP_VLAN_MAC_CHK_DONE,
- BNX2X_VFOP_MAC_CONFIG_LIST,
- BNX2X_VFOP_VLAN_CONFIG_LIST,
- BNX2X_VFOP_VLAN_CONFIG_LIST_0
-};
-
-enum bnx2x_vfop_qsetup_state {
- BNX2X_VFOP_QSETUP_CTOR,
- BNX2X_VFOP_QSETUP_VLAN0,
- BNX2X_VFOP_QSETUP_DONE
-};
-
-enum bnx2x_vfop_mcast_state {
- BNX2X_VFOP_MCAST_DEL,
- BNX2X_VFOP_MCAST_ADD,
- BNX2X_VFOP_MCAST_CHK_DONE
-};
-enum bnx2x_vfop_qflr_state {
- BNX2X_VFOP_QFLR_CLR_VLAN,
- BNX2X_VFOP_QFLR_CLR_MAC,
- BNX2X_VFOP_QFLR_TERMINATE,
- BNX2X_VFOP_QFLR_DONE
-};
-
-enum bnx2x_vfop_flr_state {
- BNX2X_VFOP_FLR_QUEUES,
- BNX2X_VFOP_FLR_HW
-};
-
-enum bnx2x_vfop_close_state {
- BNX2X_VFOP_CLOSE_QUEUES,
- BNX2X_VFOP_CLOSE_HW
-};
-
-enum bnx2x_vfop_rxmode_state {
- BNX2X_VFOP_RXMODE_CONFIG,
- BNX2X_VFOP_RXMODE_DONE
-};
-
-enum bnx2x_vfop_qteardown_state {
- BNX2X_VFOP_QTEARDOWN_RXMODE,
- BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
- BNX2X_VFOP_QTEARDOWN_CLR_MAC,
- BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
- BNX2X_VFOP_QTEARDOWN_QDTOR,
- BNX2X_VFOP_QTEARDOWN_DONE
-};
-
-enum bnx2x_vfop_rss_state {
- BNX2X_VFOP_RSS_CONFIG,
- BNX2X_VFOP_RSS_DONE
-};
-
-enum bnx2x_vfop_tpa_state {
- BNX2X_VFOP_TPA_CONFIG,
- BNX2X_VFOP_TPA_DONE
-};
-
-#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
-
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
struct bnx2x_queue_setup_params *setup_params,
@@ -241,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
- struct bnx2x_vfop_qctor_params *p,
+ struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type)
{
struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
@@ -310,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
}
}
-/* VFOP queue construction */
|
|
|
-static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
+static int bnx2x_vf_queue_create(struct bnx2x *bp,
|
|
|
+ struct bnx2x_virtf *vf, int qid,
|
|
|
+ struct bnx2x_vf_queue_construct_params *qctor)
|
|
|
{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
|
|
|
- struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
|
|
|
- struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
|
|
|
- enum bnx2x_vfop_qctor_state state = vfop->state;
|
|
|
-
|
|
|
- bnx2x_vfop_reset_wq(vf);
|
|
|
-
|
|
|
- if (vfop->rc < 0)
|
|
|
- goto op_err;
|
|
|
-
|
|
|
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
|
|
|
-
|
|
|
- switch (state) {
|
|
|
- case BNX2X_VFOP_QCTOR_INIT:
|
|
|
-
|
|
|
- /* has this queue already been opened? */
|
|
|
- if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
|
|
|
- BNX2X_Q_LOGICAL_STATE_ACTIVE) {
|
|
|
- DP(BNX2X_MSG_IOV,
|
|
|
- "Entered qctor but queue was already up. Aborting gracefully\n");
|
|
|
- goto op_done;
|
|
|
- }
|
|
|
-
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_QCTOR_SETUP;
|
|
|
-
|
|
|
- q_params->cmd = BNX2X_Q_CMD_INIT;
|
|
|
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
|
|
|
-
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
|
|
|
-
|
|
|
- case BNX2X_VFOP_QCTOR_SETUP:
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
|
|
|
-
|
|
|
- /* copy pre-prepared setup params to the queue-state params */
|
|
|
- vfop->op_p->qctor.qstate.params.setup =
|
|
|
- vfop->op_p->qctor.prep_qsetup;
|
|
|
-
|
|
|
- q_params->cmd = BNX2X_Q_CMD_SETUP;
|
|
|
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
|
|
|
+ struct bnx2x_queue_state_params *q_params;
|
|
|
+ int rc = 0;
|
|
|
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
|
|
|
|
|
|
- case BNX2X_VFOP_QCTOR_INT_EN:
|
|
|
+ /* Prepare ramrod information */
|
|
|
+ q_params = &qctor->qstate;
|
|
|
+ q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
|
|
|
+ set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
|
|
|
|
|
|
- /* enable interrupts */
|
|
|
- bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
|
|
|
- USTORM_ID, 0, IGU_INT_ENABLE, 0);
|
|
|
- goto op_done;
|
|
|
- default:
|
|
|
- bnx2x_vfop_default(state);
|
|
|
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
|
|
|
+ BNX2X_Q_LOGICAL_STATE_ACTIVE) {
|
|
|
+ DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
|
|
|
+ goto out;
|
|
|
}
|
|
|
-op_err:
|
|
|
- BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
|
|
|
- vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
|
|
|
-op_done:
|
|
|
- bnx2x_vfop_end(bp, vf, vfop);
|
|
|
-op_pending:
|
|
|
- return;
|
|
|
-}
|
|
|
-
|
|
|
-static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- int qid)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
|
|
|
|
|
|
- if (vfop) {
|
|
|
- vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
|
|
|
+ /* Run Queue 'construction' ramrods */
|
|
|
+ q_params->cmd = BNX2X_Q_CMD_INIT;
|
|
|
+ rc = bnx2x_queue_state_change(bp, q_params);
|
|
|
+ if (rc)
|
|
|
+ goto out;
|
|
|
|
|
|
- vfop->args.qctor.qid = qid;
|
|
|
- vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
|
|
|
+ memcpy(&q_params->params.setup, &qctor->prep_qsetup,
|
|
|
+ sizeof(struct bnx2x_queue_setup_params));
|
|
|
+ q_params->cmd = BNX2X_Q_CMD_SETUP;
|
|
|
+ rc = bnx2x_queue_state_change(bp, q_params);
|
|
|
+ if (rc)
|
|
|
+ goto out;
|
|
|
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
|
|
|
- bnx2x_vfop_qctor, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
|
|
|
- cmd->block);
|
|
|
- }
|
|
|
- return -ENOMEM;
|
|
|
+ /* enable interrupts */
|
|
|
+ bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
|
|
|
+ USTORM_ID, 0, IGU_INT_ENABLE, 0);
|
|
|
+out:
|
|
|
+ return rc;
|
|
|
}
|
|
|
|
|
|
-/* VFOP queue destruction */
|
|
|
-static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
+static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
|
|
|
+ int qid)
|
|
|
{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
|
|
|
- struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
|
|
|
- struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
|
|
|
- enum bnx2x_vfop_qdtor_state state = vfop->state;
|
|
|
-
|
|
|
- bnx2x_vfop_reset_wq(vf);
|
|
|
-
|
|
|
- if (vfop->rc < 0)
|
|
|
- goto op_err;
|
|
|
-
|
|
|
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
|
|
|
-
|
|
|
- switch (state) {
|
|
|
- case BNX2X_VFOP_QDTOR_HALT:
|
|
|
-
|
|
|
- /* has this queue already been stopped? */
|
|
|
- if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
|
|
|
- BNX2X_Q_LOGICAL_STATE_STOPPED) {
|
|
|
- DP(BNX2X_MSG_IOV,
|
|
|
- "Entered qdtor but queue was already stopped. Aborting gracefully\n");
|
|
|
-
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_QDTOR_DONE;
|
|
|
-
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
|
|
|
- }
|
|
|
-
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
|
|
|
-
|
|
|
- q_params->cmd = BNX2X_Q_CMD_HALT;
|
|
|
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
|
|
|
-
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
|
|
|
-
|
|
|
- case BNX2X_VFOP_QDTOR_TERMINATE:
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
|
|
|
-
|
|
|
- q_params->cmd = BNX2X_Q_CMD_TERMINATE;
|
|
|
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
|
|
|
+ enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
|
|
|
+ BNX2X_Q_CMD_TERMINATE,
|
|
|
+ BNX2X_Q_CMD_CFC_DEL};
|
|
|
+ struct bnx2x_queue_state_params q_params;
|
|
|
+ int rc, i;
|
|
|
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
|
|
|
|
|
|
- case BNX2X_VFOP_QDTOR_CFCDEL:
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_QDTOR_DONE;
|
|
|
+ /* Prepare ramrod information */
|
|
|
+ memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
|
|
|
+ q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
|
|
|
+ set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
|
|
|
|
|
|
- q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
|
|
|
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
|
|
|
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
|
|
|
+ BNX2X_Q_LOGICAL_STATE_STOPPED) {
|
|
|
+ DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
|
|
|
-op_err:
|
|
|
- BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
|
|
|
- vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
|
|
|
-op_done:
|
|
|
- case BNX2X_VFOP_QDTOR_DONE:
|
|
|
- /* invalidate the context */
|
|
|
- if (qdtor->cxt) {
|
|
|
- qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
|
|
|
- qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
|
|
|
+ /* Run Queue 'destruction' ramrods */
|
|
|
+ for (i = 0; i < ARRAY_SIZE(cmds); i++) {
|
|
|
+ q_params.cmd = cmds[i];
|
|
|
+ rc = bnx2x_queue_state_change(bp, &q_params);
|
|
|
+ if (rc) {
|
|
|
+ BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
|
|
|
+ return rc;
|
|
|
}
|
|
|
- bnx2x_vfop_end(bp, vf, vfop);
|
|
|
- return;
|
|
|
- default:
|
|
|
- bnx2x_vfop_default(state);
|
|
|
}
|
|
|
-op_pending:
|
|
|
- return;
|
|
|
-}
|
|
|
-
|
|
|
-static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- int qid)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
|
|
|
-
|
|
|
- if (vfop) {
|
|
|
- struct bnx2x_queue_state_params *qstate =
|
|
|
- &vf->op_params.qctor.qstate;
|
|
|
-
|
|
|
- memset(qstate, 0, sizeof(*qstate));
|
|
|
- qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
|
|
|
-
|
|
|
- vfop->args.qdtor.qid = qid;
|
|
|
- vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
|
|
|
-
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
|
|
|
- bnx2x_vfop_qdtor, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
|
|
|
- cmd->block);
|
|
|
- } else {
|
|
|
- BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
|
|
|
- return -ENOMEM;
|
|
|
+out:
|
|
|
+ /* Clean Context */
|
|
|
+ if (bnx2x_vfq(vf, qid, cxt)) {
|
|
|
+ bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
|
|
|
+ bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
|
|
|
}
|
|
|
+
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
static void
|
|
@@ -516,731 +330,291 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
|
|
|
BP_VFDB(bp)->vf_sbs_pool++;
|
|
|
}
|
|
|
|
|
|
-/* VFOP MAC/VLAN helpers */
|
|
|
-static inline void bnx2x_vfop_credit(struct bnx2x *bp,
|
|
|
- struct bnx2x_vfop *vfop,
|
|
|
- struct bnx2x_vlan_mac_obj *obj)
|
|
|
+static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
|
|
|
+ struct bnx2x_vlan_mac_obj *obj,
|
|
|
+ atomic_t *counter)
|
|
|
{
|
|
|
- struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
|
|
|
-
|
|
|
- /* update credit only if there is no error
|
|
|
- * and a valid credit counter
|
|
|
- */
|
|
|
- if (!vfop->rc && args->credit) {
|
|
|
- struct list_head *pos;
|
|
|
- int read_lock;
|
|
|
- int cnt = 0;
|
|
|
+ struct list_head *pos;
|
|
|
+ int read_lock;
|
|
|
+ int cnt = 0;
|
|
|
|
|
|
- read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
|
|
|
- if (read_lock)
|
|
|
- DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
|
|
|
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
|
|
|
+ if (read_lock)
|
|
|
+ DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
|
|
|
|
|
|
- list_for_each(pos, &obj->head)
|
|
|
- cnt++;
|
|
|
+ list_for_each(pos, &obj->head)
|
|
|
+ cnt++;
|
|
|
|
|
|
- if (!read_lock)
|
|
|
- bnx2x_vlan_mac_h_read_unlock(bp, obj);
|
|
|
+ if (!read_lock)
|
|
|
+ bnx2x_vlan_mac_h_read_unlock(bp, obj);
|
|
|
|
|
|
- atomic_set(args->credit, cnt);
|
|
|
- }
|
|
|
+ atomic_set(counter, cnt);
|
|
|
}
|
|
|
|
|
|
-static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
|
|
|
- struct bnx2x_vfop_filter *pos,
|
|
|
- struct bnx2x_vlan_mac_data *user_req)
|
|
|
+static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
|
|
|
+ int qid, bool drv_only, bool mac)
|
|
|
{
|
|
|
- user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
|
|
|
- BNX2X_VLAN_MAC_DEL;
|
|
|
-
|
|
|
- switch (pos->type) {
|
|
|
- case BNX2X_VFOP_FILTER_MAC:
|
|
|
- memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
|
|
|
- break;
|
|
|
- case BNX2X_VFOP_FILTER_VLAN:
|
|
|
- user_req->u.vlan.vlan = pos->vid;
|
|
|
- break;
|
|
|
- default:
|
|
|
- BNX2X_ERR("Invalid filter type, skipping\n");
|
|
|
- return 1;
|
|
|
- }
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int bnx2x_vfop_config_list(struct bnx2x *bp,
|
|
|
- struct bnx2x_vfop_filters *filters,
|
|
|
- struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
|
|
|
-{
|
|
|
- struct bnx2x_vfop_filter *pos, *tmp;
|
|
|
- struct list_head rollback_list, *filters_list = &filters->head;
|
|
|
- struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
|
|
|
- int rc = 0, cnt = 0;
|
|
|
-
|
|
|
- INIT_LIST_HEAD(&rollback_list);
|
|
|
-
|
|
|
- list_for_each_entry_safe(pos, tmp, filters_list, link) {
|
|
|
- if (bnx2x_vfop_set_user_req(bp, pos, user_req))
|
|
|
- continue;
|
|
|
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
|
|
|
+ int rc;
|
|
|
|
|
|
- rc = bnx2x_config_vlan_mac(bp, vlan_mac);
|
|
|
- if (rc >= 0) {
|
|
|
- cnt += pos->add ? 1 : -1;
|
|
|
- list_move(&pos->link, &rollback_list);
|
|
|
- rc = 0;
|
|
|
- } else if (rc == -EEXIST) {
|
|
|
- rc = 0;
|
|
|
- } else {
|
|
|
- BNX2X_ERR("Failed to add a new vlan_mac command\n");
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
|
|
|
+ mac ? "MACs" : "VLANs");
|
|
|
|
|
|
- /* rollback if error or too many rules added */
|
|
|
- if (rc || cnt > filters->add_cnt) {
|
|
|
- BNX2X_ERR("error or too many rules added. Performing rollback\n");
|
|
|
- list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
|
|
|
- pos->add = !pos->add; /* reverse op */
|
|
|
- bnx2x_vfop_set_user_req(bp, pos, user_req);
|
|
|
- bnx2x_config_vlan_mac(bp, vlan_mac);
|
|
|
- list_del(&pos->link);
|
|
|
- }
|
|
|
- cnt = 0;
|
|
|
- if (!rc)
|
|
|
- rc = -EINVAL;
|
|
|
+ /* Prepare ramrod params */
|
|
|
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
|
|
|
+ if (mac) {
|
|
|
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
|
|
|
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
|
|
|
+ } else {
|
|
|
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
|
|
|
+ &ramrod.user_req.vlan_mac_flags);
|
|
|
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
|
|
|
}
|
|
|
- filters->add_cnt = cnt;
|
|
|
- return rc;
|
|
|
-}
|
|
|
+ ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
|
|
|
|
|
|
-/* VFOP set VLAN/MAC */
|
|
|
-static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
|
|
|
- struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
|
|
|
- struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
|
|
|
- struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
|
|
|
-
|
|
|
- enum bnx2x_vfop_vlan_mac_state state = vfop->state;
|
|
|
-
|
|
|
- if (vfop->rc < 0)
|
|
|
- goto op_err;
|
|
|
-
|
|
|
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
|
|
|
-
|
|
|
- bnx2x_vfop_reset_wq(vf);
|
|
|
-
|
|
|
- switch (state) {
|
|
|
- case BNX2X_VFOP_VLAN_MAC_CLEAR:
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
|
|
|
-
|
|
|
- /* do delete */
|
|
|
- vfop->rc = obj->delete_all(bp, obj,
|
|
|
- &vlan_mac->user_req.vlan_mac_flags,
|
|
|
- &vlan_mac->ramrod_flags);
|
|
|
-
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
|
|
|
-
|
|
|
- case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
|
|
|
-
|
|
|
- /* do config */
|
|
|
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
|
|
|
- if (vfop->rc == -EEXIST)
|
|
|
- vfop->rc = 0;
|
|
|
-
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
|
|
|
-
|
|
|
- case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
|
|
|
- vfop->rc = !!obj->raw.check_pending(&obj->raw);
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
|
|
|
-
|
|
|
- case BNX2X_VFOP_MAC_CONFIG_LIST:
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
|
|
|
-
|
|
|
- /* do list config */
|
|
|
- vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
|
|
|
- if (vfop->rc)
|
|
|
- goto op_err;
|
|
|
-
|
|
|
- set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
|
|
|
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
|
|
|
-
|
|
|
- case BNX2X_VFOP_VLAN_CONFIG_LIST:
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
|
|
|
-
|
|
|
- /* do list config */
|
|
|
- vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
|
|
|
- if (!vfop->rc) {
|
|
|
- set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
|
|
|
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
|
|
|
- }
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
|
|
|
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
|
|
|
+ if (drv_only)
|
|
|
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
|
|
|
+ else
|
|
|
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
|
|
|
|
|
|
- default:
|
|
|
- bnx2x_vfop_default(state);
|
|
|
+ /* Start deleting */
|
|
|
+ rc = ramrod.vlan_mac_obj->delete_all(bp,
|
|
|
+ ramrod.vlan_mac_obj,
|
|
|
+ &ramrod.user_req.vlan_mac_flags,
|
|
|
+ &ramrod.ramrod_flags);
|
|
|
+ if (rc) {
|
|
|
+ BNX2X_ERR("Failed to delete all %s\n",
|
|
|
+ mac ? "MACs" : "VLANs");
|
|
|
+ return rc;
|
|
|
}
|
|
|
-op_err:
|
|
|
- BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
|
|
|
-op_done:
|
|
|
- kfree(filters);
|
|
|
- bnx2x_vfop_credit(bp, vfop, obj);
|
|
|
- bnx2x_vfop_end(bp, vf, vfop);
|
|
|
-op_pending:
|
|
|
- return;
|
|
|
-}
|
|
|
|
|
|
-struct bnx2x_vfop_vlan_mac_flags {
|
|
|
- bool drv_only;
|
|
|
- bool dont_consume;
|
|
|
- bool single_cmd;
|
|
|
- bool add;
|
|
|
-};
|
|
|
+ /* Clear the vlan counters */
|
|
|
+ if (!mac)
|
|
|
+ atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
|
|
|
|
|
|
-static void
|
|
|
-bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
|
|
|
- struct bnx2x_vfop_vlan_mac_flags *flags)
|
|
|
-{
|
|
|
- struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
|
|
|
-
|
|
|
- memset(ramrod, 0, sizeof(*ramrod));
|
|
|
-
|
|
|
- /* ramrod flags */
|
|
|
- if (flags->drv_only)
|
|
|
- set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
|
|
|
- if (flags->single_cmd)
|
|
|
- set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
|
|
|
-
|
|
|
- /* mac_vlan flags */
|
|
|
- if (flags->dont_consume)
|
|
|
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
|
|
|
-
|
|
|
- /* cmd */
|
|
|
- ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
|
|
|
-}
|
|
|
-
|
|
|
-static inline void
|
|
|
-bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
|
|
|
- struct bnx2x_vfop_vlan_mac_flags *flags)
|
|
|
-{
|
|
|
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
|
|
|
- set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
-static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- int qid, bool drv_only)
|
|
|
+static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
|
|
|
+ struct bnx2x_virtf *vf, int qid,
|
|
|
+ struct bnx2x_vf_mac_vlan_filter *filter,
|
|
|
+ bool drv_only)
|
|
|
{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
|
|
|
-
|
|
|
- if (vfop) {
|
|
|
- struct bnx2x_vfop_args_filters filters = {
|
|
|
- .multi_filter = NULL, /* single */
|
|
|
- .credit = NULL, /* consume credit */
|
|
|
- };
|
|
|
- struct bnx2x_vfop_vlan_mac_flags flags = {
|
|
|
- .drv_only = drv_only,
|
|
|
- .dont_consume = (filters.credit != NULL),
|
|
|
- .single_cmd = true,
|
|
|
- .add = false /* don't care */,
|
|
|
- };
|
|
|
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
|
|
|
- &vf->op_params.vlan_mac;
|
|
|
-
|
|
|
- /* set ramrod params */
|
|
|
- bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
|
|
|
-
|
|
|
- /* set object */
|
|
|
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
|
|
|
-
|
|
|
- /* set extra args */
|
|
|
- vfop->args.filters = filters;
|
|
|
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
|
|
|
+ int rc;
|
|
|
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
|
|
|
- bnx2x_vfop_vlan_mac, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
|
|
|
- cmd->block);
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
|
|
|
+ vf->abs_vfid, filter->add ? "Adding" : "Deleting",
|
|
|
+ filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
|
|
|
+
|
|
|
+ /* Prepare ramrod params */
|
|
|
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
|
|
|
+ if (filter->type == BNX2X_VF_FILTER_VLAN) {
|
|
|
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
|
|
|
+ &ramrod.user_req.vlan_mac_flags);
|
|
|
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
|
|
|
+ ramrod.user_req.u.vlan.vlan = filter->vid;
|
|
|
+ } else {
|
|
|
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
|
|
|
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
|
|
|
+ memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
|
|
|
+ }
|
|
|
+ ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
|
|
|
+ BNX2X_VLAN_MAC_DEL;
|
|
|
+
|
|
|
+ /* Verify there are available vlan credits */
|
|
|
+ if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
|
|
|
+ (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
|
|
|
+ vf_vlan_rules_cnt(vf))) {
|
|
|
+ BNX2X_ERR("No credits for vlan\n");
|
|
|
+ return -ENOMEM;
|
|
|
}
|
|
|
- return -ENOMEM;
|
|
|
-}
|
|
|
-
|
|
|
-int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- struct bnx2x_vfop_filters *macs,
|
|
|
- int qid, bool drv_only)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop;
|
|
|
-
|
|
|
- if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
|
|
|
- return -EINVAL;
|
|
|
|
|
|
- vfop = bnx2x_vfop_add(bp, vf);
|
|
|
- if (vfop) {
|
|
|
- struct bnx2x_vfop_args_filters filters = {
|
|
|
- .multi_filter = macs,
|
|
|
- .credit = NULL, /* consume credit */
|
|
|
- };
|
|
|
- struct bnx2x_vfop_vlan_mac_flags flags = {
|
|
|
- .drv_only = drv_only,
|
|
|
- .dont_consume = (filters.credit != NULL),
|
|
|
- .single_cmd = false,
|
|
|
- .add = false, /* don't care since only the items in the
|
|
|
- * filters list affect the sp operation,
|
|
|
- * not the list itself
|
|
|
- */
|
|
|
- };
|
|
|
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
|
|
|
- &vf->op_params.vlan_mac;
|
|
|
-
|
|
|
- /* set ramrod params */
|
|
|
- bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
|
|
|
-
|
|
|
- /* set object */
|
|
|
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
|
|
|
-
|
|
|
- /* set extra args */
|
|
|
- filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
|
|
|
- vfop->args.filters = filters;
|
|
|
-
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
|
|
|
- bnx2x_vfop_vlan_mac, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
|
|
|
- cmd->block);
|
|
|
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
|
|
|
+ if (drv_only)
|
|
|
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
|
|
|
+ else
|
|
|
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
|
|
|
+
|
|
|
+ /* Add/Remove the filter */
|
|
|
+ rc = bnx2x_config_vlan_mac(bp, &ramrod);
|
|
|
+ if (rc && rc != -EEXIST) {
|
|
|
+ BNX2X_ERR("Failed to %s %s\n",
|
|
|
+ filter->add ? "add" : "delete",
|
|
|
+ filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
|
|
|
+ "VLAN");
|
|
|
+ return rc;
|
|
|
}
|
|
|
- return -ENOMEM;
|
|
|
-}
|
|
|
-
|
|
|
-static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- int qid, u16 vid, bool add)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop;
|
|
|
|
|
|
- if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
|
|
|
- return -EINVAL;
|
|
|
+ /* Update the vlan counters */
|
|
|
+ if (filter->type == BNX2X_VF_FILTER_VLAN)
|
|
|
+ bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
|
|
|
+ &bnx2x_vfq(vf, qid, vlan_count));
|
|
|
|
|
|
- vfop = bnx2x_vfop_add(bp, vf);
|
|
|
- if (vfop) {
|
|
|
- struct bnx2x_vfop_args_filters filters = {
|
|
|
- .multi_filter = NULL, /* single command */
|
|
|
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
|
|
|
- };
|
|
|
- struct bnx2x_vfop_vlan_mac_flags flags = {
|
|
|
- .drv_only = false,
|
|
|
- .dont_consume = (filters.credit != NULL),
|
|
|
- .single_cmd = true,
|
|
|
- .add = add,
|
|
|
- };
|
|
|
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
|
|
|
- &vf->op_params.vlan_mac;
|
|
|
-
|
|
|
- /* set ramrod params */
|
|
|
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
|
|
|
- ramrod->user_req.u.vlan.vlan = vid;
|
|
|
-
|
|
|
- /* set object */
|
|
|
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
|
|
|
-
|
|
|
- /* set extra args */
|
|
|
- vfop->args.filters = filters;
|
|
|
-
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
|
|
|
- bnx2x_vfop_vlan_mac, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
|
|
|
- cmd->block);
|
|
|
- }
|
|
|
- return -ENOMEM;
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
-static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- int qid, bool drv_only)
|
|
|
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
|
|
|
+ struct bnx2x_vf_mac_vlan_filters *filters,
|
|
|
+ int qid, bool drv_only)
|
|
|
{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
|
|
|
-
|
|
|
- if (vfop) {
|
|
|
- struct bnx2x_vfop_args_filters filters = {
|
|
|
- .multi_filter = NULL, /* single command */
|
|
|
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
|
|
|
- };
|
|
|
- struct bnx2x_vfop_vlan_mac_flags flags = {
|
|
|
- .drv_only = drv_only,
|
|
|
- .dont_consume = (filters.credit != NULL),
|
|
|
- .single_cmd = true,
|
|
|
- .add = false, /* don't care */
|
|
|
- };
|
|
|
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
|
|
|
- &vf->op_params.vlan_mac;
|
|
|
-
|
|
|
- /* set ramrod params */
|
|
|
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
|
|
|
+ int rc = 0, i;
|
|
|
|
|
|
- /* set object */
|
|
|
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
|
|
|
-
|
|
|
- /* set extra args */
|
|
|
- vfop->args.filters = filters;
|
|
|
-
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
|
|
|
- bnx2x_vfop_vlan_mac, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
|
|
|
- cmd->block);
|
|
|
- }
|
|
|
- return -ENOMEM;
|
|
|
-}
|
|
|
-
|
|
|
-int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- struct bnx2x_vfop_filters *vlans,
|
|
|
- int qid, bool drv_only)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop;
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
|
|
|
|
|
|
if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
|
|
|
return -EINVAL;
|
|
|
|
|
|
- vfop = bnx2x_vfop_add(bp, vf);
|
|
|
- if (vfop) {
|
|
|
- struct bnx2x_vfop_args_filters filters = {
|
|
|
- .multi_filter = vlans,
|
|
|
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
|
|
|
- };
|
|
|
- struct bnx2x_vfop_vlan_mac_flags flags = {
|
|
|
- .drv_only = drv_only,
|
|
|
- .dont_consume = (filters.credit != NULL),
|
|
|
- .single_cmd = false,
|
|
|
- .add = false, /* don't care */
|
|
|
- };
|
|
|
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
|
|
|
- &vf->op_params.vlan_mac;
|
|
|
-
|
|
|
- /* set ramrod params */
|
|
|
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
|
|
|
-
|
|
|
- /* set object */
|
|
|
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
|
|
|
-
|
|
|
- /* set extra args */
|
|
|
- filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
|
|
|
- atomic_read(filters.credit);
|
|
|
-
|
|
|
- vfop->args.filters = filters;
|
|
|
-
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
|
|
|
- bnx2x_vfop_vlan_mac, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
|
|
|
- cmd->block);
|
|
|
+ /* Prepare ramrod params */
|
|
|
+ for (i = 0; i < filters->count; i++) {
|
|
|
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
|
|
|
+ &filters->filters[i], drv_only);
|
|
|
+ if (rc)
|
|
|
+ break;
|
|
|
}
|
|
|
- return -ENOMEM;
|
|
|
-}
|
|
|
-
|
|
|
-/* VFOP queue setup (queue constructor + set vlan 0) */
|
|
|
-static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
|
|
|
- int qid = vfop->args.qctor.qid;
|
|
|
- enum bnx2x_vfop_qsetup_state state = vfop->state;
|
|
|
- struct bnx2x_vfop_cmd cmd = {
|
|
|
- .done = bnx2x_vfop_qsetup,
|
|
|
- .block = false,
|
|
|
- };
|
|
|
-
|
|
|
- if (vfop->rc < 0)
|
|
|
- goto op_err;
|
|
|
-
|
|
|
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
|
|
|
|
|
|
- switch (state) {
|
|
|
- case BNX2X_VFOP_QSETUP_CTOR:
|
|
|
- /* init the queue ctor command */
|
|
|
- vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
|
|
|
- vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
|
|
|
- if (vfop->rc)
|
|
|
- goto op_err;
|
|
|
- return;
|
|
|
-
|
|
|
- case BNX2X_VFOP_QSETUP_VLAN0:
|
|
|
- /* skip if non-leading or FPGA/EMU*/
|
|
|
- if (qid)
|
|
|
- goto op_done;
|
|
|
-
|
|
|
- /* init the queue set-vlan command (for vlan 0) */
|
|
|
- vfop->state = BNX2X_VFOP_QSETUP_DONE;
|
|
|
- vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
|
|
|
- if (vfop->rc)
|
|
|
- goto op_err;
|
|
|
- return;
|
|
|
-op_err:
|
|
|
- BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
|
|
|
-op_done:
|
|
|
- case BNX2X_VFOP_QSETUP_DONE:
|
|
|
- vf->cfg_flags |= VF_CFG_VLAN;
|
|
|
- bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
|
|
|
- BNX2X_MSG_IOV);
|
|
|
- bnx2x_vfop_end(bp, vf, vfop);
|
|
|
- return;
|
|
|
- default:
|
|
|
- bnx2x_vfop_default(state);
|
|
|
+ /* Rollback if needed */
|
|
|
+ if (i != filters->count) {
|
|
|
+ BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
|
|
|
+ i, filters->count + 1);
|
|
|
+ while (--i >= 0) {
|
|
|
+ filters->filters[i].add = !filters->filters[i].add;
|
|
|
+ bnx2x_vf_mac_vlan_config(bp, vf, qid,
|
|
|
+ &filters->filters[i],
|
|
|
+ drv_only);
|
|
|
+ }
|
|
|
}
|
|
|
-}
|
|
|
-
|
|
|
-int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- int qid)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
|
|
|
|
|
|
- if (vfop) {
|
|
|
- vfop->args.qctor.qid = qid;
|
|
|
+ /* It's our responsibility to free the filters */
|
|
|
+ kfree(filters);
|
|
|
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
|
|
|
- bnx2x_vfop_qsetup, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
|
|
|
- cmd->block);
|
|
|
- }
|
|
|
- return -ENOMEM;
|
|
|
+ return rc;
|
|
|
}
|
|
|
|
|
|
-/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
|
|
|
-static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
|
|
|
+ struct bnx2x_vf_queue_construct_params *qctor)
|
|
|
{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
|
|
|
- int qid = vfop->args.qx.qid;
|
|
|
- enum bnx2x_vfop_qflr_state state = vfop->state;
|
|
|
- struct bnx2x_queue_state_params *qstate;
|
|
|
- struct bnx2x_vfop_cmd cmd;
|
|
|
+ int rc;
|
|
|
|
|
|
- bnx2x_vfop_reset_wq(vf);
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
|
|
|
|
|
|
- if (vfop->rc < 0)
|
|
|
+ rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
|
|
|
+ if (rc)
|
|
|
goto op_err;
|
|
|
|
|
|
- DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
|
|
|
-
|
|
|
- cmd.done = bnx2x_vfop_qflr;
|
|
|
- cmd.block = false;
|
|
|
-
|
|
|
- switch (state) {
|
|
|
- case BNX2X_VFOP_QFLR_CLR_VLAN:
|
|
|
- /* vlan-clear-all: driver-only, don't consume credit */
|
|
|
- vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
|
|
|
+ /* Configure vlan0 for leading queue */
|
|
|
+ if (!qid) {
|
|
|
+ struct bnx2x_vf_mac_vlan_filter filter;
|
|
|
|
|
|
- /* the vlan_mac vfop will re-schedule us */
|
|
|
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
|
|
|
- if (vfop->rc)
|
|
|
+ memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
|
|
|
+ filter.type = BNX2X_VF_FILTER_VLAN;
|
|
|
+ filter.add = true;
|
|
|
+ filter.vid = 0;
|
|
|
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
|
|
|
+ if (rc)
|
|
|
goto op_err;
|
|
|
- return;
|
|
|
-
|
|
|
- case BNX2X_VFOP_QFLR_CLR_MAC:
|
|
|
- /* mac-clear-all: driver only consume credit */
|
|
|
- vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
|
|
|
- /* the vlan_mac vfop will re-schedule us */
|
|
|
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
|
|
|
- if (vfop->rc)
|
|
|
- goto op_err;
|
|
|
- return;
|
|
|
-
|
|
|
- case BNX2X_VFOP_QFLR_TERMINATE:
|
|
|
- qstate = &vfop->op_p->qctor.qstate;
|
|
|
- memset(qstate , 0, sizeof(*qstate));
|
|
|
- qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
|
|
|
- vfop->state = BNX2X_VFOP_QFLR_DONE;
|
|
|
-
|
|
|
- DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
|
|
|
- vf->abs_vfid, qstate->q_obj->state);
|
|
|
-
|
|
|
- if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
|
|
|
- qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
|
|
|
- qstate->cmd = BNX2X_Q_CMD_TERMINATE;
|
|
|
- vfop->rc = bnx2x_queue_state_change(bp, qstate);
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
|
|
|
- } else {
|
|
|
- goto op_done;
|
|
|
- }
|
|
|
+ }
|
|
|
|
|
|
+ /* Schedule the configuration of any pending vlan filters */
|
|
|
+ vf->cfg_flags |= VF_CFG_VLAN;
|
|
|
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
|
|
|
+ BNX2X_MSG_IOV);
|
|
|
+ return 0;
|
|
|
op_err:
|
|
|
- BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
|
|
|
- vf->abs_vfid, qid, vfop->rc);
|
|
|
-op_done:
|
|
|
- case BNX2X_VFOP_QFLR_DONE:
|
|
|
- bnx2x_vfop_end(bp, vf, vfop);
|
|
|
- return;
|
|
|
- default:
|
|
|
- bnx2x_vfop_default(state);
|
|
|
- }
|
|
|
-op_pending:
|
|
|
- return;
|
|
|
+ BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
|
|
|
+ return rc;
|
|
|
}
|
|
|
|
|
|
-static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
+static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
|
|
|
int qid)
|
|
|
{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
|
|
|
-
|
|
|
- if (vfop) {
|
|
|
- vfop->args.qx.qid = qid;
|
|
|
- if ((qid == LEADING_IDX) &&
|
|
|
- bnx2x_validate_vf_sp_objs(bp, vf, false))
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
|
|
|
- bnx2x_vfop_qflr, cmd->done);
|
|
|
- else
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE,
|
|
|
- bnx2x_vfop_qflr, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
|
|
|
- cmd->block);
|
|
|
- }
|
|
|
- return -ENOMEM;
|
|
|
-}
|
|
|
-
|
|
|
-/* VFOP multi-casts */
|
|
|
-static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
|
|
|
- struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
|
|
|
- struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
|
|
|
- struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
|
|
|
- enum bnx2x_vfop_mcast_state state = vfop->state;
|
|
|
- int i;
|
|
|
+ int rc;
|
|
|
|
|
|
- bnx2x_vfop_reset_wq(vf);
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
|
|
|
|
|
|
- if (vfop->rc < 0)
|
|
|
- goto op_err;
|
|
|
+ /* If needed, clean the filtering data base */
|
|
|
+ if ((qid == LEADING_IDX) &&
|
|
|
+ bnx2x_validate_vf_sp_objs(bp, vf, false)) {
|
|
|
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
|
|
|
+ if (rc)
|
|
|
+ goto op_err;
|
|
|
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
|
|
|
+ if (rc)
|
|
|
+ goto op_err;
|
|
|
+ }
|
|
|
|
|
|
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
|
|
|
-
|
|
|
- switch (state) {
|
|
|
- case BNX2X_VFOP_MCAST_DEL:
|
|
|
- /* clear existing mcasts */
|
|
|
- vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
|
|
|
- : BNX2X_VFOP_MCAST_CHK_DONE;
|
|
|
- mcast->mcast_list_len = vf->mcast_list_len;
|
|
|
- vf->mcast_list_len = args->mc_num;
|
|
|
- vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
|
|
|
-
|
|
|
- case BNX2X_VFOP_MCAST_ADD:
|
|
|
- if (raw->check_pending(raw))
|
|
|
- goto op_pending;
|
|
|
-
|
|
|
- /* update mcast list on the ramrod params */
|
|
|
- INIT_LIST_HEAD(&mcast->mcast_list);
|
|
|
- for (i = 0; i < args->mc_num; i++)
|
|
|
- list_add_tail(&(args->mc[i].link),
|
|
|
- &mcast->mcast_list);
|
|
|
- mcast->mcast_list_len = args->mc_num;
|
|
|
+ /* Terminate queue */
|
|
|
+ if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
|
|
|
+ struct bnx2x_queue_state_params qstate;
|
|
|
|
|
|
- /* add new mcasts */
|
|
|
- vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
|
|
|
- vfop->rc = bnx2x_config_mcast(bp, mcast,
|
|
|
- BNX2X_MCAST_CMD_ADD);
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
|
|
|
-
|
|
|
- case BNX2X_VFOP_MCAST_CHK_DONE:
|
|
|
- vfop->rc = raw->check_pending(raw) ? 1 : 0;
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
|
|
|
- default:
|
|
|
- bnx2x_vfop_default(state);
|
|
|
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
|
|
|
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
|
|
|
+ qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
|
|
|
+ qstate.cmd = BNX2X_Q_CMD_TERMINATE;
|
|
|
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
|
|
|
+ rc = bnx2x_queue_state_change(bp, &qstate);
|
|
|
+ if (rc)
|
|
|
+ goto op_err;
|
|
|
}
|
|
|
-op_err:
|
|
|
- BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
|
|
|
-op_done:
|
|
|
- kfree(args->mc);
|
|
|
- bnx2x_vfop_end(bp, vf, vfop);
|
|
|
-op_pending:
|
|
|
- return;
|
|
|
-}
|
|
|
|
|
|
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- bnx2x_mac_addr_t *mcasts,
|
|
|
- int mcast_num, bool drv_only)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop = NULL;
|
|
|
- size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
|
|
|
- struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
|
|
|
- NULL;
|
|
|
-
|
|
|
- if (!mc_sz || mc) {
|
|
|
- vfop = bnx2x_vfop_add(bp, vf);
|
|
|
- if (vfop) {
|
|
|
- int i;
|
|
|
- struct bnx2x_mcast_ramrod_params *ramrod =
|
|
|
- &vf->op_params.mcast;
|
|
|
-
|
|
|
- /* set ramrod params */
|
|
|
- memset(ramrod, 0, sizeof(*ramrod));
|
|
|
- ramrod->mcast_obj = &vf->mcast_obj;
|
|
|
- if (drv_only)
|
|
|
- set_bit(RAMROD_DRV_CLR_ONLY,
|
|
|
- &ramrod->ramrod_flags);
|
|
|
-
|
|
|
- /* copy mcasts pointers */
|
|
|
- vfop->args.mc_list.mc_num = mcast_num;
|
|
|
- vfop->args.mc_list.mc = mc;
|
|
|
- for (i = 0; i < mcast_num; i++)
|
|
|
- mc[i].mac = mcasts[i];
|
|
|
-
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
|
|
|
- bnx2x_vfop_mcast, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
|
|
|
- cmd->block);
|
|
|
- } else {
|
|
|
- kfree(mc);
|
|
|
- }
|
|
|
- }
|
|
|
- return -ENOMEM;
|
|
|
+ return 0;
|
|
|
+op_err:
|
|
|
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
|
|
|
+ return rc;
|
|
|
}
|
|
|
|
|
|
-/* VFOP rx-mode */
|
|
|
-static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
|
|
|
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
|
|
|
{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
|
|
|
- struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
|
|
|
- enum bnx2x_vfop_rxmode_state state = vfop->state;
|
|
|
+ struct bnx2x_mcast_list_elem *mc = NULL;
|
|
|
+ struct bnx2x_mcast_ramrod_params mcast;
|
|
|
+ int rc, i;
|
|
|
|
|
|
- bnx2x_vfop_reset_wq(vf);
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
|
|
|
|
|
|
- if (vfop->rc < 0)
|
|
|
- goto op_err;
|
|
|
+ /* Prepare Multicast command */
|
|
|
+ memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
|
|
|
+ mcast.mcast_obj = &vf->mcast_obj;
|
|
|
+ if (drv_only)
|
|
|
+ set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
|
|
|
+ else
|
|
|
+ set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
|
|
|
+ if (mc_num) {
|
|
|
+ mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!mc) {
|
|
|
+ BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n");
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+ }
|
|
|
|
|
|
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
|
|
|
+ /* clear existing mcasts */
|
|
|
+ mcast.mcast_list_len = vf->mcast_list_len;
|
|
|
+ vf->mcast_list_len = mc_num;
|
|
|
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
|
|
|
+ if (rc) {
|
|
|
+ BNX2X_ERR("Failed to remove multicasts\n");
|
|
|
+ return rc;
|
|
|
+ }
|
|
|
|
|
|
- switch (state) {
|
|
|
- case BNX2X_VFOP_RXMODE_CONFIG:
|
|
|
- /* next state */
|
|
|
- vfop->state = BNX2X_VFOP_RXMODE_DONE;
|
|
|
+ /* update mcast list on the ramrod params */
|
|
|
+ if (mc_num) {
|
|
|
+ INIT_LIST_HEAD(&mcast.mcast_list);
|
|
|
+ for (i = 0; i < mc_num; i++) {
|
|
|
+ mc[i].mac = mcasts[i];
|
|
|
+ list_add_tail(&mc[i].link,
|
|
|
+ &mcast.mcast_list);
|
|
|
+ }
|
|
|
|
|
|
- /* record the accept flags in vfdb so hypervisor can modify them
|
|
|
- * if necessary
|
|
|
- */
|
|
|
- bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
|
|
|
- ramrod->rx_accept_flags;
|
|
|
- vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
|
|
|
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
|
|
|
-op_err:
|
|
|
- BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
|
|
|
-op_done:
|
|
|
- case BNX2X_VFOP_RXMODE_DONE:
|
|
|
- bnx2x_vfop_end(bp, vf, vfop);
|
|
|
- return;
|
|
|
- default:
|
|
|
- bnx2x_vfop_default(state);
|
|
|
+ /* add new mcasts */
|
|
|
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
|
|
|
+ if (rc)
|
|
|
+ BNX2X_ERR("Failed to add multicasts\n");
|
|
|
+ kfree(mc);
|
|
|
}
|
|
|
-op_pending:
|
|
|
- return;
|
|
|
+
|
|
|
+ return rc;
|
|
|
}
|
|
|
|
|
|
static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
|
|
@@ -1268,121 +642,56 @@ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
|
|
|
ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
|
|
|
}
|
|
|
|
|
|
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- int qid, unsigned long accept_flags)
|
|
|
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
|
|
|
+ int qid, unsigned long accept_flags)
|
|
|
{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
|
|
|
-
|
|
|
- if (vfop) {
|
|
|
- struct bnx2x_rx_mode_ramrod_params *ramrod =
|
|
|
- &vf->op_params.rx_mode;
|
|
|
+ struct bnx2x_rx_mode_ramrod_params ramrod;
|
|
|
|
|
|
- bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
|
|
|
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
|
|
|
- bnx2x_vfop_rxmode, cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
|
|
|
- cmd->block);
|
|
|
- }
|
|
|
- return -ENOMEM;
|
|
|
+ bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
|
|
|
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
|
|
|
+ vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
|
|
|
+ return bnx2x_config_rx_mode(bp, &ramrod);
|
|
|
}
|
|
|
|
|
|
-/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
|
|
|
- * queue destructor)
|
|
|
- */
|
|
|
-static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
|
|
|
{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
|
|
|
- int qid = vfop->args.qx.qid;
|
|
|
- enum bnx2x_vfop_qteardown_state state = vfop->state;
|
|
|
- struct bnx2x_vfop_cmd cmd;
|
|
|
-
|
|
|
- if (vfop->rc < 0)
|
|
|
- goto op_err;
|
|
|
-
|
|
|
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
|
|
|
-
|
|
|
- cmd.done = bnx2x_vfop_qdown;
|
|
|
- cmd.block = false;
|
|
|
-
|
|
|
- switch (state) {
|
|
|
- case BNX2X_VFOP_QTEARDOWN_RXMODE:
|
|
|
- /* Drop all */
|
|
|
- if (bnx2x_validate_vf_sp_objs(bp, vf, true))
|
|
|
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
|
|
|
- else
|
|
|
- vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
|
|
|
- vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
|
|
|
- if (vfop->rc)
|
|
|
- goto op_err;
|
|
|
- return;
|
|
|
-
|
|
|
- case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
|
|
|
- /* vlan-clear-all: don't consume credit */
|
|
|
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
|
|
|
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
|
|
|
- if (vfop->rc)
|
|
|
- goto op_err;
|
|
|
- return;
|
|
|
-
|
|
|
- case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
|
|
|
- /* mac-clear-all: consume credit */
|
|
|
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
|
|
|
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
|
|
|
- if (vfop->rc)
|
|
|
- goto op_err;
|
|
|
- return;
|
|
|
+ int rc;
|
|
|
|
|
|
- case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
|
|
|
- vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
|
|
|
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
|
|
|
- if (vfop->rc)
|
|
|
- goto op_err;
|
|
|
- return;
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
|
|
|
|
|
|
- case BNX2X_VFOP_QTEARDOWN_QDTOR:
|
|
|
- /* run the queue destruction flow */
|
|
|
- DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
|
|
|
- vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
|
|
|
- DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
|
|
|
- vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
|
|
|
- DP(BNX2X_MSG_IOV, "returned from cmd\n");
|
|
|
- if (vfop->rc)
|
|
|
+ /* Remove all classification configuration for leading queue */
|
|
|
+ if (qid == LEADING_IDX) {
|
|
|
+ rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
|
|
|
+ if (rc)
|
|
|
goto op_err;
|
|
|
- return;
|
|
|
-op_err:
|
|
|
- BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
|
|
|
- vf->abs_vfid, qid, vfop->rc);
|
|
|
|
|
|
- case BNX2X_VFOP_QTEARDOWN_DONE:
|
|
|
- bnx2x_vfop_end(bp, vf, vfop);
|
|
|
- return;
|
|
|
- default:
|
|
|
- bnx2x_vfop_default(state);
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- struct bnx2x_vfop_cmd *cmd,
|
|
|
- int qid)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
|
|
|
-
|
|
|
- /* for non leading queues skip directly to qdown sate */
|
|
|
- if (vfop) {
|
|
|
- vfop->args.qx.qid = qid;
|
|
|
- bnx2x_vfop_opset(qid == LEADING_IDX ?
|
|
|
- BNX2X_VFOP_QTEARDOWN_RXMODE :
|
|
|
- BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
|
|
|
- cmd->done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
|
|
|
- cmd->block);
|
|
|
+ /* Remove filtering if feasible */
|
|
|
+ if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
|
|
|
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
|
|
|
+ false, false);
|
|
|
+ if (rc)
|
|
|
+ goto op_err;
|
|
|
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
|
|
|
+ false, true);
|
|
|
+ if (rc)
|
|
|
+ goto op_err;
|
|
|
+ rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
|
|
|
+ if (rc)
|
|
|
+ goto op_err;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
- return -ENOMEM;
|
|
|
+ /* Destroy queue */
|
|
|
+ rc = bnx2x_vf_queue_destroy(bp, vf, qid);
|
|
|
+ if (rc)
|
|
|
+ goto op_err;
|
|
|
+ return rc;
|
|
|
+op_err:
|
|
|
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n",
|
|
|
+ vf->abs_vfid, qid, rc);
|
|
|
+ return rc;
|
|
|
}
|
|
|
|
|
|
/* VF enable primitives
|
|
@@ -1582,120 +891,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
bnx2x_tx_hw_flushed(bp, poll_cnt);
|
|
|
}
|
|
|
|
|
|
-static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
+static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
|
|
{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
|
|
|
- struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
|
|
|
- enum bnx2x_vfop_flr_state state = vfop->state;
|
|
|
- struct bnx2x_vfop_cmd cmd = {
|
|
|
- .done = bnx2x_vfop_flr,
|
|
|
- .block = false,
|
|
|
- };
|
|
|
-
|
|
|
- if (vfop->rc < 0)
|
|
|
- goto op_err;
|
|
|
+ int rc, i;
|
|
|
|
|
|
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
|
|
|
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
|
|
|
|
|
|
- switch (state) {
|
|
|
- case BNX2X_VFOP_FLR_QUEUES:
|
|
|
- /* the cleanup operations are valid if and only if the VF
|
|
|
- * was first acquired.
|
|
|
- */
|
|
|
- if (++(qx->qid) < vf_rxq_count(vf)) {
|
|
|
- vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
|
|
|
- qx->qid);
|
|
|
- if (vfop->rc)
|
|
|
- goto op_err;
|
|
|
- return;
|
|
|
- }
|
|
|
- /* remove multicasts */
|
|
|
- vfop->state = BNX2X_VFOP_FLR_HW;
|
|
|
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
|
|
|
- 0, true);
|
|
|
- if (vfop->rc)
|
|
|
- goto op_err;
|
|
|
- return;
|
|
|
- case BNX2X_VFOP_FLR_HW:
|
|
|
+ /* the cleanup operations are valid if and only if the VF
|
|
|
+ * was first acquired.
|
|
|
+ */
|
|
|
+ for (i = 0; i < vf_rxq_count(vf); i++) {
|
|
|
+ rc = bnx2x_vf_queue_flr(bp, vf, i);
|
|
|
+ if (rc)
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
|
|
|
- /* dispatch final cleanup and wait for HW queues to flush */
|
|
|
- bnx2x_vf_flr_clnup_hw(bp, vf);
|
|
|
+ /* remove multicasts */
|
|
|
+ bnx2x_vf_mcast(bp, vf, NULL, 0, true);
|
|
|
|
|
|
- /* release VF resources */
|
|
|
- bnx2x_vf_free_resc(bp, vf);
|
|
|
+ /* dispatch final cleanup and wait for HW queues to flush */
|
|
|
+ bnx2x_vf_flr_clnup_hw(bp, vf);
|
|
|
|
|
|
- /* re-open the mailbox */
|
|
|
- bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
|
|
|
+ /* release VF resources */
|
|
|
+ bnx2x_vf_free_resc(bp, vf);
|
|
|
|
|
|
- goto op_done;
|
|
|
- default:
|
|
|
- bnx2x_vfop_default(state);
|
|
|
- }
|
|
|
-op_err:
|
|
|
- BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
|
|
|
-op_done:
|
|
|
- vf->flr_clnup_stage = VF_FLR_ACK;
|
|
|
- bnx2x_vfop_end(bp, vf, vfop);
|
|
|
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
|
|
|
-}
|
|
|
-
|
|
|
-static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
|
|
|
- struct bnx2x_virtf *vf,
|
|
|
- vfop_handler_t done)
|
|
|
-{
|
|
|
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
|
|
|
- if (vfop) {
|
|
|
- vfop->args.qx.qid = -1; /* loop */
|
|
|
- bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
|
|
|
- bnx2x_vfop_flr, done);
|
|
|
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
|
|
|
- }
|
|
|
- return -ENOMEM;
|
|
|
+ /* re-open the mailbox */
|
|
|
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
|
|
|
+ return;
|
|
|
+out:
|
|
|
+ BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
|
|
|
+ vf->abs_vfid, i, rc);
|
|
|
}
|
|
|
|
|
|
-static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
|
|
|
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
|
|
|
{
|
|
|
- int i = prev_vf ? prev_vf->index + 1 : 0;
|
|
|
struct bnx2x_virtf *vf;
|
|
|
+ int i;
|
|
|
|
|
|
- /* find next VF to cleanup */
|
|
|
-next_vf_to_clean:
|
|
|
- for (;
|
|
|
- i < BNX2X_NR_VIRTFN(bp) &&
|
|
|
- (bnx2x_vf(bp, i, state) != VF_RESET ||
|
|
|
- bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
|
|
|
- i++)
|
|
|
- ;
|
|
|
+ for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
|
|
|
+ /* VF should be RESET & in FLR cleanup states */
|
|
|
+ if (bnx2x_vf(bp, i, state) != VF_RESET ||
|
|
|
+ !bnx2x_vf(bp, i, flr_clnup_stage))
|
|
|
+ continue;
|
|
|
|
|
|
- DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
|
|
|
- BNX2X_NR_VIRTFN(bp));
|
|
|
+ DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
|
|
|
+ i, BNX2X_NR_VIRTFN(bp));
|
|
|
|
|
|
- if (i < BNX2X_NR_VIRTFN(bp)) {
|
|
|
vf = BP_VF(bp, i);
|
|
|
|
|
|
/* lock the vf pf channel */
|
|
|
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
|
|
|
|
|
|
/* invoke the VF FLR SM */
|
|
|
- if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
|
|
|
- BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
|
|
|
- vf->abs_vfid);
|
|
|
-
|
|
|
- /* mark the VF to be ACKED and continue */
|
|
|
- vf->flr_clnup_stage = VF_FLR_ACK;
|
|
|
- goto next_vf_to_clean;
|
|
|
- }
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- /* we are done, update vf records */
|
|
|
- for_each_vf(bp, i) {
|
|
|
- vf = BP_VF(bp, i);
|
|
|
+ bnx2x_vf_flr(bp, vf);
|
|
|
|
|
|
- if (vf->flr_clnup_stage != VF_FLR_ACK)
|
|
|
- continue;
|
|
|
-
|
|
|
- vf->flr_clnup_stage = VF_FLR_EPILOG;
|
|
|
+ /* mark the VF to be ACKED and continue */
|
|
|
+ vf->flr_clnup_stage = false;
|
|
|
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
|
|
|
}
|
|
|
|
|
|
/* Acknowledge the handled VFs.
|
|
@@ -1745,7 +997,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
|
|
|
if (reset) {
|
|
|
/* set as reset and ready for cleanup */
|
|
|
vf->state = VF_RESET;
|
|
|
- vf->flr_clnup_stage = VF_FLR_CLN;
|
|
|
+ vf->flr_clnup_stage = true;
|
|
|
|
|
|
DP(BNX2X_MSG_IOV,
|
|
|
"Initiating Final cleanup for VF %d\n",
|
|
@@ -1754,7 +1006,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
|
|
|
}
|
|
|
|
|
|
/* do the FLR cleanup for all marked VFs*/
|
|
|
- bnx2x_vf_flr_clnup(bp, NULL);
|
|
|
+ bnx2x_vf_flr_clnup(bp);
|
|
|
}
|
|
|
|
|
|
/* IOV global initialization routines */
|
|
@@ -2021,7 +1273,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 		bnx2x_vf(bp, i, index) = i;
 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
 		bnx2x_vf(bp, i, state) = VF_FREE;
-		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
 	}
@@ -2042,6 +1293,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 		goto failed;
 	}
 
+	/* Prepare the VFs event synchronization mechanism */
+	mutex_init(&bp->vfdb->event_mutex);
+
 	return 0;
 failed:
 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -2285,7 +1539,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
 
 	/* release all the VFs */
 	for_each_vf(bp, i)
-		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
+		bnx2x_vf_release(bp, BP_VF(bp, i));
 
 	return 0;
 }
@@ -2375,6 +1629,12 @@ void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
 	smp_mb__after_clear_bit();
 }
 
+static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
+					   struct bnx2x_virtf *vf)
+{
+	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
+}
+
 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
 {
 	struct bnx2x_virtf *vf;
@@ -2399,6 +1659,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
 	case EVENT_RING_OPCODE_MULTICAST_RULES:
 	case EVENT_RING_OPCODE_FILTERS_RULES:
+	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
 		cid = (elem->message.data.eth_event.echo &
 		       BNX2X_SWCID_MASK);
 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
@@ -2463,13 +1724,15 @@ get_vf:
 		   vf->abs_vfid, qidx);
 		bnx2x_vf_handle_filters_eqe(bp, vf);
 		break;
+	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
+		   vf->abs_vfid, qidx);
+		bnx2x_vf_handle_rss_update_eqe(bp, vf);
 	case EVENT_RING_OPCODE_VF_FLR:
 	case EVENT_RING_OPCODE_MALICIOUS_VF:
 		/* Do nothing for now */
 		return 0;
 	}
-	/* SRIOV: reschedule any 'in_progress' operations */
-	bnx2x_iov_sp_event(bp, cid, false);
 
 	return 0;
 }
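
The new EVENT_RING_OPCODE_RSS_UPDATE_RULES handling only clears the pending bit on the VF's RSS object; the PF context that issued the ramrod is already sleeping on that bit. Below is a minimal sketch of the issuing side, with the helper name vf_rss_config_sync chosen purely for illustration; it is essentially what the bnx2x_vf_rss_update() helper added later in this patch does:

/* Sketch only: issue a VF RSS configuration ramrod and block until the
 * EQE handled above clears the pending bit on vf->rss_conf_obj.
 */
static int vf_rss_config_sync(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_config_rss_params *rss)
{
	/* RAMROD_COMP_WAIT turns the request into a sleeping wait for
	 * completion, which is what lets the VFOP state machine go away.
	 */
	set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
	return bnx2x_config_rss(bp, rss);
}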
@@ -2506,23 +1769,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
 	}
 }
 
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
-{
-	struct bnx2x_virtf *vf;
-
-	/* check if the cid is the VF range */
-	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
-		return;
-
-	vf = bnx2x_vf_by_cid(bp, vf_cid);
-	if (vf) {
-		/* set in_progress flag */
-		atomic_set(&vf->op_in_progress, 1);
-		if (queue_work)
-			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
-	}
-}
-
 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 {
 	int i;
@@ -2604,33 +1850,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
 }
 
-void bnx2x_iov_sp_task(struct bnx2x *bp)
-{
-	int i;
-
-	if (!IS_SRIOV(bp))
-		return;
-	/* Iterate over all VFs and invoke state transition for VFs with
-	 * 'in-progress' slow-path operations
-	 */
-	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
-	       "searching for pending vf operations\n");
-	for_each_vf(bp, i) {
-		struct bnx2x_virtf *vf = BP_VF(bp, i);
-
-		if (!vf) {
-			BNX2X_ERR("VF was null! skipping...\n");
-			continue;
-		}
-
-		if (!list_empty(&vf->op_list_head) &&
-		    atomic_read(&vf->op_in_progress)) {
-			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
-			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
-		}
-	}
-}
-
 static inline
 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
 {
@@ -2866,52 +2085,26 @@ static void bnx2x_set_vf_state(void *cookie)
 	p->vf->state = p->state;
 }
 
-/* VFOP close (teardown the queues, delete mcasts and close HW) */
-static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
-	enum bnx2x_vfop_close_state state = vfop->state;
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vfop_close,
-		.block = false,
-	};
-
-	if (vfop->rc < 0)
-		goto op_err;
-
-	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-	switch (state) {
-	case BNX2X_VFOP_CLOSE_QUEUES:
-
-		if (++(qx->qid) < vf_rxq_count(vf)) {
-			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
-			if (vfop->rc)
-				goto op_err;
-			return;
-		}
-		vfop->state = BNX2X_VFOP_CLOSE_HW;
-		vfop->rc = 0;
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+	int rc = 0, i;
 
-	case BNX2X_VFOP_CLOSE_HW:
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-		/* disable the interrupts */
-		DP(BNX2X_MSG_IOV, "disabling igu\n");
-		bnx2x_vf_igu_disable(bp, vf);
+	/* Close all queues */
+	for (i = 0; i < vf_rxq_count(vf); i++) {
+		rc = bnx2x_vf_queue_teardown(bp, vf, i);
+		if (rc)
+			goto op_err;
+	}
 
-		/* disable the VF */
-		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
-		bnx2x_vf_clr_qtbl(bp, vf);
+	/* disable the interrupts */
+	DP(BNX2X_MSG_IOV, "disabling igu\n");
+	bnx2x_vf_igu_disable(bp, vf);
 
-		goto op_done;
-	default:
-		bnx2x_vfop_default(state);
-	}
-op_err:
-	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
+	/* disable the VF */
+	DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+	bnx2x_vf_clr_qtbl(bp, vf);
 
 	/* need to make sure there are no outstanding stats ramrods which may
 	 * cause the device to access the VF's stats buffer which it will free
@@ -2926,43 +2119,20 @@ op_done:
 	}
 
 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
-	bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
-	/* Not supported at the moment; Exists for macros only */
-	return;
-}
 
-int bnx2x_vfop_close_cmd(struct bnx2x *bp,
-			 struct bnx2x_virtf *vf,
-			 struct bnx2x_vfop_cmd *cmd)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-	if (vfop) {
-		vfop->args.qx.qid = -1; /* loop */
-		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
-				 bnx2x_vfop_close, cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
-					     cmd->block);
-	}
-	return -ENOMEM;
+	return 0;
+op_err:
+	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
+	return rc;
 }
 
 /* VF release can be called either: 1. The VF was acquired but
  * not enabled 2. the vf was enabled or in the process of being
  * enabled
  */
-static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vfop_release,
-		.block = false,
-	};
-
-	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
-
-	if (vfop->rc < 0)
-		goto op_err;
+	int rc;
 
 	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
 	   vf->state == VF_FREE ? "Free" :
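
For readability, this is how bnx2x_vf_close() reads once the two hunks above are applied; it is a consolidated sketch, not an additional change, and the stats-ramrod quiescing block between the qtbl clear and the return is elided:

int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0, i;

	/* tear down every rx queue the VF owns, in order */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_teardown(bp, vf, i);
		if (rc)
			goto op_err;
	}

	bnx2x_vf_igu_disable(bp, vf);	/* mask the VF's interrupts */
	bnx2x_vf_clr_qtbl(bp, vf);	/* remove it from the queue table */

	/* ... wait out pending stats ramrods, set state to VF_ACQUIRED ... */
	return 0;
op_err:
	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
	return rc;
}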
@@ -2973,193 +2143,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 	switch (vf->state) {
 	case VF_ENABLED:
-		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
-		if (vfop->rc)
+		rc = bnx2x_vf_close(bp, vf);
+		if (rc)
 			goto op_err;
-		return;
-
+		/* Fallthrough to release resources */
 	case VF_ACQUIRED:
 		DP(BNX2X_MSG_IOV, "about to free resources\n");
 		bnx2x_vf_free_resc(bp, vf);
-		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
-		goto op_done;
+		break;
 
 	case VF_FREE:
 	case VF_RESET:
-		/* do nothing */
-		goto op_done;
 	default:
-		bnx2x_vfop_default(vf->state);
-	}
-op_err:
-	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
-	bnx2x_vfop_end(bp, vf, vfop);
-}
-
-static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	enum bnx2x_vfop_rss_state state;
-
-	if (!vfop) {
-		BNX2X_ERR("vfop was null\n");
-		return;
+		break;
 	}
-
-	state = vfop->state;
-	bnx2x_vfop_reset_wq(vf);
-
-	if (vfop->rc < 0)
-		goto op_err;
-
-	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-	switch (state) {
-	case BNX2X_VFOP_RSS_CONFIG:
-		/* next state */
-		vfop->state = BNX2X_VFOP_RSS_DONE;
-		bnx2x_config_rss(bp, &vfop->op_p->rss);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+	return 0;
 op_err:
-	BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
-op_done:
-	case BNX2X_VFOP_RSS_DONE:
-		bnx2x_vfop_end(bp, vf, vfop);
-		return;
-	default:
-		bnx2x_vfop_default(state);
-	}
-op_pending:
-	return;
-}
-
-int bnx2x_vfop_release_cmd(struct bnx2x *bp,
-			   struct bnx2x_virtf *vf,
-			   struct bnx2x_vfop_cmd *cmd)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-	if (vfop) {
-		bnx2x_vfop_opset(-1, /* use vf->state */
-				 bnx2x_vfop_release, cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
-					     cmd->block);
-	}
-	return -ENOMEM;
+	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
+	return rc;
 }
 
-int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
-		       struct bnx2x_virtf *vf,
-		       struct bnx2x_vfop_cmd *cmd)
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			struct bnx2x_config_rss_params *rss)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
-	if (vfop) {
-		bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
-				 cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
-					     cmd->block);
-	}
-	return -ENOMEM;
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+	set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
+	return bnx2x_config_rss(bp, rss);
 }
 
-/* VFOP tpa update, send update on all queues */
-static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			struct vfpf_tpa_tlv *tlv,
+			struct bnx2x_queue_update_tpa_params *params)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
-	enum bnx2x_vfop_tpa_state state = vfop->state;
-
-	bnx2x_vfop_reset_wq(vf);
-
-	if (vfop->rc < 0)
-		goto op_err;
+	aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
+	struct bnx2x_queue_state_params qstate;
+	int qid, rc = 0;
 
-	DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
-	   vf->abs_vfid, tpa_args->qid,
-	   state);
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-	switch (state) {
-	case BNX2X_VFOP_TPA_CONFIG:
+	/* Set ramrod params */
+	memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+	memcpy(&qstate.params.update_tpa, params,
+	       sizeof(struct bnx2x_queue_update_tpa_params));
+	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
+	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
 
-		if (tpa_args->qid < vf_rxq_count(vf)) {
-			struct bnx2x_queue_state_params *qstate =
-				&vf->op_params.qstate;
-
-			qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
-
-			/* The only thing that changes for the ramrod params
-			 * between calls is the sge_map
-			 */
-			qstate->params.update_tpa.sge_map =
-				tpa_args->sge_map[tpa_args->qid];
-
-			DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
-			   tpa_args->qid,
-			   U64_HI(qstate->params.update_tpa.sge_map),
-			   U64_LO(qstate->params.update_tpa.sge_map));
-			qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
-			vfop->rc = bnx2x_queue_state_change(bp, qstate);
-
-			tpa_args->qid++;
-			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+	for (qid = 0; qid < vf_rxq_count(vf); qid++) {
+		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+		qstate.params.update_tpa.sge_map = sge_addr[qid];
+		DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
+		   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
+		   U64_LO(sge_addr[qid]));
+		rc = bnx2x_queue_state_change(bp, &qstate);
+		if (rc) {
+			BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
+				  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
+				  vf->abs_vfid, qid);
+			return rc;
 		}
-		vfop->state = BNX2X_VFOP_TPA_DONE;
-		vfop->rc = 0;
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
-	BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
-op_done:
-	case BNX2X_VFOP_TPA_DONE:
-		bnx2x_vfop_end(bp, vf, vfop);
-		return;
-	default:
-		bnx2x_vfop_default(state);
 	}
-op_pending:
-	return;
-}
-
-int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
-		       struct bnx2x_virtf *vf,
-		       struct bnx2x_vfop_cmd *cmd,
-		       struct vfpf_tpa_tlv *tpa_tlv)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
-	if (vfop) {
-		vfop->args.qx.qid = 0; /* loop */
-		memcpy(&vfop->args.tpa.sge_map,
-		       tpa_tlv->tpa_client_info.sge_addr,
-		       sizeof(vfop->args.tpa.sge_map));
-		bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
-				 bnx2x_vfop_tpa, cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
-					     cmd->block);
-	}
-	return -ENOMEM;
+	return rc;
 }
 
 /* VF release ~ VF close + VF release-resources
  * Release is the ultimate SW shutdown and is called whenever an
  * irrecoverable error is encountered.
  */
-void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
-	struct bnx2x_vfop_cmd cmd = {
-		.done = NULL,
-		.block = block,
-	};
 	int rc;
 
 	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
 
-	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+	rc = bnx2x_vf_free(bp, vf);
 	if (rc)
 		WARN(rc,
 		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
 		     vf->abs_vfid, rc);
+	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+	return rc;
 }
 
 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
@@ -3875,3 +2939,28 @@ void bnx2x_iov_channel_down(struct bnx2x *bp)
 		bnx2x_post_vf_bulletin(bp, vf_idx);
 	}
 }
+
+void bnx2x_iov_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
+
+	if (!netif_running(bp->dev))
+		return;
+
+	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
+			       &bp->iov_task_state))
+		bnx2x_vf_handle_flr_event(bp);
+
+	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
+			       &bp->iov_task_state))
+		bnx2x_vf_mbx(bp);
+}
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
+{
+	smp_mb__before_clear_bit();
+	set_bit(flag, &bp->iov_task_state);
+	smp_mb__after_clear_bit();
+	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
+}
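
The two helpers above replace the old sp_task polling: event sources now just set a flag and kick the dedicated IOV workqueue. The call sites below are illustrative assumptions, not part of this hunk; they only show the intended usage of bnx2x_schedule_iov_task():

/* Assumed call sites, for illustration: FLR and mailbox event handlers
 * defer the heavy lifting to bnx2x_iov_task() instead of running VF
 * slow-path work in the interrupt / event-queue context.
 */
static void example_on_vf_flr_eqe(struct bnx2x *bp)
{
	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_FLR);
}

static void example_on_vf_mailbox_msg(struct bnx2x *bp)
{
	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
}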