@@ -126,7 +126,12 @@ int octeon_init_instr_queue(struct octeon_device *oct,

 	/* Initialize the spinlock for this instruction queue */
 	spin_lock_init(&iq->lock);
-	spin_lock_init(&iq->post_lock);
+	if (iq_no == 0) {
+		iq->allow_soft_cmds = true;
+		spin_lock_init(&iq->post_lock);
+	} else {
+		iq->allow_soft_cmds = false;
+	}

 	spin_lock_init(&iq->iq_flush_running_lock);

@@ -566,7 +571,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
 	/* Get the lock and prevent other tasks and tx interrupt handler from
 	 * running.
 	 */
-	spin_lock_bh(&iq->post_lock);
+	if (iq->allow_soft_cmds)
+		spin_lock_bh(&iq->post_lock);

 	st = __post_command2(iq, cmd);

@@ -583,7 +589,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
 		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
 	}

-	spin_unlock_bh(&iq->post_lock);
+	if (iq->allow_soft_cmds)
+		spin_unlock_bh(&iq->post_lock);

 	/* This is only done here to expedite packets being flushed
 	 * for cases where there are no IQ completion interrupts.
@@ -702,11 +709,20 @@ octeon_prepare_soft_command(struct octeon_device *oct,
 int octeon_send_soft_command(struct octeon_device *oct,
 			     struct octeon_soft_command *sc)
 {
+	struct octeon_instr_queue *iq;
 	struct octeon_instr_ih2 *ih2;
 	struct octeon_instr_ih3 *ih3;
 	struct octeon_instr_irh *irh;
 	u32 len;

+	iq = oct->instr_queue[sc->iq_no];
+	if (!iq->allow_soft_cmds) {
+		dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
+			sc->iq_no);
+		INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
+		return IQ_SEND_FAILED;
+	}
+
 	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
 		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
 		if (ih3->dlengsz) {
|