Merge branch 'liquidio-VF'

Raghu Vatsavayi says:

====================
liquidio VF operations

This patch series adds support for VF device specific operations
like mailbox, queues and register access. This V3 patchset also
has changes based on comments from earlier versions:

1) Removed extra 'void *' casting.
2) Fixed all cross-compilation issues reported on S390 and PowerPC
   architectures.

Please apply the patches in the following order, as they depend
on each other.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 8 years ago
parent commit 492e9d8c18

+ 12 - 0
drivers/net/ethernet/cavium/Kconfig

@@ -74,4 +74,16 @@ config OCTEON_MGMT_ETHERNET
 	  port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
 	  CN54XX, CN52XX, and CN6XXX chips.
 
+config LIQUIDIO_VF
+	tristate "Cavium LiquidIO VF support"
+	depends on 64BIT && PCI_MSI
+	select PTP_1588_CLOCK
+	---help---
+	  This driver supports Cavium LiquidIO Intelligent Server Adapter
+	  based on CN23XX chips.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called liquidio_vf. MSI-X interrupt support is required
+	  for this driver to work correctly.
+
 endif # NET_VENDOR_CAVIUM

+ 23 - 0
drivers/net/ethernet/cavium/liquidio/Makefile

@@ -11,9 +11,32 @@ liquidio-$(CONFIG_LIQUIDIO) += lio_ethtool.o \
 			cn66xx_device.o    \
 			cn68xx_device.o    \
 			cn23xx_pf_device.o \
+			cn23xx_vf_device.o \
 			octeon_mailbox.o   \
 			octeon_mem_ops.o   \
 			octeon_droq.o      \
 			octeon_nic.o
 
 liquidio-objs := lio_main.o octeon_console.o $(liquidio-y)
+
+obj-$(CONFIG_LIQUIDIO_VF) += liquidio_vf.o
+
+ifeq ($(CONFIG_LIQUIDIO)$(CONFIG_LIQUIDIO_VF), yy)
+	liquidio_vf-objs := lio_vf_main.o
+else
+liquidio_vf-$(CONFIG_LIQUIDIO_VF) += lio_ethtool.o \
+			lio_core.o         \
+			request_manager.o  \
+			response_manager.o \
+			octeon_device.o    \
+			cn66xx_device.o    \
+			cn68xx_device.o    \
+			cn23xx_pf_device.o \
+			cn23xx_vf_device.o \
+			octeon_mailbox.o   \
+			octeon_mem_ops.o   \
+			octeon_droq.o      \
+			octeon_nic.o
+
+liquidio_vf-objs := lio_vf_main.o $(liquidio_vf-y)
+endif

+ 701 - 0
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c

@@ -0,0 +1,701 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "cn23xx_vf_device.h"
+#include "octeon_main.h"
+#include "octeon_mailbox.h"
+
+u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
+{
+	/* This gives the SLI clock per microsec */
+	u32 oqticks_per_us = (u32)oct->pfvf_hsword.coproc_tics_per_us;
+
+	/* This gives the clock cycles per millisecond */
+	oqticks_per_us *= 1000;
+
+	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
+	oqticks_per_us /= 1024;
+
+	/* time_intr is in microseconds. The next 2 steps give the oq ticks
+	 * corresponding to time_intr.
+	 */
+	oqticks_per_us *= time_intr_in_us;
+	oqticks_per_us /= 1000;
+
+	return oqticks_per_us;
+}
+
+static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues)
+{
+	u32 loop = BUSY_READING_REG_VF_LOOP_COUNT;
+	int ret_val = 0;
+	u32 q_no;
+	u64 d64;
+
+	for (q_no = 0; q_no < num_queues; q_no++) {
+		/* set RST bit to 1. This bit applies to both IQ and OQ */
+		d64 = octeon_read_csr64(oct,
+					CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+		d64 |= CN23XX_PKT_INPUT_CTL_RST;
+		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+				   d64);
+	}
+
+	/* wait until the RST bit is clear or the RST and QUIET bits are set */
+	for (q_no = 0; q_no < num_queues; q_no++) {
+		u64 reg_val = octeon_read_csr64(oct,
+					CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+		while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
+		       !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
+		       loop) {
+			WRITE_ONCE(reg_val, octeon_read_csr64(
+			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
+			loop--;
+		}
+		if (!loop) {
+			dev_err(&oct->pci_dev->dev,
+				"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+				q_no);
+			return -1;
+		}
+		WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
+			   ~CN23XX_PKT_INPUT_CTL_RST);
+		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+				   READ_ONCE(reg_val));
+
+		WRITE_ONCE(reg_val, octeon_read_csr64(
+		    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
+		if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
+			dev_err(&oct->pci_dev->dev,
+				"clearing the reset failed for qno: %u\n",
+				q_no);
+			ret_val = -1;
+		}
+	}
+
+	return ret_val;
+}
+
+static int cn23xx_vf_setup_global_input_regs(struct octeon_device *oct)
+{
+	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
+	struct octeon_instr_queue *iq;
+	u64 q_no, intr_threshold;
+	u64 d64;
+
+	if (cn23xx_vf_reset_io_queues(oct, oct->sriov_info.rings_per_vf))
+		return -1;
+
+	for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
+		void __iomem *inst_cnt_reg;
+
+		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_DOORBELL(q_no),
+				   0xFFFFFFFF);
+		iq = oct->instr_queue[q_no];
+
+		if (iq)
+			inst_cnt_reg = iq->inst_cnt_reg;
+		else
+			inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
+				       CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no);
+
+		d64 = octeon_read_csr64(oct,
+					CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no));
+
+		d64 &= 0xEFFFFFFFFFFFFFFFL;
+
+		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
+				   d64);
+
+		/* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
+		 * the Input Queues
+		 */
+		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+				   CN23XX_PKT_INPUT_CTL_MASK);
+
+		/* set the wmark level to trigger PI_INT */
+		intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
+				 CN23XX_PKT_IN_DONE_WMARK_MASK;
+
+		writeq((readq(inst_cnt_reg) &
+			~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
+			  CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
+		       (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
+		       inst_cnt_reg);
+	}
+	return 0;
+}
+
+static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
+{
+	u32 reg_val;
+	u32 q_no;
+
+	for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
+		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_CREDIT(q_no),
+				 0xFFFFFFFF);
+
+		reg_val =
+		    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no));
+
+		reg_val &= 0xEFFFFFFFFFFFFFFFL;
+
+		reg_val =
+		    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
+
+		/* set IPTR & DPTR */
+		reg_val |=
+		    (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+
+		/* reset BMODE */
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+		 * for Output Queue ScatterList reset ROR_P, NSR_P
+		 */
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#else
+		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+		 * for Output Queue Data reset ROR, NSR
+		 */
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+		/* set the ES bit */
+		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+		/* write all the selected settings */
+		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no),
+				 reg_val);
+	}
+}
+
+static int cn23xx_setup_vf_device_regs(struct octeon_device *oct)
+{
+	if (cn23xx_vf_setup_global_input_regs(oct))
+		return -1;
+
+	cn23xx_vf_setup_global_output_regs(oct);
+
+	return 0;
+}
+
+static void cn23xx_setup_vf_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+	u64 pkt_in_done;
+
+	/* Write the start of the input queue's ring and its size */
+	octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(iq_no),
+			   iq->base_addr_dma);
+	octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+	/* Remember the doorbell & instruction count register addr
+	 * for this queue
+	 */
+	iq->doorbell_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_DOORBELL(iq_no);
+	iq->inst_cnt_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq_no);
+	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+	/* Store the current instruction counter (used in flush_iq
+	 * calculation)
+	 */
+	pkt_in_done = readq(iq->inst_cnt_reg);
+
+	if (oct->msix_on) {
+		/* Set CINT_ENB to enable IQ interrupt */
+		writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
+		       iq->inst_cnt_reg);
+	}
+	iq->reset_instr_cnt = 0;
+}
+
+static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+	struct octeon_droq *droq = oct->droq[oq_no];
+
+	octeon_write_csr64(oct, CN23XX_VF_SLI_OQ_BASE_ADDR64(oq_no),
+			   droq->desc_ring_dma);
+	octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+	octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+			 (droq->buffer_size | (OCT_RH_SIZE << 16)));
+
+	/* Get the mapped address of the pkt_sent and pkts_credit regs */
+	droq->pkts_sent_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_SENT(oq_no);
+	droq->pkts_credit_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq_no);
+}
+
+static void cn23xx_vf_mbox_thread(struct work_struct *work)
+{
+	struct cavium_wk *wk = (struct cavium_wk *)work;
+	struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;
+
+	octeon_mbox_process_message(mbox);
+}
+
+static int cn23xx_free_vf_mbox(struct octeon_device *oct)
+{
+	cancel_delayed_work_sync(&oct->mbox[0]->mbox_poll_wk.work);
+	vfree(oct->mbox[0]);
+	return 0;
+}
+
+static int cn23xx_setup_vf_mbox(struct octeon_device *oct)
+{
+	struct octeon_mbox *mbox = NULL;
+
+	mbox = vmalloc(sizeof(*mbox));
+	if (!mbox)
+		return 1;
+
+	memset(mbox, 0, sizeof(struct octeon_mbox));
+
+	spin_lock_init(&mbox->lock);
+
+	mbox->oct_dev = oct;
+
+	mbox->q_no = 0;
+
+	mbox->state = OCTEON_MBOX_STATE_IDLE;
+
+	/* VF mbox interrupt reg */
+	mbox->mbox_int_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_PKT_MBOX_INT(0);
+	/* VF reads from SIG0 reg */
+	mbox->mbox_read_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
+	/* VF writes into SIG1 reg */
+	mbox->mbox_write_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);
+
+	INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
+			  cn23xx_vf_mbox_thread);
+
+	mbox->mbox_poll_wk.ctxptr = mbox;
+
+	oct->mbox[0] = mbox;
+
+	writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+
+	return 0;
+}
+
+static int cn23xx_enable_vf_io_queues(struct octeon_device *oct)
+{
+	u32 q_no;
+
+	for (q_no = 0; q_no < oct->num_iqs; q_no++) {
+		u64 reg_val;
+
+		/* set the corresponding IQ IS_64B bit */
+		if (oct->io_qmask.iq64B & BIT_ULL(q_no)) {
+			reg_val = octeon_read_csr64(
+			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+			reg_val |= CN23XX_PKT_INPUT_CTL_IS_64B;
+			octeon_write_csr64(
+			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+		}
+
+		/* set the corresponding IQ ENB bit */
+		if (oct->io_qmask.iq & BIT_ULL(q_no)) {
+			reg_val = octeon_read_csr64(
+			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+			reg_val |= CN23XX_PKT_INPUT_CTL_RING_ENB;
+			octeon_write_csr64(
+			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+		}
+	}
+	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+		u32 reg_val;
+
+		/* set the corresponding OQ ENB bit */
+		if (oct->io_qmask.oq & BIT_ULL(q_no)) {
+			reg_val = octeon_read_csr(
+			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
+			reg_val |= CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+			octeon_write_csr(
+			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val);
+		}
+	}
+
+	return 0;
+}
+
+static void cn23xx_disable_vf_io_queues(struct octeon_device *oct)
+{
+	u32 num_queues = oct->num_iqs;
+
+	/* per HRM, rings can only be disabled via reset operation,
+	 * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
+	 */
+	if (num_queues < oct->num_oqs)
+		num_queues = oct->num_oqs;
+
+	cn23xx_vf_reset_io_queues(oct, num_queues);
+}
+
+void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
+{
+	struct octeon_mbox_cmd mbox_cmd;
+
+	mbox_cmd.msg.u64 = 0;
+	mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+	mbox_cmd.msg.s.resp_needed = 0;
+	mbox_cmd.msg.s.cmd = OCTEON_VF_FLR_REQUEST;
+	mbox_cmd.msg.s.len = 1;
+	mbox_cmd.q_no = 0;
+	mbox_cmd.recv_len = 0;
+	mbox_cmd.recv_status = 0;
+	mbox_cmd.fn = NULL;
+	mbox_cmd.fn_arg = 0;
+
+	octeon_mbox_write(oct, &mbox_cmd);
+}
+
+static void octeon_pfvf_hs_callback(struct octeon_device *oct,
+				    struct octeon_mbox_cmd *cmd,
+				    void *arg)
+{
+	u32 major = 0;
+
+	memcpy((uint8_t *)&oct->pfvf_hsword, cmd->msg.s.params,
+	       CN23XX_MAILBOX_MSGPARAM_SIZE);
+	if (cmd->recv_len > 1) {
+		major = ((struct lio_version *)(cmd->data))->major;
+		major = major << 16;
+	}
+
+	atomic_set((atomic_t *)arg, major | 1);
+}
+
+int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
+{
+	struct octeon_mbox_cmd mbox_cmd;
+	u32 q_no, count = 0;
+	atomic_t status;
+	u32 pfmajor;
+	u32 vfmajor;
+	u32 ret;
+
+	/* Sending VF_ACTIVE indication to the PF driver */
+	dev_dbg(&oct->pci_dev->dev, "requesting info from pf\n");
+
+	mbox_cmd.msg.u64 = 0;
+	mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+	mbox_cmd.msg.s.resp_needed = 1;
+	mbox_cmd.msg.s.cmd = OCTEON_VF_ACTIVE;
+	mbox_cmd.msg.s.len = 2;
+	mbox_cmd.data[0] = 0;
+	((struct lio_version *)&mbox_cmd.data[0])->major =
+						LIQUIDIO_BASE_MAJOR_VERSION;
+	((struct lio_version *)&mbox_cmd.data[0])->minor =
+						LIQUIDIO_BASE_MINOR_VERSION;
+	((struct lio_version *)&mbox_cmd.data[0])->micro =
+						LIQUIDIO_BASE_MICRO_VERSION;
+	mbox_cmd.q_no = 0;
+	mbox_cmd.recv_len = 0;
+	mbox_cmd.recv_status = 0;
+	mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
+	mbox_cmd.fn_arg = &status;
+
+	/* Interrupts are not enabled at this point.
+	 * Enable them with default oq ticks
+	 */
+	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+	octeon_mbox_write(oct, &mbox_cmd);
+
+	atomic_set(&status, 0);
+
+	do {
+		schedule_timeout_uninterruptible(1);
+	} while ((!atomic_read(&status)) && (count++ < 100000));
+
+	/* Disable the interrupts so that they will be re-enabled
+	 * with the oq ticks received from the PF
+	 */
+	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+	ret = atomic_read(&status);
+	if (!ret) {
+		dev_err(&oct->pci_dev->dev, "octeon_pfvf_handshake timeout\n");
+		return 1;
+	}
+
+	for (q_no = 0 ; q_no < oct->num_iqs ; q_no++)
+		oct->instr_queue[q_no]->txpciq.s.pkind = oct->pfvf_hsword.pkind;
+
+	vfmajor = LIQUIDIO_BASE_MAJOR_VERSION;
+	pfmajor = ret >> 16;
+	if (pfmajor != vfmajor) {
+		dev_err(&oct->pci_dev->dev,
+			"VF Liquidio driver (major version %d) is not compatible with Liquidio PF driver (major version %d)\n",
+			vfmajor, pfmajor);
+		return 1;
+	}
+
+	dev_dbg(&oct->pci_dev->dev,
+		"VF Liquidio driver (major version %d), Liquidio PF driver (major version %d)\n",
+		vfmajor, pfmajor);
+
+	dev_dbg(&oct->pci_dev->dev, "got data from pf pkind is %d\n",
+		oct->pfvf_hsword.pkind);
+
+	return 0;
+}
+
+static void cn23xx_handle_vf_mbox_intr(struct octeon_ioq_vector *ioq_vector)
+{
+	struct octeon_device *oct = ioq_vector->oct_dev;
+	u64 mbox_int_val;
+
+	if (!ioq_vector->droq_index) {
+		/* read and clear by writing 1 */
+		mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
+		writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
+		if (octeon_mbox_read(oct->mbox[0]))
+			schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
+					      msecs_to_jiffies(0));
+	}
+}
+
+static u64 cn23xx_vf_msix_interrupt_handler(void *dev)
+{
+	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+	struct octeon_device *oct = ioq_vector->oct_dev;
+	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+	u64 pkts_sent;
+	u64 ret = 0;
+
+	dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+	pkts_sent = readq(droq->pkts_sent_reg);
+
+	/* If our device has interrupted, then proceed. Also check
+	 * for all f's, which indicates the interrupt was triggered on
+	 * an error and the PCI read failed.
+	 */
+	if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
+		return ret;
+
+	/* Write count reg in sli_pkt_cnts to clear these int. */
+	if ((pkts_sent & CN23XX_INTR_PO_INT) ||
+	    (pkts_sent & CN23XX_INTR_PI_INT)) {
+		if (pkts_sent & CN23XX_INTR_PO_INT)
+			ret |= MSIX_PO_INT;
+	}
+
+	if (pkts_sent & CN23XX_INTR_PI_INT)
+		/* We will clear the count when we update the read_index. */
+		ret |= MSIX_PI_INT;
+
+	if (pkts_sent & CN23XX_INTR_MBOX_INT) {
+		cn23xx_handle_vf_mbox_intr(ioq_vector);
+		ret |= MSIX_MBOX_INT;
+	}
+
+	return ret;
+}
+
+static void cn23xx_enable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
+	u32 q_no, time_threshold;
+
+	if (intr_flag & OCTEON_OUTPUT_INTR) {
+		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+			/* Set up interrupt packet and time thresholds
+			 * for all the OQs
+			 */
+			time_threshold = cn23xx_vf_get_oq_ticks(
+				oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+
+			octeon_write_csr64(
+			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
+			    (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
+			     ((u64)time_threshold << 32)));
+		}
+	}
+
+	if (intr_flag & OCTEON_INPUT_INTR) {
+		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+			/* Set CINT_ENB to enable IQ interrupt */
+			octeon_write_csr64(
+			    oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
+			    ((octeon_read_csr64(
+				  oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
+			      ~CN23XX_PKT_IN_DONE_CNT_MASK) |
+			     CN23XX_INTR_CINT_ENB));
+		}
+	}
+
+	/* Set queue-0 MBOX_ENB to enable VF mailbox interrupt */
+	if (intr_flag & OCTEON_MBOX_INTR) {
+		octeon_write_csr64(
+		    oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
+		    (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) |
+		     CN23XX_INTR_MBOX_ENB));
+	}
+}
+
+static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+	u32 q_no;
+
+	if (intr_flag & OCTEON_OUTPUT_INTR) {
+		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+			/* Write all 1's in INT_LEVEL reg to disable PO_INT */
+			octeon_write_csr64(
+			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
+			    0x3fffffffffffff);
+		}
+	}
+	if (intr_flag & OCTEON_INPUT_INTR) {
+		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+			octeon_write_csr64(
+			    oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
+			    (octeon_read_csr64(
+				 oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
+			     ~(CN23XX_INTR_CINT_ENB |
+			       CN23XX_PKT_IN_DONE_CNT_MASK)));
+		}
+	}
+
+	if (intr_flag & OCTEON_MBOX_INTR) {
+		octeon_write_csr64(
+		    oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
+		    (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) &
+		     ~CN23XX_INTR_MBOX_ENB));
+	}
+}
+
+int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
+{
+	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
+	u32 rings_per_vf, ring_flag;
+	u64 reg_val;
+
+	if (octeon_map_pci_barx(oct, 0, 0))
+		return 1;
+
+	/* INPUT_CONTROL[RPVF] gives the VF IOq count */
+	reg_val = octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(0));
+
+	oct->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
+		      CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
+	oct->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
+		      CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;
+
+	reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+
+	rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+
+	ring_flag = 0;
+
+	cn23xx->conf  = oct_get_config_info(oct, LIO_23XX);
+	if (!cn23xx->conf) {
+		dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n",
+			__func__);
+		octeon_unmap_pci_barx(oct, 0);
+		return 1;
+	}
+
+	if (oct->sriov_info.rings_per_vf > rings_per_vf) {
+		dev_warn(&oct->pci_dev->dev,
+			 "num_queues:%d greater than PF configured rings_per_vf:%d. Reducing to %d.\n",
+			 oct->sriov_info.rings_per_vf, rings_per_vf,
+			 rings_per_vf);
+		oct->sriov_info.rings_per_vf = rings_per_vf;
+	} else {
+		if (rings_per_vf > num_present_cpus()) {
+			dev_warn(&oct->pci_dev->dev,
+				 "PF configured rings_per_vf:%d greater than num_cpu:%d. Using rings_per_vf:%d equal to num cpus\n",
+				 rings_per_vf,
+				 num_present_cpus(),
+				 num_present_cpus());
+			oct->sriov_info.rings_per_vf =
+				num_present_cpus();
+		} else {
+			oct->sriov_info.rings_per_vf = rings_per_vf;
+		}
+	}
+
+	oct->fn_list.setup_iq_regs = cn23xx_setup_vf_iq_regs;
+	oct->fn_list.setup_oq_regs = cn23xx_setup_vf_oq_regs;
+	oct->fn_list.setup_mbox = cn23xx_setup_vf_mbox;
+	oct->fn_list.free_mbox = cn23xx_free_vf_mbox;
+
+	oct->fn_list.msix_interrupt_handler = cn23xx_vf_msix_interrupt_handler;
+
+	oct->fn_list.setup_device_regs = cn23xx_setup_vf_device_regs;
+
+	oct->fn_list.enable_interrupt = cn23xx_enable_vf_interrupt;
+	oct->fn_list.disable_interrupt = cn23xx_disable_vf_interrupt;
+
+	oct->fn_list.enable_io_queues = cn23xx_enable_vf_io_queues;
+	oct->fn_list.disable_io_queues = cn23xx_disable_vf_io_queues;
+
+	return 0;
+}
+
+void cn23xx_dump_vf_iq_regs(struct octeon_device *oct)
+{
+	u32 regval, q_no;
+
+	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
+		CN23XX_VF_SLI_IQ_DOORBELL(0),
+		CVM_CAST64(octeon_read_csr64(
+					oct, CN23XX_VF_SLI_IQ_DOORBELL(0))));
+
+	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
+		CN23XX_VF_SLI_IQ_BASE_ADDR64(0),
+		CVM_CAST64(octeon_read_csr64(
+			oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0))));
+
+	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
+		CN23XX_VF_SLI_IQ_SIZE(0),
+		CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0))));
+
+	for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
+		dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
+			q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+			CVM_CAST64(octeon_read_csr64(
+				oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))));
+	}
+
+	pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+	dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
+		CN23XX_CONFIG_PCIE_DEVCTL, regval);
+}
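
The handshake code above relies on a small encoding: octeon_pfvf_hs_callback()
stores the PF driver's major version in the upper 16 bits of an atomic status
word and sets bit 0, so the polling loop in cn23xx_octeon_pfvf_handshake() can
distinguish completion from timeout even when the major version is 0. A
standalone sketch of that encoding (plain C, not driver code):

#include <stdint.h>
#include <stdio.h>

/* What the mailbox callback stores: bit 0 = handshake done,
 * bits 31:16 = PF driver major version.
 */
static uint32_t hs_pack(uint32_t pf_major)
{
	return (pf_major << 16) | 1;
}

int main(void)
{
	uint32_t status = hs_pack(1);	/* PF reports major version 1 */

	printf("done=%u pf_major=%u\n", status & 1, status >> 16);
	return 0;
}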

+ 48 - 0
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h

@@ -0,0 +1,48 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file  cn23xx_vf_device.h
+ * \brief Host Driver: Routines that perform CN23XX VF specific operations.
+ */
+
+#ifndef __CN23XX_VF_DEVICE_H__
+#define __CN23XX_VF_DEVICE_H__
+
+#include "cn23xx_vf_regs.h"
+
+/* Register address and configuration for CN23XX devices.
+ * If device specific changes need to be made then add a struct to include
+ * device specific fields as shown in the commented section
+ */
+struct octeon_cn23xx_vf {
+	struct octeon_config *conf;
+};
+
+#define BUSY_READING_REG_VF_LOOP_COUNT		10000
+
+#define CN23XX_MAILBOX_MSGPARAM_SIZE		6
+
+#define MAX_VF_IP_OP_PENDING_PKT_COUNT		100
+
+void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct);
+
+int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
+
+int cn23xx_setup_octeon_vf_device(struct octeon_device *oct);
+
+void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct);
+#endif
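
For intuition on the cn23xx_vf_get_oq_ticks() conversion in
cn23xx_vf_device.c above: the coprocessor clock rate arrives from the PF
handshake in ticks per microsecond, and one OQ tick is 1024 core clock
cycles. A standalone sketch of the same integer arithmetic, with a made-up
clock of 800 ticks/us:

#include <stdint.h>
#include <stdio.h>

static uint32_t oq_ticks(uint32_t coproc_tics_per_us, uint32_t time_intr_in_us)
{
	uint32_t t = coproc_tics_per_us * 1000;	/* clock cycles per millisecond */

	t /= 1024;				/* OQ ticks per millisecond */
	return t * time_intr_in_us / 1000;	/* scale to the requested time */
}

int main(void)
{
	printf("%u\n", oq_ticks(800, 100));	/* prints 78 */
	return 0;
}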

+ 274 - 0
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h

@@ -0,0 +1,274 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn23xx_vf_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN23XX vf functions.
+ */
+
+#ifndef __CN23XX_VF_REGS_H__
+#define __CN23XX_VF_REGS_H__
+
+#define     CN23XX_CONFIG_XPANSION_BAR             0x38
+
+#define     CN23XX_CONFIG_PCIE_CAP                 0x70
+#define     CN23XX_CONFIG_PCIE_DEVCAP              0x74
+#define     CN23XX_CONFIG_PCIE_DEVCTL              0x78
+#define     CN23XX_CONFIG_PCIE_LINKCAP             0x7C
+#define     CN23XX_CONFIG_PCIE_LINKCTL             0x80
+#define     CN23XX_CONFIG_PCIE_SLOTCAP             0x84
+#define     CN23XX_CONFIG_PCIE_SLOTCTL             0x88
+
+#define     CN23XX_CONFIG_PCIE_FLTMSK              0x720
+
+/* The input jabber is used to determine the TSO max size.
+ * Due to a H/W limitation, this needs to be reduced to 60000
+ * in order to do H/W TSO and avoid the WQE malformation
+ * (PKO_BUG_24989_WQE_LEN).
+ */
+#define    CN23XX_DEFAULT_INPUT_JABBER             0xEA60 /*60000*/
+
+/* ##############  BAR0 Registers ################ */
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define    CN23XX_VF_IQ_OFFSET                     0x20000
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define    CN23XX_VF_SLI_IQ_INSTR_COUNT_START64     0x10040
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */
+#define    CN23XX_VF_SLI_IQ_BASE_ADDR_START64       0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define    CN23XX_VF_SLI_IQ_DOORBELL_START          0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define    CN23XX_VF_SLI_IQ_SIZE_START              0x10030
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define    CN23XX_VF_SLI_IQ_PKT_CONTROL_START64     0x10000
+
+/*------- Request Queue Macros ---------*/
+#define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq)		\
+	(CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq)		\
+	(CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_SIZE(iq)			\
+	(CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_DOORBELL(iq)			\
+	(CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq)		\
+	(CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+/*------------------ Masks ----------------*/
+#define    CN23XX_PKT_INPUT_CTL_VF_NUM                  BIT_ULL(32)
+#define    CN23XX_PKT_INPUT_CTL_MAC_NUM                 BIT(29)
+/* Number of instructions to be read in one MAC read request.
+ * setting to Max value(4)
+ */
+#define    CN23XX_PKT_INPUT_CTL_RDSIZE                  (3 << 25)
+#define    CN23XX_PKT_INPUT_CTL_IS_64B                  BIT(24)
+#define    CN23XX_PKT_INPUT_CTL_RST                     BIT(23)
+#define    CN23XX_PKT_INPUT_CTL_QUIET                   BIT(28)
+#define    CN23XX_PKT_INPUT_CTL_RING_ENB                BIT(22)
+#define    CN23XX_PKT_INPUT_CTL_DATA_NS                 BIT(8)
+#define    CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP        BIT(6)
+#define    CN23XX_PKT_INPUT_CTL_DATA_RO                 BIT(5)
+#define    CN23XX_PKT_INPUT_CTL_USE_CSR                 BIT(4)
+#define    CN23XX_PKT_INPUT_CTL_GATHER_NS               BIT(3)
+#define    CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP      (2)
+#define    CN23XX_PKT_INPUT_CTL_GATHER_RO               (1)
+
+/** Rings per Virtual Function [RO] **/
+#define    CN23XX_PKT_INPUT_CTL_RPVF_MASK               (0x3F)
+#define    CN23XX_PKT_INPUT_CTL_RPVF_POS                (48)
+/* These bits[47:44][RO] give the Physical function number info within the MAC */
+#define    CN23XX_PKT_INPUT_CTL_PF_NUM_MASK             (0x7)
+#define    CN23XX_PKT_INPUT_CTL_PF_NUM_POS              (45)
+/** These bits[43:32][RO] give the virtual function number info within the PF */
+#define    CN23XX_PKT_INPUT_CTL_VF_NUM_MASK             (0x1FFF)
+#define    CN23XX_PKT_INPUT_CTL_VF_NUM_POS              (32)
+#define    CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK            (0x3)
+#define    CN23XX_PKT_INPUT_CTL_MAC_NUM_POS             (29)
+#define    CN23XX_PKT_IN_DONE_WMARK_MASK                (0xFFFFULL)
+#define    CN23XX_PKT_IN_DONE_WMARK_BIT_POS             (32)
+#define    CN23XX_PKT_IN_DONE_CNT_MASK                  (0x00000000FFFFFFFFULL)
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+#define CN23XX_PKT_INPUT_CTL_MASK			\
+	(CN23XX_PKT_INPUT_CTL_RDSIZE			\
+	 | CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP	\
+	 | CN23XX_PKT_INPUT_CTL_USE_CSR)
+#else
+#define CN23XX_PKT_INPUT_CTL_MASK			\
+	(CN23XX_PKT_INPUT_CTL_RDSIZE			\
+	 | CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP	\
+	 | CN23XX_PKT_INPUT_CTL_USE_CSR			\
+	 | CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
+/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */
+#define    CN23XX_IN_DONE_CNTS_PI_INT               BIT_ULL(62)
+#define    CN23XX_IN_DONE_CNTS_CINT_ENB             BIT_ULL(48)
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define    CN23XX_VF_SLI_OQ_PKT_CONTROL_START       0x10050
+
+/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define    CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE         0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define    CN23XX_VF_SLI_OQ_BASE_ADDR_START64       0x10070
+
+/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define    CN23XX_VF_SLI_OQ_PKT_CREDITS_START       0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define    CN23XX_VF_SLI_OQ_SIZE_START              0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define    CN23XX_VF_SLI_OQ_PKT_SENT_START          0x100B0
+
+/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */
+#define    CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64  0x100A0
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define    CN23XX_VF_OQ_OFFSET                      0x20000
+
+/*------- Output Queue Macros ---------*/
+
+#define CN23XX_VF_SLI_OQ_PKT_CONTROL(oq)		\
+	(CN23XX_VF_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_BASE_ADDR64(oq)		\
+	(CN23XX_VF_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_SIZE(oq)			\
+	(CN23XX_VF_SLI_OQ_SIZE_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq)		\
+	(CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKTS_SENT(oq)		\
+	(CN23XX_VF_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq)		\
+	(CN23XX_VF_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(oq)		\
+	(CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+/* Macros for accessing CNT and TIME separately from INT_LEVELS */
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_CNT(oq)	\
+	(CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_TIME(oq)	\
+	(CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 +	\
+	 ((oq) * CN23XX_VF_OQ_OFFSET) + 4)
+
+/*------------------ Masks ----------------*/
+#define    CN23XX_PKT_OUTPUT_CTL_TENB                  BIT(13)
+#define    CN23XX_PKT_OUTPUT_CTL_CENB                  BIT(12)
+#define    CN23XX_PKT_OUTPUT_CTL_IPTR                  BIT(11)
+#define    CN23XX_PKT_OUTPUT_CTL_ES                    BIT(9)
+#define    CN23XX_PKT_OUTPUT_CTL_NSR                   BIT(8)
+#define    CN23XX_PKT_OUTPUT_CTL_ROR                   BIT(7)
+#define    CN23XX_PKT_OUTPUT_CTL_DPTR                  BIT(6)
+#define    CN23XX_PKT_OUTPUT_CTL_BMODE                 BIT(5)
+#define    CN23XX_PKT_OUTPUT_CTL_ES_P                  BIT(3)
+#define    CN23XX_PKT_OUTPUT_CTL_NSR_P                 BIT(2)
+#define    CN23XX_PKT_OUTPUT_CTL_ROR_P                 BIT(1)
+#define    CN23XX_PKT_OUTPUT_CTL_RING_ENB              BIT(0)
+
+/*######################### Mailbox Reg Macros ########################*/
+#define    CN23XX_VF_SLI_PKT_MBOX_INT_START            0x10210
+#define    CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START         0x10200
+
+#define    CN23XX_SLI_MBOX_OFFSET                      0x20000
+#define    CN23XX_SLI_MBOX_SIG_IDX_OFFSET              0x8
+
+#define CN23XX_VF_SLI_PKT_MBOX_INT(q)	\
+	(CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx)		\
+	(CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START +		\
+	 ((q) * CN23XX_SLI_MBOX_OFFSET +		\
+	  (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
+
+/*######################## INTERRUPTS #########################*/
+
+#define    CN23XX_VF_SLI_INT_SUM_START		  0x100D0
+
+#define CN23XX_VF_SLI_INT_SUM(q)			\
+	(CN23XX_VF_SLI_INT_SUM_START + ((q) * CN23XX_VF_IQ_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define    CN23XX_INTR_PO_INT                   BIT_ULL(63)
+#define    CN23XX_INTR_PI_INT                   BIT_ULL(62)
+#define    CN23XX_INTR_MBOX_INT                 BIT_ULL(61)
+#define    CN23XX_INTR_RESEND                   BIT_ULL(60)
+
+#define    CN23XX_INTR_CINT_ENB                 BIT_ULL(48)
+#define    CN23XX_INTR_MBOX_ENB                 BIT(0)
+
+/*############################ MIO #########################*/
+#define    CN23XX_MIO_PTP_CLOCK_CFG       0x0001070000000f00ULL
+#define    CN23XX_MIO_PTP_CLOCK_LO        0x0001070000000f08ULL
+#define    CN23XX_MIO_PTP_CLOCK_HI        0x0001070000000f10ULL
+#define    CN23XX_MIO_PTP_CLOCK_COMP      0x0001070000000f18ULL
+#define    CN23XX_MIO_PTP_TIMESTAMP       0x0001070000000f20ULL
+#define    CN23XX_MIO_PTP_EVT_CNT         0x0001070000000f28ULL
+#define    CN23XX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL
+#define    CN23XX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL
+#define    CN23XX_MIO_PTP_CKOUT_HI_INCR   0x0001070000000f40ULL
+#define    CN23XX_MIO_PTP_CKOUT_LO_INCR   0x0001070000000f48ULL
+#define    CN23XX_MIO_PTP_PPS_THRESH_LO   0x0001070000000f50ULL
+#define    CN23XX_MIO_PTP_PPS_THRESH_HI   0x0001070000000f58ULL
+#define    CN23XX_MIO_PTP_PPS_HI_INCR     0x0001070000000f60ULL
+#define    CN23XX_MIO_PTP_PPS_LO_INCR     0x0001070000000f68ULL
+
+/*############################ RST #########################*/
+#define    CN23XX_RST_BOOT                0x0001180006001600ULL
+
+/*######################## MSIX TABLE #########################*/
+
+#define    CN23XX_MSIX_TABLE_ADDR_START    0x0
+#define    CN23XX_MSIX_TABLE_DATA_START    0x8
+
+#define    CN23XX_MSIX_TABLE_SIZE          0x10
+#define    CN23XX_MSIX_TABLE_ENTRIES       0x41
+
+#define    CN23XX_MSIX_ENTRY_VECTOR_CTL    BIT_ULL(32)
+
+#define CN23XX_MSIX_TABLE_ADDR(idx)		\
+	(CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+#define CN23XX_MSIX_TABLE_DATA(idx)		\
+	(CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+#endif
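
Every per-queue CSR in this header strides by 0x20000 from its queue-0 base
(CN23XX_VF_IQ_OFFSET / CN23XX_VF_OQ_OFFSET), so each ring's registers sit in
their own 128 KB window of BAR0. A standalone sketch of the address
arithmetic, with constants copied from the macros above:

#include <assert.h>
#include <stdint.h>

#define VF_IQ_OFFSET		0x20000	/* CN23XX_VF_IQ_OFFSET */
#define IQ_DOORBELL_START	0x10020	/* CN23XX_VF_SLI_IQ_DOORBELL_START */

static uint64_t iq_doorbell(uint32_t iq)
{
	return IQ_DOORBELL_START + (uint64_t)iq * VF_IQ_OFFSET;
}

int main(void)
{
	assert(iq_doorbell(0) == 0x10020);	/* queue 0 at the base */
	assert(iq_doorbell(2) == 0x50020);	/* two 128 KB strides later */
	return 0;
}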

+ 0 - 7
drivers/net/ethernet/cavium/liquidio/lio_core.c

@@ -85,13 +85,6 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype,
 	}
 
 	(*pkts_compl)++;
-/*TODO, Use some other pound define to suggest
- * the fact that iqs are not tied to netdevs
- * and can take traffic from different netdevs
- * hence bql reporting is done per packet
- * than in bulk. Usage of NO_NAPI in txq completion is
- * a little confusing
- */
 	*bytes_compl += skb->len;
 }
 

+ 5 - 1
drivers/net/ethernet/cavium/liquidio/lio_main.c

@@ -40,6 +40,7 @@ MODULE_VERSION(LIQUIDIO_VERSION);
 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX);
 
 static int ddr_timeout = 10000;
 module_param(ddr_timeout, int, 0644);
@@ -4484,7 +4485,10 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 
 	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
 
-	octeon_set_io_queues_off(octeon_dev);
+	if (octeon_set_io_queues_off(octeon_dev)) {
+		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
+		return 1;
+	}
 
 	if (OCTEON_CN23XX_PF(octeon_dev)) {
 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);

+ 614 - 0
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c

@@ -0,0 +1,614 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <net/vxlan.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+#include "cn23xx_vf_device.h"
+
+MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LIQUIDIO_VERSION);
+
+struct octeon_device_priv {
+	/* Tasklet structures for this device. */
+	struct tasklet_struct droq_tasklet;
+	unsigned long napi_mask;
+};
+
+static int
+liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void liquidio_vf_remove(struct pci_dev *pdev);
+static int octeon_device_init(struct octeon_device *oct);
+
+static int lio_wait_for_oq_pkts(struct octeon_device *oct)
+{
+	struct octeon_device_priv *oct_priv =
+	    (struct octeon_device_priv *)oct->priv;
+	int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT;
+	int pkt_cnt = 0, pending_pkts;
+	int i;
+
+	do {
+		pending_pkts = 0;
+
+		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+			if (!(oct->io_qmask.oq & BIT_ULL(i)))
+				continue;
+			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
+		}
+		if (pkt_cnt > 0) {
+			pending_pkts += pkt_cnt;
+			tasklet_schedule(&oct_priv->droq_tasklet);
+		}
+		pkt_cnt = 0;
+		schedule_timeout_uninterruptible(1);
+
+	} while (retry-- && pending_pkts);
+
+	return pkt_cnt;
+}
+
+/**
+ * \brief wait for all pending requests to complete
+ * @param oct Pointer to Octeon device
+ *
+ * Called during shutdown sequence
+ */
+static int wait_for_pending_requests(struct octeon_device *oct)
+{
+	int i, pcount = 0;
+
+	for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) {
+		pcount = atomic_read(
+		    &oct->response_list[OCTEON_ORDERED_SC_LIST]
+			 .pending_req_count);
+		if (pcount)
+			schedule_timeout_uninterruptible(HZ / 10);
+		else
+			break;
+	}
+
+	if (pcount)
+		return 1;
+
+	return 0;
+}
+
+static const struct pci_device_id liquidio_vf_pci_tbl[] = {
+	{
+		PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+	},
+	{
+		0, 0, 0, 0, 0, 0, 0
+	}
+};
+MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
+
+static struct pci_driver liquidio_vf_pci_driver = {
+	.name		= "LiquidIO_VF",
+	.id_table	= liquidio_vf_pci_tbl,
+	.probe		= liquidio_vf_probe,
+	.remove		= liquidio_vf_remove,
+};
+
+static
+int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+{
+	struct octeon_device *oct = droq->oct_dev;
+	struct octeon_device_priv *oct_priv =
+	    (struct octeon_device_priv *)oct->priv;
+
+	if (droq->ops.poll_mode) {
+		droq->ops.napi_fn(droq);
+	} else {
+		if (ret & MSIX_PO_INT) {
+			dev_err(&oct->pci_dev->dev,
+				"should not come here should not get rx when poll mode = 0 for vf\n");
+			tasklet_schedule(&oct_priv->droq_tasklet);
+			return 1;
+		}
+		/* this will be flushed periodically by the IQ doorbell check */
+		if (ret & MSIX_PI_INT)
+			return 0;
+	}
+	return 0;
+}
+
+static irqreturn_t
+liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+	struct octeon_device *oct = ioq_vector->oct_dev;
+	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+	u64 ret;
+
+	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+
+	if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
+		liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * \brief Setup interrupt for octeon device
+ * @param oct octeon device
+ *
+ *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
+ */
+static int octeon_setup_interrupt(struct octeon_device *oct)
+{
+	struct msix_entry *msix_entries;
+	int num_alloc_ioq_vectors;
+	int num_ioq_vectors;
+	int irqret;
+	int i;
+
+	if (oct->msix_on) {
+		oct->num_msix_irqs = oct->sriov_info.rings_per_vf;
+
+		oct->msix_entries = kcalloc(
+		    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
+		if (!oct->msix_entries)
+			return 1;
+
+		msix_entries = (struct msix_entry *)oct->msix_entries;
+
+		for (i = 0; i < oct->num_msix_irqs; i++)
+			msix_entries[i].entry = i;
+		num_alloc_ioq_vectors = pci_enable_msix_range(
+						oct->pci_dev, msix_entries,
+						oct->num_msix_irqs,
+						oct->num_msix_irqs);
+		if (num_alloc_ioq_vectors < 0) {
+			dev_err(&oct->pci_dev->dev, "unable to allocate MSI-X interrupts\n");
+			kfree(oct->msix_entries);
+			oct->msix_entries = NULL;
+			return 1;
+		}
+		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+
+		num_ioq_vectors = oct->num_msix_irqs;
+
+		for (i = 0; i < num_ioq_vectors; i++) {
+			irqret = request_irq(msix_entries[i].vector,
+					     liquidio_msix_intr_handler, 0,
+					     "octeon", &oct->ioq_vector[i]);
+			if (irqret) {
+				dev_err(&oct->pci_dev->dev,
+					"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
+					irqret);
+
+				while (i) {
+					i--;
+					irq_set_affinity_hint(
+					    msix_entries[i].vector, NULL);
+					free_irq(msix_entries[i].vector,
+						 &oct->ioq_vector[i]);
+				}
+				pci_disable_msix(oct->pci_dev);
+				kfree(oct->msix_entries);
+				oct->msix_entries = NULL;
+				return 1;
+			}
+			oct->ioq_vector[i].vector = msix_entries[i].vector;
+			/* assign the cpu mask for this msix interrupt vector */
+			irq_set_affinity_hint(
+			    msix_entries[i].vector,
+			    (&oct->ioq_vector[i].affinity_mask));
+		}
+		dev_dbg(&oct->pci_dev->dev,
+			"OCTEON[%d]: MSI-X enabled\n", oct->octeon_id);
+	}
+	return 0;
+}
+
+/**
+ * \brief PCI probe handler
+ * @param pdev PCI device structure
+ * @param ent unused
+ */
+static int
+liquidio_vf_probe(struct pci_dev *pdev,
+		  const struct pci_device_id *ent __attribute__((unused)))
+{
+	struct octeon_device *oct_dev = NULL;
+
+	oct_dev = octeon_allocate_device(pdev->device,
+					 sizeof(struct octeon_device_priv));
+
+	if (!oct_dev) {
+		dev_err(&pdev->dev, "Unable to allocate device\n");
+		return -ENOMEM;
+	}
+	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
+
+	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
+		 (u32)pdev->vendor, (u32)pdev->device);
+
+	/* Assign octeon_device for this device to the private data area. */
+	pci_set_drvdata(pdev, oct_dev);
+
+	/* set linux specific device pointer */
+	oct_dev->pci_dev = pdev;
+
+	if (octeon_device_init(oct_dev)) {
+		liquidio_vf_remove(pdev);
+		return -ENOMEM;
+	}
+
+	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
+
+	return 0;
+}
+
+/**
+ * \brief PCI FLR for each Octeon device.
+ * @param oct octeon device
+ */
+static void octeon_pci_flr(struct octeon_device *oct)
+{
+	u16 status;
+
+	pci_save_state(oct->pci_dev);
+
+	pci_cfg_access_lock(oct->pci_dev);
+
+	/* Quiesce the device completely */
+	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
+			      PCI_COMMAND_INTX_DISABLE);
+
+	/* Wait for Transaction Pending bit clean */
+	msleep(100);
+	pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, &status);
+	if (status & PCI_EXP_DEVSTA_TRPND) {
+		dev_info(&oct->pci_dev->dev, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
+		ssleep(5);
+		pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA,
+					  &status);
+		if (status & PCI_EXP_DEVSTA_TRPND)
+			dev_info(&oct->pci_dev->dev, "Function reset still incomplete after 5s, reset anyway\n");
+	}
+	pcie_capability_set_word(oct->pci_dev, PCI_EXP_DEVCTL,
+				 PCI_EXP_DEVCTL_BCR_FLR);
+	mdelay(100);
+
+	pci_cfg_access_unlock(oct->pci_dev);
+
+	pci_restore_state(oct->pci_dev);
+}
+
+/**
+ * \brief Destroy resources associated with octeon device
+ * @param oct octeon device
+ */
+static void octeon_destroy_resources(struct octeon_device *oct)
+{
+	struct msix_entry *msix_entries;
+	int i;
+
+	switch (atomic_read(&oct->status)) {
+	case OCT_DEV_RUNNING:
+	case OCT_DEV_CORE_OK:
+		/* No more instructions will be forwarded. */
+		atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+			lio_get_state_string(&oct->status));
+
+		schedule_timeout_uninterruptible(HZ / 10);
+
+		/* fallthrough */
+	case OCT_DEV_HOST_OK:
+		/* fallthrough */
+	case OCT_DEV_IO_QUEUES_DONE:
+		if (wait_for_pending_requests(oct))
+			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+		if (lio_wait_for_instr_fetch(oct))
+			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+		/* Disable the input and output queues now. No more packets will
+		 * arrive from Octeon, but we should wait for all packet
+		 * processing to finish.
+		 */
+		oct->fn_list.disable_io_queues(oct);
+
+		if (lio_wait_for_oq_pkts(oct))
+			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+
+	case OCT_DEV_INTR_SET_DONE:
+		/* Disable interrupts  */
+		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+		if (oct->msix_on) {
+			msix_entries = (struct msix_entry *)oct->msix_entries;
+			for (i = 0; i < oct->num_msix_irqs; i++) {
+				irq_set_affinity_hint(msix_entries[i].vector,
+						      NULL);
+				free_irq(msix_entries[i].vector,
+					 &oct->ioq_vector[i]);
+			}
+			pci_disable_msix(oct->pci_dev);
+			kfree(oct->msix_entries);
+			oct->msix_entries = NULL;
+		}
+		/* Soft reset the octeon device before exiting */
+		if (oct->pci_dev->reset_fn)
+			octeon_pci_flr(oct);
+		else
+			cn23xx_vf_ask_pf_to_do_flr(oct);
+
+		/* fallthrough */
+	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
+		octeon_free_ioq_vector(oct);
+
+		/* fallthrough */
+	case OCT_DEV_MBOX_SETUP_DONE:
+		oct->fn_list.free_mbox(oct);
+
+		/* fallthrough */
+	case OCT_DEV_IN_RESET:
+	case OCT_DEV_DROQ_INIT_DONE:
+		mdelay(100);
+		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+			if (!(oct->io_qmask.oq & BIT_ULL(i)))
+				continue;
+			octeon_delete_droq(oct, i);
+		}
+
+		/* fallthrough */
+	case OCT_DEV_RESP_LIST_INIT_DONE:
+		octeon_delete_response_list(oct);
+
+		/* fallthrough */
+	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
+		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+			if (!(oct->io_qmask.iq & BIT_ULL(i)))
+				continue;
+			octeon_delete_instr_queue(oct, i);
+		}
+
+		/* fallthrough */
+	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+		octeon_free_sc_buffer_pool(oct);
+
+		/* fallthrough */
+	case OCT_DEV_DISPATCH_INIT_DONE:
+		octeon_delete_dispatch_list(oct);
+		cancel_delayed_work_sync(&oct->nic_poll_work.work);
+
+		/* fallthrough */
+	case OCT_DEV_PCI_MAP_DONE:
+		octeon_unmap_pci_barx(oct, 0);
+		octeon_unmap_pci_barx(oct, 1);
+
+		/* fallthrough */
+	case OCT_DEV_PCI_ENABLE_DONE:
+		pci_clear_master(oct->pci_dev);
+		/* Disable the device, releasing the PCI INT */
+		pci_disable_device(oct->pci_dev);
+
+		/* fallthrough */
+	case OCT_DEV_BEGIN_STATE:
+		/* Nothing to be done here either */
+		break;
+	}
+}
+
+/**
+ * \brief Cleans up resources at unload time
+ * @param pdev PCI device structure
+ */
+static void liquidio_vf_remove(struct pci_dev *pdev)
+{
+	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
+
+	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+
+	/* Reset the octeon device and cleanup all memory allocated for
+	 * the octeon device by driver.
+	 */
+	octeon_destroy_resources(oct_dev);
+
+	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
+
+	/* This octeon device has been removed. Update the global
+	 * data structure to reflect this. Free the device structure.
+	 */
+	octeon_free_device_mem(oct_dev);
+}
+
+/**
+ * \brief PCI initialization for each Octeon device.
+ * @param oct octeon device
+ */
+static int octeon_pci_os_setup(struct octeon_device *oct)
+{
+#ifdef CONFIG_PCI_IOV
+	/* setup PCI stuff first */
+	if (!oct->pci_dev->physfn)
+		octeon_pci_flr(oct);
+#endif
+
+	if (pci_enable_device(oct->pci_dev)) {
+		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
+		return 1;
+	}
+
+	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
+		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+		pci_disable_device(oct->pci_dev);
+		return 1;
+	}
+
+	/* Enable PCI DMA Master. */
+	pci_set_master(oct->pci_dev);
+
+	return 0;
+}
+
+/**
+ * \brief Device initialization for each Octeon device that is probed
+ * @param oct  octeon device
+ */
+static int octeon_device_init(struct octeon_device *oct)
+{
+	u32 rev_id;
+	int j;
+
+	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
+
+	/* Enable access to the octeon device and make its DMA capability
+	 * known to the OS.
+	 */
+	if (octeon_pci_os_setup(oct))
+		return 1;
+	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
+
+	oct->chip_id = OCTEON_CN23XX_VF_VID;
+	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
+	oct->rev_id = rev_id & 0xff;
+
+	if (cn23xx_setup_octeon_vf_device(oct))
+		return 1;
+
+	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
+
+	/* Initialize the dispatch mechanism used to push packets arriving on
+	 * Octeon Output queues.
+	 */
+	if (octeon_init_dispatch_list(oct))
+		return 1;
+
+	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
+
+	if (octeon_set_io_queues_off(oct)) {
+		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
+		return 1;
+	}
+
+	if (oct->fn_list.setup_device_regs(oct)) {
+		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
+		return 1;
+	}
+
+	/* Initialize soft command buffer pool */
+	if (octeon_setup_sc_buffer_pool(oct)) {
+		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
+		return 1;
+	}
+	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
+	/* Setup the data structures that manage this Octeon's Input queues. */
+	if (octeon_setup_instr_queues(oct)) {
+		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
+		return 1;
+	}
+	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
+
+	/* Initialize lists to manage the requests of different types that
+	 * arrive from user & kernel applications for this octeon device.
+	 */
+	if (octeon_setup_response_list(oct)) {
+		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
+		return 1;
+	}
+	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
+
+	if (octeon_setup_output_queues(oct)) {
+		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
+		return 1;
+	}
+	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
+
+	if (oct->fn_list.setup_mbox(oct)) {
+		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
+		return 1;
+	}
+	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
+
+	if (octeon_allocate_ioq_vector(oct)) {
+		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
+		return 1;
+	}
+	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
+
+	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
+		 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
+
+	/* Setup the interrupt handler and record the INT SUM register address */
+	if (octeon_setup_interrupt(oct))
+		return 1;
+
+	if (cn23xx_octeon_pfvf_handshake(oct))
+		return 1;
+
+	/* Enable Octeon device interrupts */
+	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
+
+	/* Enable the input and output queues for this Octeon device */
+	if (oct->fn_list.enable_io_queues(oct)) {
+		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
+		return 1;
+	}
+
+	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
+
+	atomic_set(&oct->status, OCT_DEV_HOST_OK);
+
+	/* Send Credit for Octeon Output queues. Credits are always sent after
+	 * the output queue is enabled.
+	 */
+	for (j = 0; j < oct->num_oqs; j++)
+		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
+
+	/* Packets can start arriving on the output queues from this point. */
+
+	atomic_set(&oct->status, OCT_DEV_CORE_OK);
+
+	atomic_set(&oct->status, OCT_DEV_RUNNING);
+
+	return 0;
+}
+
+static int __init liquidio_vf_init(void)
+{
+	octeon_init_device_list(0);
+	return pci_register_driver(&liquidio_vf_pci_driver);
+}
+
+static void __exit liquidio_vf_exit(void)
+{
+	pci_unregister_driver(&liquidio_vf_pci_driver);
+
+	pr_info("LiquidIO_VF network module is now unloaded\n");
+}
+
+module_init(liquidio_vf_init);
+module_exit(liquidio_vf_exit);
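
The interrupt path in this file is deliberately split in two layers: the
chip-specific fn_list.msix_interrupt_handler() returns a bitmask of causes,
and the generic liquidio_msix_intr_handler() schedules DROQ processing from
it (the mailbox cause is already serviced inside the chip handler). A
standalone sketch of that cause-mask contract, reusing the MSIX_* values
from octeon_device.h:

#include <stdint.h>
#include <stdio.h>

#define MSIX_PO_INT	0x1	/* packets arrived on an output queue */
#define MSIX_PI_INT	0x2	/* input-queue (tx) completion */
#define MSIX_MBOX_INT	0x4	/* PF<->VF mailbox message */

/* Placeholder for scheduling NAPI or the droq tasklet. */
static void schedule_droq_work(void)
{
	printf("droq work scheduled\n");
}

static void top_level_handler(uint64_t causes)
{
	if (causes & (MSIX_PO_INT | MSIX_PI_INT))
		schedule_droq_work();
	/* MSIX_MBOX_INT needs no action here: the chip handler
	 * already queued the mailbox work.
	 */
}

int main(void)
{
	top_level_handler(MSIX_PO_INT | MSIX_MBOX_INT);
	return 0;
}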

+ 61 - 5
drivers/net/ethernet/cavium/liquidio/octeon_device.c

@@ -28,6 +28,7 @@
 #include "cn66xx_regs.h"
 #include "cn66xx_device.h"
 #include "cn23xx_pf_device.h"
+#include "cn23xx_vf_device.h"
 
 /** Default configuration
  *  for CN66XX OCTEON Models.
@@ -571,15 +572,17 @@ static void *__retrieve_octeon_config_info(struct octeon_device *oct,
 	switch (oct_conf_info[oct_id].conf_type) {
 	case OCTEON_CONFIG_TYPE_DEFAULT:
 		if (oct->chip_id == OCTEON_CN66XX) {
-			ret = (void *)&default_cn66xx_conf;
+			ret = &default_cn66xx_conf;
 		} else if ((oct->chip_id == OCTEON_CN68XX) &&
 			   (card_type == LIO_210NV)) {
-			ret =  (void *)&default_cn68xx_210nv_conf;
+			ret = &default_cn68xx_210nv_conf;
 		} else if ((oct->chip_id == OCTEON_CN68XX) &&
 			   (card_type == LIO_410NV)) {
-			ret =  (void *)&default_cn68xx_conf;
+			ret = &default_cn68xx_conf;
 		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
-			ret =  (void *)&default_cn23xx_conf;
+			ret = &default_cn23xx_conf;
+		} else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
+			ret = &default_cn23xx_conf;
 		}
 		break;
 	default:
@@ -595,6 +598,7 @@ static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
 	case OCTEON_CN68XX:
 		return lio_validate_cn6xxx_config_info(oct, conf);
 	case OCTEON_CN23XX_PF_VID:
+	case OCTEON_CN23XX_VF_VID:
 		return 0;
 	default:
 		break;
@@ -672,6 +676,9 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
 	case OCTEON_CN23XX_PF_VID:
 		configsize = sizeof(struct octeon_cn23xx_pf);
 		break;
+	case OCTEON_CN23XX_VF_VID:
+		configsize = sizeof(struct octeon_cn23xx_vf);
+		break;
 	default:
 		pr_err("%s: Unknown PCI Device: 0x%x\n",
 		       __func__,
@@ -747,6 +754,9 @@ octeon_allocate_ioq_vector(struct octeon_device  *oct)
 
 	if (OCTEON_CN23XX_PF(oct))
 		num_ioqs = oct->sriov_info.num_pf_rings;
+	else if (OCTEON_CN23XX_VF(oct))
+		num_ioqs = oct->sriov_info.rings_per_vf;
+
 	size = sizeof(struct octeon_ioq_vector) * num_ioqs;
 
 	oct->ioq_vector = vmalloc(size);
@@ -790,6 +800,8 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
 			CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn6xxx));
 	else if (OCTEON_CN23XX_PF(oct))
 		num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf));
+	else if (OCTEON_CN23XX_VF(oct))
+		num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_vf));
 
 	oct->num_iqs = 0;
 
@@ -835,6 +847,9 @@ int octeon_setup_output_queues(struct octeon_device *oct)
 	} else if (OCTEON_CN23XX_PF(oct)) {
 		num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_pf));
 		desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_pf));
+	} else if (OCTEON_CN23XX_VF(oct)) {
+		num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_vf));
+		desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf));
 	}
 	oct->num_oqs = 0;
 	oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
@@ -853,12 +868,53 @@ int octeon_setup_output_queues(struct octeon_device *oct)
 	return 0;
 }
 
-void octeon_set_io_queues_off(struct octeon_device *oct)
+int octeon_set_io_queues_off(struct octeon_device *oct)
 {
+	int loop = BUSY_READING_REG_VF_LOOP_COUNT;
+
 	if (OCTEON_CN6XXX(oct)) {
 		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
 		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+	} else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
+		u32 q_no;
+
+		/* IOQs will already be in reset.
+		 * If RST bit is set, wait for quiet bit to be set.
+		 * Once quiet bit is set, clear the RST bit.
+		 */
+		for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
+			u64 reg_val = octeon_read_csr64(
+				oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+
+			while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+			       !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
+			       loop) {
+				reg_val = octeon_read_csr64(
+					oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+				loop--;
+			}
+			if (!loop) {
+				dev_err(&oct->pci_dev->dev,
+					"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+					q_no);
+				return -1;
+			}
+
+			reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+			octeon_write_csr64(oct,
+					   CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+					   reg_val);
+
+			reg_val = octeon_read_csr64(
+					oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+			if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+				dev_err(&oct->pci_dev->dev,
+					"unable to reset qno %u\n", q_no);
+				return -1;
+			}
+		}
 	}
+	return 0;
 }
 
 void octeon_set_droq_pkt_op(struct octeon_device *oct,
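
The VF branch added to octeon_set_io_queues_off() above uses the same
bounded poll as cn23xx_vf_reset_io_queues(): while RST is set, spin until
QUIET appears or the loop budget runs out, then clear RST and re-read to
verify. A stripped-down sketch of that pattern, where read_reg()/write_reg()
stand in for the CSR accessors:

#include <stdbool.h>
#include <stdint.h>

#define CTL_RST		(1ULL << 23)	/* CN23XX_PKT_INPUT_CTL_RST */
#define CTL_QUIET	(1ULL << 28)	/* CN23XX_PKT_INPUT_CTL_QUIET */

static bool wait_queue_out_of_reset(uint64_t (*read_reg)(void),
				    void (*write_reg)(uint64_t), int loop)
{
	uint64_t v = read_reg();

	while ((v & CTL_RST) && !(v & CTL_QUIET) && loop) {
		v = read_reg();		/* bounded busy-wait for QUIET */
		loop--;
	}
	if (!loop)
		return false;		/* hardware never went quiet */

	write_reg(v & ~CTL_RST);	/* clear the reset bit */
	return !(read_reg() & CTL_RST);	/* verify the clear stuck */
}

/* Tiny simulated register so the sketch runs standalone. */
static uint64_t fake_reg = CTL_RST | CTL_QUIET;
static uint64_t rd(void) { return fake_reg; }
static void wr(uint64_t v) { fake_reg = v; }

int main(void)
{
	return wait_queue_out_of_reset(rd, wr, 100) ? 0 : 1;
}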

+ 8 - 1
drivers/net/ethernet/cavium/liquidio/octeon_device.h

@@ -53,6 +53,7 @@ enum {
 	NUM_OCTEON_CONFS,
 };
 
+#define  OCTEON_INPUT_INTR    (1)
 #define  OCTEON_OUTPUT_INTR   (2)
 #define  OCTEON_MBOX_INTR     (4)
 #define  OCTEON_ALL_INTR      0xff
@@ -294,6 +295,7 @@ struct octdev_props {
 #define LIO_FLAG_MSIX_ENABLED	0x1
 #define MSIX_PO_INT		0x1
 #define MSIX_PI_INT		0x2
+#define MSIX_MBOX_INT		0x4
 
 struct octeon_pf_vf_hs_word {
 #ifdef __LITTLE_ENDIAN_BITFIELD
@@ -401,8 +403,13 @@ struct octeon_device {
 
 	/** Octeon Chip type. */
 	u16 chip_id;
+
 	u16 rev_id;
+
 	u16 pf_num;
+
+	u16 vf_num;
+
 	/** This device's id - set by the driver. */
 	u32 octeon_id;
 
@@ -766,7 +773,7 @@ int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
 /** Turns off the input and output queues for the device
  *  @param oct which octeon to disable
  */
-void octeon_set_io_queues_off(struct octeon_device *oct);
+int octeon_set_io_queues_off(struct octeon_device *oct);
 
 /** Turns on or off the given output queue for the device
  *  @param oct which octeon to change

+ 10 - 1
drivers/net/ethernet/cavium/liquidio/request_manager.c

@@ -28,6 +28,7 @@
 #include "octeon_network.h"
 #include "cn66xx_device.h"
 #include "cn23xx_pf_device.h"
+#include "cn23xx_vf_device.h"
 
 struct iq_post_status {
 	int status;
@@ -68,6 +69,9 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
 	else if (OCTEON_CN23XX_PF(oct))
 		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
+	else if (OCTEON_CN23XX_VF(oct))
+		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf)));
+
 	if (!conf) {
 		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
 			oct->chip_id);
@@ -183,6 +187,9 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
 	else if (OCTEON_CN23XX_PF(oct))
 		desc_size =
 		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));
+	else if (OCTEON_CN23XX_VF(oct))
+		desc_size =
+		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf));
 
 	vfree(iq->request_list);
 
@@ -235,7 +242,9 @@ int octeon_setup_iq(struct octeon_device *oct,
 	}
 
 	oct->num_iqs++;
-	oct->fn_list.enable_io_queues(oct);
+	if (oct->fn_list.enable_io_queues(oct))
+		return 1;
+
 	return 0;
 }