Merge branch 'liquidio-CN23XX-part-1'

Raghu Vatsavayi says:

====================
liquidio CN23XX support

The following patchset adds support for the new device "CN23XX" in
the liquidio family of adapters. As advised, I have split the
previous V3 series of 18 patches into two halves. This first
patchset has the first 10 patches, which are tested against
net-next. I will post the second half after this one.

This V4 series also addresses all the comments from the previous
submission:
1) Avoid busy loop while reading registers.
2) Other minor comments about debug messages and constants.

Please apply the patches in the following order, as some of them
depend on earlier patches.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit dbeb714a5b by David S. Miller
25 changed files with 3022 additions and 611 deletions:

  + 1    - 1    drivers/net/ethernet/cavium/Kconfig
  + 13   - 11   drivers/net/ethernet/cavium/liquidio/Makefile
  + 1171 - 0    drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
  + 57   - 0    drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
  + 604  - 0    drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
  + 9    - 36   drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
  + 3    - 4    drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
  + 0    - 1    drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
  + 261  - 0    drivers/net/ethernet/cavium/liquidio/lio_core.c
  + 8    - 10   drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
  + 376  - 390  drivers/net/ethernet/cavium/liquidio/lio_main.c
  + 18   - 4    drivers/net/ethernet/cavium/liquidio/liquidio_common.h
  + 51   - 8    drivers/net/ethernet/cavium/liquidio/octeon_config.h
  + 116  - 1    drivers/net/ethernet/cavium/liquidio/octeon_console.c
  + 192  - 110  drivers/net/ethernet/cavium/liquidio/octeon_device.c
  + 93   - 7    drivers/net/ethernet/cavium/liquidio/octeon_device.h
  + 20   - 13   drivers/net/ethernet/cavium/liquidio/octeon_droq.c
  + 2    - 0    drivers/net/ethernet/cavium/liquidio/octeon_droq.h
  + 2    - 0    drivers/net/ethernet/cavium/liquidio/octeon_iq.h
  + 19   - 5    drivers/net/ethernet/cavium/liquidio/octeon_main.h
  + 0    - 1    drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
  + 0    - 2    drivers/net/ethernet/cavium/liquidio/octeon_network.h
  + 2    - 6    drivers/net/ethernet/cavium/liquidio/octeon_nic.c
  + 1    - 1    drivers/net/ethernet/cavium/liquidio/octeon_nic.h
  + 3    - 0    drivers/net/ethernet/cavium/liquidio/request_manager.c
+ 1 - 1
drivers/net/ethernet/cavium/Kconfig

@@ -58,7 +58,7 @@ config LIQUIDIO
 	select LIBCRC32C
 	---help---
 	  This driver supports Cavium LiquidIO Intelligent Server Adapters
-	  based on CN66XX and CN68XX chips.
+	  based on CN66XX, CN68XX and CN23XX chips.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called liquidio.  This is recommended.

+ 13 - 11
drivers/net/ethernet/cavium/liquidio/Makefile

@@ -3,14 +3,16 @@
 #
 obj-$(CONFIG_LIQUIDIO) += liquidio.o
 
-liquidio-objs := lio_main.o  \
-	      lio_ethtool.o      \
-	      request_manager.o  \
-	      response_manager.o \
-	      octeon_device.o    \
-	      cn66xx_device.o    \
-	      cn68xx_device.o    \
-	      octeon_mem_ops.o   \
-	      octeon_droq.o      \
-	      octeon_console.o   \
-	      octeon_nic.o
+liquidio-$(CONFIG_LIQUIDIO) += lio_ethtool.o \
+			lio_core.o         \
+			request_manager.o  \
+			response_manager.o \
+			octeon_device.o    \
+			cn66xx_device.o    \
+			cn68xx_device.o    \
+			cn23xx_pf_device.o \
+			octeon_mem_ops.o   \
+			octeon_droq.o      \
+			octeon_nic.o
+
+liquidio-objs := lio_main.o octeon_console.o $(liquidio-y)

+ 1171 - 0
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c

@@ -0,0 +1,1171 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "cn23xx_pf_device.h"
+#include "octeon_main.h"
+
+#define RESET_NOTDONE 0
+#define RESET_DONE 1
+
+/* Change the value of SLI Packet Input Jabber Register to allow
+ * VXLAN TSO packets which can be 64424 bytes, exceeding the
+ * MAX_GSO_SIZE we supplied to the kernel
+ */
+#define CN23XX_INPUT_JABBER 64600
+
+#define LIOLUT_RING_DISTRIBUTION 9
+const int liolut_num_vfs_to_rings_per_vf[LIOLUT_RING_DISTRIBUTION] = {
+	0, 8, 4, 2, 2, 2, 1, 1, 1
+};
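
The table above distributes rings by VF count: the index is the number of VFs and the value is the number of rings each VF gets. As a minimal illustrative sketch (not part of the patch; lio_rings_per_vf is a hypothetical helper name), a range-checked lookup could read:

        /* Sketch only: map a VF count to its per-VF ring allocation. */
        static inline int lio_rings_per_vf(unsigned int num_vfs)
        {
                if (num_vfs >= LIOLUT_RING_DISTRIBUTION)
                        return 0;       /* out of table range */
                return liolut_num_vfs_to_rings_per_vf[num_vfs];
        }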
+
+void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
+{
+	int i = 0;
+	u32 regval = 0;
+	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+	/*In cn23xx_soft_reset*/
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
+		"CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
+		CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+		"CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
+		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+		"CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
+		lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
+
+	/*In cn23xx_set_dpi_regs*/
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+		"CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
+		lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
+
+	for (i = 0; i < 6; i++) {
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_DPI_DMA_ENG_ENB", i,
+			CN23XX_DPI_DMA_ENG_ENB(i),
+			lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_DPI_DMA_ENG_BUF", i,
+			CN23XX_DPI_DMA_ENG_BUF(i),
+			lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
+	}
+
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
+		CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));
+
+	/*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
+	pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+		"CN23XX_CONFIG_PCIE_DEVCTL",
+		CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));
+
+	dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+		"CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
+		CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
+		lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));
+
+	/*In cn23xx_specific_regs_setup */
+	dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+		"CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
+		CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
+		CVM_CAST64(octeon_read_csr64(
+			oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
+
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+		"CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
+		(u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+
+	/*In cn23xx_setup_global_mac_regs*/
+	for (i = 0; i < CN23XX_MAX_MACS; i++) {
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_PKT_MAC_RINFO64", i,
+			CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
+			CVM_CAST64(octeon_read_csr64
+				(oct, CN23XX_SLI_PKT_MAC_RINFO64
+					(i, oct->pf_num))));
+	}
+
+	/*In cn23xx_setup_global_input_regs*/
+	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_IQ_PKT_CONTROL64", i,
+			CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
+			CVM_CAST64(octeon_read_csr64
+				(oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
+	}
+
+	/*In cn23xx_setup_global_output_regs*/
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+		"CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
+		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));
+
+	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_OQ_PKT_CONTROL", i,
+			CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
+			CVM_CAST64(octeon_read_csr(
+				oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
+			CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
+			CVM_CAST64(octeon_read_csr64(
+				oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
+	}
+
+	/*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+		"cn23xx->intr_enb_reg64",
+		CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
+		CVM_CAST64(readq(cn23xx->intr_enb_reg64)));
+
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+		"cn23xx->intr_sum_reg64",
+		CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
+		CVM_CAST64(readq(cn23xx->intr_sum_reg64)));
+
+	/*In cn23xx_setup_iq_regs*/
+	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_IQ_BASE_ADDR64", i,
+			CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
+			CVM_CAST64(octeon_read_csr64(
+				oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_IQ_SIZE", i,
+			CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
+			CVM_CAST64(octeon_read_csr
+				(oct, CN23XX_SLI_IQ_SIZE(i))));
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_IQ_DOORBELL", i,
+			CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
+			CVM_CAST64(octeon_read_csr64(
+				oct, CN23XX_SLI_IQ_DOORBELL(i))));
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_IQ_INSTR_COUNT64", i,
+			CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
+			CVM_CAST64(octeon_read_csr64(
+				oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
+	}
+
+	/*In cn23xx_setup_oq_regs*/
+	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_OQ_BASE_ADDR64", i,
+			CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
+			CVM_CAST64(octeon_read_csr64(
+				oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_OQ_SIZE", i,
+			CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
+			CVM_CAST64(octeon_read_csr
+				(oct, CN23XX_SLI_OQ_SIZE(i))));
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
+			CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
+			CVM_CAST64(octeon_read_csr(
+				oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_OQ_PKTS_SENT", i,
+			CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
+			CVM_CAST64(octeon_read_csr64(
+				oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
+		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+			"CN23XX_SLI_OQ_PKTS_CREDIT", i,
+			CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
+			CVM_CAST64(octeon_read_csr64(
+				oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
+	}
+
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+		"CN23XX_SLI_PKT_TIME_INT",
+		CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
+		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
+	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+		"CN23XX_SLI_PKT_CNT_INT",
+		CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
+		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
+}
+
+static int cn23xx_pf_soft_reset(struct octeon_device *oct)
+{
+	octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
+
+	dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n",
+		oct->octeon_id);
+
+	octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL);
+
+	/* Initiate chip-wide soft reset */
+	lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
+	lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
+
+	/* Wait for 100ms as Octeon resets. */
+	mdelay(100);
+
+	if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
+		dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
+			oct->octeon_id);
+		return 1;
+	}
+
+	dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
+		oct->octeon_id);
+
+	/* restore the reset value */
+	octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
+
+	return 0;
+}
+
+static void cn23xx_enable_error_reporting(struct octeon_device *oct)
+{
+	u32 regval;
+	u32 uncorrectable_err_mask, corrtable_err_status;
+
+	pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+	if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) {
+		uncorrectable_err_mask = 0;
+		corrtable_err_status = 0;
+		pci_read_config_dword(oct->pci_dev,
+				      CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK,
+				      &uncorrectable_err_mask);
+		pci_read_config_dword(oct->pci_dev,
+				      CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS,
+				      &corrtable_err_status);
+		dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n"
+				 "\tdev_ctl_status_reg = 0x%08x\n"
+				 "\tuncorrectable_error_mask_reg = 0x%08x\n"
+				 "\tcorrectable_error_status_reg = 0x%08x\n",
+			    regval, uncorrectable_err_mask,
+			    corrtable_err_status);
+	}
+
+	regval |= 0xf; /* Enable Link error reporting */
+
+	dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n",
+		oct->octeon_id);
+	pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval);
+}
+
+static u32 cn23xx_coprocessor_clock(struct octeon_device *oct)
+{
+	/* Bits 29:24 of RST_BOOT[PNR_MUL] hold the ref. clock MULTIPLIER
+	 * for SLI.
+	 */
+
+	/* TBD: get the info in Hand-shake */
+	return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
+}
+
+u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
+{
+	/* This gives the SLI clock per microsec */
+	u32 oqticks_per_us = cn23xx_coprocessor_clock(oct);
+
+	oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;
+
+	/* This gives the clock cycles per millisecond */
+	oqticks_per_us *= 1000;
+
+	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
+	oqticks_per_us /= 1024;
+
+	/* time_intr is in microseconds. The next 2 steps give the oq ticks
+	 * corresponding to time_intr.
+	 */
+	oqticks_per_us *= time_intr_in_us;
+	oqticks_per_us /= 1000;
+
+	return oqticks_per_us;
+}
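
A worked example of the conversion above, assuming a 1000 MHz SLI clock (i.e. cn23xx_coprocessor_clock() returning 1000) and time_intr_in_us = 100:

        /* Illustrative integer arithmetic only:
         *   1000 clocks/us * 1000 = 1000000 clock cycles per millisecond
         *   1000000 / 1024        = 976 oq ticks (1024 cycles each) per ms
         *   976 * 100 (us)        = 97600
         *   97600 / 1000          = 97 oq ticks for a 100 us interrupt time
         */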
+
+static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
+{
+	u64 reg_val;
+	u16 mac_no = oct->pcie_port;
+	u16 pf_num = oct->pf_num;
+
+	/* programming SRN and TRS for each MAC(0..3) */
+
+	dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
+		__func__, mac_no);
+	/* By default, map all 64 IOQs to a single MAC */
+
+	reg_val =
+	    octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
+
+	if (oct->rev_id == OCTEON_CN23XX_REV_1_1) {
+		/* setting SRN <6:0>  */
+		reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+	} else {
+		/* setting SRN <6:0>  */
+		reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF;
+	}
+
+	/* setting TRS <23:16> */
+	reg_val = reg_val |
+		  (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
+	/* write these settings to MAC register */
+	octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
+			   reg_val);
+
+	dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n",
+		mac_no, pf_num, (u64)octeon_read_csr64
+		(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)));
+}
+
+static int cn23xx_reset_io_queues(struct octeon_device *oct)
+{
+	int ret_val = 0;
+	u64 d64;
+	u32 q_no, srn, ern;
+	u32 loop = 1000;
+
+	srn = oct->sriov_info.pf_srn;
+	ern = srn + oct->sriov_info.num_pf_rings;
+
+	/* As per the HRM register description, s/w can't write 0 to ENB;
+	 * to turn a queue off, the RST bit must be set.
+	 */
+
+	/* Reset the Enable bit for all the 64 IQs.  */
+	for (q_no = srn; q_no < ern; q_no++) {
+		/* set RST bit to 1. This bit applies to both IQ and OQ */
+		d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+		d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
+		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
+	}
+
+	/* wait until the RST bit is clear, or the RST and QUIET bits are set */
+	for (q_no = srn; q_no < ern; q_no++) {
+		u64 reg_val = octeon_read_csr64(oct,
+					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+		while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
+		       !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
+		       loop--) {
+			WRITE_ONCE(reg_val, octeon_read_csr64(
+			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+		}
+		if (!loop) {
+			dev_err(&oct->pci_dev->dev,
+				"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+				q_no);
+			return -1;
+		}
+		WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
+			~CN23XX_PKT_INPUT_CTL_RST);
+		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+				   READ_ONCE(reg_val));
+
+		WRITE_ONCE(reg_val, octeon_read_csr64(
+			   oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+		if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
+			dev_err(&oct->pci_dev->dev,
+				"clearing the reset failed for qno: %u\n",
+				q_no);
+			ret_val = -1;
+		}
+	}
+
+	return ret_val;
+}
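
The RST/QUIET handshake above is a bounded register poll. In isolation the pattern reduces to the sketch below; read_reg is a hypothetical accessor standing in for octeon_read_csr64() on the queue's control register, and an exhausted budget is reported as failure, mirroring the driver's behavior:

        /* Sketch: poll until RST clears or QUIET sets, with a retry budget. */
        static int poll_rst_quiet(u64 (*read_reg)(void), unsigned int budget)
        {
                u64 val = read_reg();

                while ((val & CN23XX_PKT_INPUT_CTL_RST) &&
                       !(val & CN23XX_PKT_INPUT_CTL_QUIET) && budget--)
                        val = read_reg();

                return budget ? 0 : -1;
        }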
+
+static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
+{
+	u32 q_no, ern, srn;
+	u64 pf_num;
+	u64 intr_threshold, reg_val;
+	struct octeon_instr_queue *iq;
+	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+	pf_num = oct->pf_num;
+
+	srn = oct->sriov_info.pf_srn;
+	ern = srn + oct->sriov_info.num_pf_rings;
+
+	if (cn23xx_reset_io_queues(oct))
+		return -1;
+
+	/* Set the MAC_NUM and PVF_NUM in the IQ_PKT_CONTROL reg
+	 * for all queues. Only the PF can set these bits.
+	 * Bits 29:30 indicate the MAC num.
+	 * Bits 32:47 indicate the PVF num.
+	 */
+	for (q_no = 0; q_no < ern; q_no++) {
+		reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+		reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;
+
+		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+				   reg_val);
+	}
+
+	/* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
+	 * PF queues
+	 */
+	for (q_no = srn; q_no < ern; q_no++) {
+		void __iomem *inst_cnt_reg;
+
+		iq = oct->instr_queue[q_no];
+		if (iq)
+			inst_cnt_reg = iq->inst_cnt_reg;
+		else
+			inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
+				       CN23XX_SLI_IQ_INSTR_COUNT64(q_no);
+
+		reg_val =
+		    octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+
+		reg_val |= CN23XX_PKT_INPUT_CTL_MASK;
+
+		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+				   reg_val);
+
+		/* Set WMARK level for triggering PI_INT */
+		/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
+		intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
+				 CN23XX_PKT_IN_DONE_WMARK_MASK;
+
+		writeq((readq(inst_cnt_reg) &
+			~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
+			  CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
+		       (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
+		       inst_cnt_reg);
+	}
+	return 0;
+}
+
+static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
+{
+	u32 reg_val;
+	u32 q_no, ern, srn;
+	u64 time_threshold;
+
+	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+	srn = oct->sriov_info.pf_srn;
+	ern = srn + oct->sriov_info.num_pf_rings;
+
+	if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) {
+		octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32);
+	} else {
+		/** Set Output queue watermark to 0 to disable backpressure */
+		octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0);
+	}
+
+	for (q_no = srn; q_no < ern; q_no++) {
+		reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+
+		/* set IPTR & DPTR */
+		reg_val |=
+		    (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+
+		/* reset BMODE */
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+		 * for Output Queue ScatterList
+		 * reset ROR_P, NSR_P
+		 */
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#else
+		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+		 * for Output Queue Data
+		 * reset ROR, NSR
+		 */
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+		/* set the ES bit */
+		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+		/* write all the selected settings */
+		octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
+
+		/* These interrupts are enabled in the
+		 * oct->fn_list.enable_interrupt() routine, which is called
+		 * after IOQ init. Set up the interrupt packet and time
+		 * thresholds for all the OQs here.
+		 */
+		time_threshold = cn23xx_pf_get_oq_ticks(
+		    oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+
+		octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+				   (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
+				    (time_threshold << 32)));
+	}
+
+	/* Set the watermark level for PKO backpressure */
+	writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
+
+	/* Disable putting OQs into reset when a ring has no doorbells;
+	 * enabling this would cause head-of-line blocking.
+	 */
+	/* Do it only for pass 1.0 and pass 1.1 */
+	if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
+	    (oct->rev_id == OCTEON_CN23XX_REV_1_1))
+		writeq(readq((u8 *)oct->mmio[0].hw_addr +
+				     CN23XX_SLI_GBL_CONTROL) | 0x2,
+		       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
+
+	/** Enable channel-level backpressure */
+	if (oct->pf_num)
+		writeq(0xffffffffffffffffULL,
+		       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
+	else
+		writeq(0xffffffffffffffffULL,
+		       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S);
+}
+
+static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
+{
+	cn23xx_enable_error_reporting(oct);
+
+	/* program the MAC(0..3)_RINFO before setting up input/output regs */
+	cn23xx_setup_global_mac_regs(oct);
+
+	if (cn23xx_pf_setup_global_input_regs(oct))
+		return -1;
+
+	cn23xx_pf_setup_global_output_regs(oct);
+
+	/* The default error timeout value should be 0x200000 to avoid a host
+	 * hang when an invalid register is read.
+	 */
+	octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
+			   CN23XX_SLI_WINDOW_CTL_DEFAULT);
+
+	/* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+	octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
+	return 0;
+}
+
+static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+	u64 pkt_in_done;
+
+	iq_no += oct->sriov_info.pf_srn;
+
+	/* Write the start of the input queue's ring and its size  */
+	octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
+			   iq->base_addr_dma);
+	octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+	/* Remember the doorbell & instruction count register addr
+	 * for this queue
+	 */
+	iq->doorbell_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no);
+	iq->inst_cnt_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
+	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+	/* Store the current instruction counter (used in flush_iq
+	 * calculation)
+	 */
+	pkt_in_done = readq(iq->inst_cnt_reg);
+
+	if (oct->msix_on) {
+		/* Set CINT_ENB to enable IQ interrupt   */
+		writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
+		       iq->inst_cnt_reg);
+	} else {
+		/* Clear the count by writing back what we read, but don't
+		 * enable interrupts
+		 */
+		writeq(pkt_in_done, iq->inst_cnt_reg);
+	}
+
+	iq->reset_instr_cnt = 0;
+}
+
+static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+	u32 reg_val;
+	struct octeon_droq *droq = oct->droq[oq_no];
+	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+	u64 time_threshold;
+	u64 cnt_threshold;
+
+	oq_no += oct->sriov_info.pf_srn;
+
+	octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
+			   droq->desc_ring_dma);
+	octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+	octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+			 (droq->buffer_size | (OCT_RH_SIZE << 16)));
+
+	/* Get the mapped address of the pkt_sent and pkts_credit regs */
+	droq->pkts_sent_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
+	droq->pkts_credit_reg =
+	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
+
+	if (!oct->msix_on) {
+		/* Enable this output queue to generate Packet Timer Interrupt
+		 */
+		reg_val =
+		    octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
+		reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB;
+		octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
+				 reg_val);
+
+		/* Enable this output queue to generate Packet Count Interrupt
+		 */
+		reg_val =
+		    octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
+		reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB;
+		octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
+				 reg_val);
+	} else {
+		time_threshold = cn23xx_pf_get_oq_ticks(
+		    oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+		cnt_threshold = (u32)CFG_GET_OQ_INTR_PKT(cn23xx->conf);
+
+		octeon_write_csr64(
+		    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
+		    ((time_threshold << 32 | cnt_threshold)));
+	}
+}
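
Both writes to SLI_PKT(i)_INT_LEVELS above pack two thresholds into one 64-bit value: the time threshold in bits 63:32 and the packet-count threshold in bits 31:0. A standalone sketch of that packing (helper name is hypothetical):

        /* Sketch: pack OQ interrupt thresholds as the code above does. */
        static inline u64 oq_int_levels(u32 time_thr, u32 cnt_thr)
        {
                return ((u64)time_thr << 32) | cnt_thr;
        }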
+
+static int cn23xx_enable_io_queues(struct octeon_device *oct)
+{
+	u64 reg_val;
+	u32 srn, ern, q_no;
+	u32 loop = 1000;
+
+	srn = oct->sriov_info.pf_srn;
+	ern = srn + oct->num_iqs;
+
+	for (q_no = srn; q_no < ern; q_no++) {
+		/* set the corresponding IQ IS_64B bit */
+		if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
+			reg_val = octeon_read_csr64(
+			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
+			octeon_write_csr64(
+			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+		}
+
+		/* set the corresponding IQ ENB bit */
+		if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
+			/* IOQs are in reset by default in PEM2 mode,
+			 * clearing reset bit
+			 */
+			reg_val = octeon_read_csr64(
+			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+
+			if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+				while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+				       !(reg_val &
+					 CN23XX_PKT_INPUT_CTL_QUIET) &&
+				       loop--) {
+					reg_val = octeon_read_csr64(
+					    oct,
+					    CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+				}
+				if (!loop) {
+					dev_err(&oct->pci_dev->dev,
+						"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+						q_no);
+					return -1;
+				}
+				reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+				octeon_write_csr64(
+				    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+				    reg_val);
+
+				reg_val = octeon_read_csr64(
+				    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+				if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+					dev_err(&oct->pci_dev->dev,
+						"clearing the reset failed for qno: %u\n",
+						q_no);
+					return -1;
+				}
+			}
+			reg_val = octeon_read_csr64(
+			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
+			octeon_write_csr64(
+			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+		}
+	}
+	for (q_no = srn; q_no < ern; q_no++) {
+		u32 reg_val;
+		/* set the corresponding OQ ENB bit */
+		if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
+			reg_val = octeon_read_csr(
+			    oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+			reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+			octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+					 reg_val);
+		}
+	}
+	return 0;
+}
+
+static void cn23xx_disable_io_queues(struct octeon_device *oct)
+{
+	int q_no, loop;
+	u64 d64;
+	u32 d32;
+	u32 srn, ern;
+
+	srn = oct->sriov_info.pf_srn;
+	ern = srn + oct->num_iqs;
+
+	/*** Disable Input Queues. ***/
+	for (q_no = srn; q_no < ern; q_no++) {
+		loop = HZ;
+
+		/* start the Reset for a particular ring */
+		WRITE_ONCE(d64, octeon_read_csr64(
+			   oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+		WRITE_ONCE(d64, READ_ONCE(d64) &
+					(~(CN23XX_PKT_INPUT_CTL_RING_ENB)));
+		WRITE_ONCE(d64, READ_ONCE(d64) | CN23XX_PKT_INPUT_CTL_RST);
+		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+				   READ_ONCE(d64));
+
+		/* Wait until hardware indicates that the particular IQ
+		 * is out of reset.
+		 */
+		WRITE_ONCE(d64, octeon_read_csr64(
+					oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+		while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
+			WRITE_ONCE(d64, octeon_read_csr64(
+					oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+			schedule_timeout_uninterruptible(1);
+		}
+
+		/* Reset the doorbell register for this Input Queue. */
+		octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF);
+		while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) &&
+		       loop--) {
+			schedule_timeout_uninterruptible(1);
+		}
+	}
+
+	/*** Disable Output Queues. ***/
+	for (q_no = srn; q_no < ern; q_no++) {
+		loop = HZ;
+
+		/* Wait until hardware indicates that the particular IQ
+		 * is out of reset. Note that SLI_PKT_RING_RST is
+		 * common to both IQs and OQs.
+		 */
+		WRITE_ONCE(d64, octeon_read_csr64(
+					oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+		while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
+			WRITE_ONCE(d64, octeon_read_csr64(
+					oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+			schedule_timeout_uninterruptible(1);
+		}
+
+		/* Reset the doorbell register for this Output Queue. */
+		octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
+				 0xFFFFFFFF);
+		while (octeon_read_csr64(oct,
+					 CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) &&
+		       loop--) {
+			schedule_timeout_uninterruptible(1);
+		}
+
+		/* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+		WRITE_ONCE(d32, octeon_read_csr(
+					oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
+		octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
+				 READ_ONCE(d32));
+	}
+}
+
+static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
+{
+	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+	struct octeon_device *oct = ioq_vector->oct_dev;
+	u64 pkts_sent;
+	u64 ret = 0;
+	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+
+	dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+
+	if (!droq) {
+		dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
+			oct->pf_num, ioq_vector->ioq_num);
+		return 0;
+	}
+
+	pkts_sent = readq(droq->pkts_sent_reg);
+
+	/* If our device has interrupted, then proceed. Also check
+	 * for all f's, which is returned if the interrupt was triggered
+	 * on an error and the PCI read failed.
+	 */
+	if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
+		return ret;
+
+	/* Write count reg in sli_pkt_cnts to clear these int.*/
+	if ((pkts_sent & CN23XX_INTR_PO_INT) ||
+	    (pkts_sent & CN23XX_INTR_PI_INT)) {
+		if (pkts_sent & CN23XX_INTR_PO_INT)
+			ret |= MSIX_PO_INT;
+	}
+
+	if (pkts_sent & CN23XX_INTR_PI_INT)
+		/* We will clear the count when we update the read_index. */
+		ret |= MSIX_PI_INT;
+
+	/* We never need to handle the MSI-X mailbox intr for the PF; those
+	 * arrive on the last MSI-X vector.
+	 */
+	return ret;
+}
+
+static irqreturn_t cn23xx_interrupt_handler(void *dev)
+{
+	struct octeon_device *oct = (struct octeon_device *)dev;
+	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+	u64 intr64;
+
+	dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+	intr64 = readq(cn23xx->intr_sum_reg64);
+
+	oct->int_status = 0;
+
+	if (intr64 & CN23XX_INTR_ERR)
+		dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
+			oct->octeon_id, CVM_CAST64(intr64));
+
+	if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
+		if (intr64 & CN23XX_INTR_PKT_DATA)
+			oct->int_status |= OCT_DEV_INTR_PKT_DATA;
+	}
+
+	if (intr64 & (CN23XX_INTR_DMA0_FORCE))
+		oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
+	if (intr64 & (CN23XX_INTR_DMA1_FORCE))
+		oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
+
+	/* Clear the current interrupts */
+	writeq(intr64, cn23xx->intr_sum_reg64);
+
+	return IRQ_HANDLED;
+}
+
+static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+	u64 intr_val = 0;
+
+	/* Divide the single write into multiple writes based on the flag. */
+	/* Enable Interrupt */
+	if (intr_flag == OCTEON_ALL_INTR) {
+		writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
+	} else if (intr_flag & OCTEON_OUTPUT_INTR) {
+		intr_val = readq(cn23xx->intr_enb_reg64);
+		intr_val |= CN23XX_INTR_PKT_DATA;
+		writeq(intr_val, cn23xx->intr_enb_reg64);
+	}
+}
+
+static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+	u64 intr_val = 0;
+
+	/* Disable Interrupts */
+	if (intr_flag == OCTEON_ALL_INTR) {
+		writeq(0, cn23xx->intr_enb_reg64);
+	} else if (intr_flag & OCTEON_OUTPUT_INTR) {
+		intr_val = readq(cn23xx->intr_enb_reg64);
+		intr_val &= ~CN23XX_INTR_PKT_DATA;
+		writeq(intr_val, cn23xx->intr_enb_reg64);
+	}
+}
+
+static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
+{
+	oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
+
+	dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n",
+		oct->pcie_port);
+}
+
+static void cn23xx_get_pf_num(struct octeon_device *oct)
+{
+	u32 fdl_bit = 0;
+
+	/** Read Function Dependency Link reg to get the function number */
+	pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, &fdl_bit);
+	oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
+		       CN23XX_PCIE_SRIOV_FDL_MASK);
+}
+
+static void cn23xx_setup_reg_address(struct octeon_device *oct)
+{
+	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+	oct->reg_list.pci_win_wr_addr_hi =
+	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI);
+	oct->reg_list.pci_win_wr_addr_lo =
+	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO);
+	oct->reg_list.pci_win_wr_addr =
+	    (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64);
+
+	oct->reg_list.pci_win_rd_addr_hi =
+	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI);
+	oct->reg_list.pci_win_rd_addr_lo =
+	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO);
+	oct->reg_list.pci_win_rd_addr =
+	    (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64);
+
+	oct->reg_list.pci_win_wr_data_hi =
+	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI);
+	oct->reg_list.pci_win_wr_data_lo =
+	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO);
+	oct->reg_list.pci_win_wr_data =
+	    (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64);
+
+	oct->reg_list.pci_win_rd_data_hi =
+	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI);
+	oct->reg_list.pci_win_rd_data_lo =
+	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO);
+	oct->reg_list.pci_win_rd_data =
+	    (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64);
+
+	cn23xx_get_pcie_qlmport(oct);
+
+	cn23xx->intr_mask64 = CN23XX_INTR_MASK;
+	if (!oct->msix_on)
+		cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME;
+	if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
+		cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX;
+
+	cn23xx->intr_sum_reg64 =
+	    bar0_pciaddr +
+	    CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
+	cn23xx->intr_enb_reg64 =
+	    bar0_pciaddr +
+	    CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
+}
+
+static int cn23xx_sriov_config(struct octeon_device *oct)
+{
+	u32 total_rings;
+	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+	/* num_vfs is already filled for us */
+	u32 pf_srn, num_pf_rings;
+
+	cn23xx->conf =
+	    (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
+	switch (oct->rev_id) {
+	case OCTEON_CN23XX_REV_1_0:
+		total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
+		break;
+	case OCTEON_CN23XX_REV_1_1:
+		total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+		break;
+	default:
+		total_rings = CN23XX_MAX_RINGS_PER_PF;
+		break;
+	}
+	if (!oct->sriov_info.num_pf_rings) {
+		if (total_rings > num_present_cpus())
+			num_pf_rings = num_present_cpus();
+		else
+			num_pf_rings = total_rings;
+	} else {
+		num_pf_rings = oct->sriov_info.num_pf_rings;
+
+		if (num_pf_rings > total_rings) {
+			dev_warn(&oct->pci_dev->dev,
+				 "num_queues_per_pf requested %u is more than available rings. Reducing to %u\n",
+				 num_pf_rings, total_rings);
+			num_pf_rings = total_rings;
+		}
+	}
+
+	total_rings = num_pf_rings;
+	/* the first ring of the pf */
+	pf_srn = total_rings - num_pf_rings;
+
+	oct->sriov_info.trs = total_rings;
+	oct->sriov_info.pf_srn = pf_srn;
+	oct->sriov_info.num_pf_rings = num_pf_rings;
+	dev_dbg(&oct->pci_dev->dev, "trs:%d pf_srn:%d num_pf_rings:%d\n",
+		oct->sriov_info.trs, oct->sriov_info.pf_srn,
+		oct->sriov_info.num_pf_rings);
+	return 0;
+}
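
The ring-count selection above boils down to clamping the requested (or defaulted) PF ring count to both the per-revision silicon limit and the number of present CPUs. A sketch using the kernel's min() macro (the helper name is hypothetical):

        /* Sketch: PF rings = min(requested or CPU count, rings this pass supports). */
        static u32 pick_pf_rings(u32 requested, u32 total_rings, u32 ncpus)
        {
                u32 n = requested ? requested : min(total_rings, ncpus);

                return min(n, total_rings);
        }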
+
+int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
+{
+	if (octeon_map_pci_barx(oct, 0, 0))
+		return 1;
+
+	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+		dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
+			__func__);
+		octeon_unmap_pci_barx(oct, 0);
+		return 1;
+	}
+
+	cn23xx_get_pf_num(oct);
+
+	if (cn23xx_sriov_config(oct)) {
+		octeon_unmap_pci_barx(oct, 0);
+		octeon_unmap_pci_barx(oct, 1);
+		return 1;
+	}
+
+	octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);
+
+	oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
+	oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
+	oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
+	oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;
+
+	oct->fn_list.soft_reset = cn23xx_pf_soft_reset;
+	oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;
+
+	oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt;
+	oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt;
+
+	oct->fn_list.enable_io_queues = cn23xx_enable_io_queues;
+	oct->fn_list.disable_io_queues = cn23xx_disable_io_queues;
+
+	cn23xx_setup_reg_address(oct);
+
+	oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);
+
+	return 0;
+}
+
+int validate_cn23xx_pf_config_info(struct octeon_device *oct,
+				   struct octeon_config *conf23xx)
+{
+	if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
+		dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
+			__func__, CFG_GET_IQ_MAX_Q(conf23xx),
+			CN23XX_MAX_INPUT_QUEUES);
+		return 1;
+	}
+
+	if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
+		dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
+			__func__, CFG_GET_OQ_MAX_Q(conf23xx),
+			CN23XX_MAX_OUTPUT_QUEUES);
+		return 1;
+	}
+
+	if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
+	    CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
+		dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
+			__func__);
+		return 1;
+	}
+
+	if (!(CFG_GET_OQ_INFO_PTR(conf23xx)) ||
+	    !(CFG_GET_OQ_REFILL_THRESHOLD(conf23xx))) {
+		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+			__func__);
+		return 1;
+	}
+
+	if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
+		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+			__func__);
+		return 1;
+	}
+
+	return 0;
+}
+
+void cn23xx_dump_iq_regs(struct octeon_device *oct)
+{
+	u32 regval, q_no;
+
+	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
+		CN23XX_SLI_IQ_DOORBELL(0),
+		CVM_CAST64(octeon_read_csr64
+			(oct, CN23XX_SLI_IQ_DOORBELL(0))));
+
+	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
+		CN23XX_SLI_IQ_BASE_ADDR64(0),
+		CVM_CAST64(octeon_read_csr64
+			(oct, CN23XX_SLI_IQ_BASE_ADDR64(0))));
+
+	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
+		CN23XX_SLI_IQ_SIZE(0),
+		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0))));
+
+	dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n",
+		CN23XX_SLI_CTL_STATUS,
+		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS)));
+
+	for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) {
+		dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
+			q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+			CVM_CAST64(octeon_read_csr64
+				(oct,
+					CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
+	}
+
+	pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+	dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
+		CN23XX_CONFIG_PCIE_DEVCTL, regval);
+
+	dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n",
+		oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
+		CVM_CAST64(lio_pci_readq(
+			oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))));
+
+	dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n",
+		oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port),
+		CVM_CAST64(octeon_read_csr64(
+			oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
+}
+
+int cn23xx_fw_loaded(struct octeon_device *oct)
+{
+	u64 val;
+
+	val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1);
+	return (val >> 1) & 1ULL;
+}
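
cn23xx_fw_loaded() tests bit 1 of SLI_SCRATCH1, which the driver uses as a "firmware already loaded" flag. A hedged usage sketch, where octeon_download_firmware() is a hypothetical placeholder for the actual loader:

        /* Sketch: skip a redundant firmware download if the flag is set. */
        if (!cn23xx_fw_loaded(oct))
                ret = octeon_download_firmware(oct);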

+ 57 - 0
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h

@@ -0,0 +1,57 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn23xx_pf_device.h
+ * \brief Host Driver: Routines that perform CN23XX specific operations.
+ */
+
+#ifndef __CN23XX_PF_DEVICE_H__
+#define __CN23XX_PF_DEVICE_H__
+
+#include "cn23xx_pf_regs.h"
+
+/* Register address and configuration for CN23XX devices.
+ * If device specific changes need to be made then add a struct to include
+ * device specific fields as shown in the commented section
+ */
+struct octeon_cn23xx_pf {
+	/** PCI interrupt summary register */
+	u8 __iomem *intr_sum_reg64;
+
+	/** PCI interrupt enable register */
+	u8 __iomem *intr_enb_reg64;
+
+	/** The PCI interrupt mask used by interrupt handler */
+	u64 intr_mask64;
+
+	struct octeon_config *conf;
+};
+
+int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
+
+int validate_cn23xx_pf_config_info(struct octeon_device *oct,
+				   struct octeon_config *conf23xx);
+
+void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
+
+int cn23xx_fw_loaded(struct octeon_device *oct);
+#endif

+ 604 - 0
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h

@@ -0,0 +1,604 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn23xx_pf_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN23XX devices.
+ */
+
+#ifndef __CN23XX_PF_REGS_H__
+#define __CN23XX_PF_REGS_H__
+
+#define     CN23XX_CONFIG_VENDOR_ID	0x00
+#define     CN23XX_CONFIG_DEVICE_ID	0x02
+
+#define     CN23XX_CONFIG_XPANSION_BAR             0x38
+
+#define     CN23XX_CONFIG_MSIX_CAP		   0x50
+#define     CN23XX_CONFIG_MSIX_LMSI		   0x54
+#define     CN23XX_CONFIG_MSIX_UMSI		   0x58
+#define     CN23XX_CONFIG_MSIX_MSIMD		   0x5C
+#define     CN23XX_CONFIG_MSIX_MSIMM		   0x60
+#define     CN23XX_CONFIG_MSIX_MSIMP		   0x64
+
+#define     CN23XX_CONFIG_PCIE_CAP                 0x70
+#define     CN23XX_CONFIG_PCIE_DEVCAP              0x74
+#define     CN23XX_CONFIG_PCIE_DEVCTL              0x78
+#define     CN23XX_CONFIG_PCIE_LINKCAP             0x7C
+#define     CN23XX_CONFIG_PCIE_LINKCTL             0x80
+#define     CN23XX_CONFIG_PCIE_SLOTCAP             0x84
+#define     CN23XX_CONFIG_PCIE_SLOTCTL             0x88
+#define     CN23XX_CONFIG_PCIE_DEVCTL2             0x98
+#define     CN23XX_CONFIG_PCIE_LINKCTL2            0xA0
+#define     CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK  0x108
+#define     CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS  0x110
+#define     CN23XX_CONFIG_PCIE_DEVCTL_MASK         0x00040000
+
+#define     CN23XX_PCIE_SRIOV_FDL		   0x188
+#define     CN23XX_PCIE_SRIOV_FDL_BIT_POS	   0x10
+#define     CN23XX_PCIE_SRIOV_FDL_MASK		   0xFF
+
+#define     CN23XX_CONFIG_PCIE_FLTMSK              0x720
+
+#define     CN23XX_CONFIG_SRIOV_VFDEVID            0x190
+
+#define     CN23XX_CONFIG_SRIOV_BAR_START	   0x19C
+#define     CN23XX_CONFIG_SRIOV_BARX(i)		\
+		(CN23XX_CONFIG_SRIOV_BAR_START + (i * 4))
+#define     CN23XX_CONFIG_SRIOV_BAR_PF		   0x08
+#define     CN23XX_CONFIG_SRIOV_BAR_64BIT	   0x04
+#define     CN23XX_CONFIG_SRIOV_BAR_IO		   0x01
+
+/* ##############  BAR0 Registers ################ */
+
+#define    CN23XX_SLI_CTL_PORT_START               0x286E0
+#define    CN23XX_PORT_OFFSET                      0x10
+
+#define    CN23XX_SLI_CTL_PORT(p)                  \
+		(CN23XX_SLI_CTL_PORT_START + ((p) * CN23XX_PORT_OFFSET))
+
+/* 2 scratch registers (64-bit) */
+#define    CN23XX_SLI_WINDOW_CTL                   0x282E0
+#define    CN23XX_SLI_SCRATCH1                     0x283C0
+#define    CN23XX_SLI_SCRATCH2                     0x283D0
+#define    CN23XX_SLI_WINDOW_CTL_DEFAULT           0x200000ULL
+
+/* 1 register (64-bit) - SLI_CTL_STATUS */
+#define    CN23XX_SLI_CTL_STATUS                   0x28570
+
+/* SLI Packet Input Jabber Register (64-bit register)
+ * <31:0> holds the byte count limiting the packet sizes
+ * allowed for SLI inbound packets.
+ * The default value is 0xFA00 (=64000).
+ */
+#define    CN23XX_SLI_PKT_IN_JABBER                0x29170
+/* The input jabber is used to determine the TSO max size.
+ * Due to a H/W limitation, this needs to be reduced to 60000
+ * in order to do H/W TSO and avoid the WQE malformation
+ * (PKO_BUG_24989_WQE_LEN).
+ */
+#define    CN23XX_DEFAULT_INPUT_JABBER             0xEA60 /*60000*/
+
+#define    CN23XX_WIN_WR_ADDR_LO                   0x20000
+#define    CN23XX_WIN_WR_ADDR_HI                   0x20004
+#define    CN23XX_WIN_WR_ADDR64                    CN23XX_WIN_WR_ADDR_LO
+
+#define    CN23XX_WIN_RD_ADDR_LO                   0x20010
+#define    CN23XX_WIN_RD_ADDR_HI                   0x20014
+#define    CN23XX_WIN_RD_ADDR64                    CN23XX_WIN_RD_ADDR_LO
+
+#define    CN23XX_WIN_WR_DATA_LO                   0x20020
+#define    CN23XX_WIN_WR_DATA_HI                   0x20024
+#define    CN23XX_WIN_WR_DATA64                    CN23XX_WIN_WR_DATA_LO
+
+#define    CN23XX_WIN_RD_DATA_LO                   0x20040
+#define    CN23XX_WIN_RD_DATA_HI                   0x20044
+#define    CN23XX_WIN_RD_DATA64                    CN23XX_WIN_RD_DATA_LO
+
+#define    CN23XX_WIN_WR_MASK_LO                   0x20030
+#define    CN23XX_WIN_WR_MASK_HI                   0x20034
+#define    CN23XX_WIN_WR_MASK_REG                  CN23XX_WIN_WR_MASK_LO
+#define    CN23XX_SLI_MAC_CREDIT_CNT               0x23D70
+
+/* 4 registers (64-bit) for mapping IOQs to MACs(PEMs)-
+ * SLI_PKT_MAC(0..3)_PF(0..1)_RINFO
+ */
+#define    CN23XX_SLI_PKT_MAC_RINFO_START64       0x29030
+
+/* 1 register (64-bit) to determine whether IOQs are in reset. */
+#define    CN23XX_SLI_PKT_IOQ_RING_RST            0x291E0
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define    CN23XX_IQ_OFFSET                       0x20000
+
+#define    CN23XX_MAC_RINFO_OFFSET                0x20
+#define    CN23XX_PF_RINFO_OFFSET                 0x10
+
+#define CN23XX_SLI_PKT_MAC_RINFO64(mac, pf)		\
+		(CN23XX_SLI_PKT_MAC_RINFO_START64 +     \
+		 ((mac) * CN23XX_MAC_RINFO_OFFSET) +	\
+		 ((pf) * CN23XX_PF_RINFO_OFFSET))
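
Worked address example for the macro above, with mac = 1 and pf = 0:

        /* CN23XX_SLI_PKT_MAC_RINFO64(1, 0)
         *   = 0x29030 + (1 * 0x20) + (0 * 0x10)
         *   = 0x29050
         */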
+
+/** mask for total rings, setting TRS to base */
+#define    CN23XX_PKT_MAC_CTL_RINFO_TRS               BIT_ULL(16)
+/** mask for starting ring number: setting SRN <6:0> = 0x7F */
+#define    CN23XX_PKT_MAC_CTL_RINFO_SRN               (0x7F)
+
+/* Starting bit of the TRS field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define    CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS     16
+/* Starting bit of SRN field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define    CN23XX_PKT_MAC_CTL_RINFO_SRN_BIT_POS     0
+/* Starting bit of RPVF field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define    CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS     32
+/* Starting bit of NVFS field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define    CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS     48
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define    CN23XX_SLI_IQ_INSTR_COUNT_START64     0x10040
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */
+#define    CN23XX_SLI_IQ_BASE_ADDR_START64       0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define    CN23XX_SLI_IQ_DOORBELL_START          0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define    CN23XX_SLI_IQ_SIZE_START              0x10030
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define    CN23XX_SLI_IQ_PKT_CONTROL_START64    0x10000
+
+/*------- Request Queue Macros ---------*/
+#define    CN23XX_SLI_IQ_PKT_CONTROL64(iq)          \
+		(CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define    CN23XX_SLI_IQ_BASE_ADDR64(iq)          \
+		(CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define    CN23XX_SLI_IQ_SIZE(iq)                 \
+		(CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define    CN23XX_SLI_IQ_DOORBELL(iq)             \
+		(CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define    CN23XX_SLI_IQ_INSTR_COUNT64(iq)          \
+		(CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+/*------------------ Masks ----------------*/
+#define    CN23XX_PKT_INPUT_CTL_VF_NUM                  BIT_ULL(32)
+#define    CN23XX_PKT_INPUT_CTL_MAC_NUM                 BIT(29)
+/* Number of instructions to be read in one MAC read request;
+ * set to the max value (4).
+ */
+#define    CN23XX_PKT_INPUT_CTL_RDSIZE                  (3 << 25)
+#define    CN23XX_PKT_INPUT_CTL_IS_64B                  BIT(24)
+#define    CN23XX_PKT_INPUT_CTL_RST                     BIT(23)
+#define    CN23XX_PKT_INPUT_CTL_QUIET                   BIT(28)
+#define    CN23XX_PKT_INPUT_CTL_RING_ENB                BIT(22)
+#define    CN23XX_PKT_INPUT_CTL_DATA_NS                 BIT(8)
+#define    CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP        BIT(6)
+#define    CN23XX_PKT_INPUT_CTL_DATA_RO                 BIT(5)
+#define    CN23XX_PKT_INPUT_CTL_USE_CSR                 BIT(4)
+#define    CN23XX_PKT_INPUT_CTL_GATHER_NS               BIT(3)
+#define    CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP      (2)
+#define    CN23XX_PKT_INPUT_CTL_GATHER_RO               (1)
+
+/** Rings per Virtual Function **/
+#define    CN23XX_PKT_INPUT_CTL_RPVF_MASK               (0x3F)
+#define    CN23XX_PKT_INPUT_CTL_RPVF_POS                (48)
+/** These bits[47:44] select the Physical function number within the MAC */
+#define    CN23XX_PKT_INPUT_CTL_PF_NUM_MASK             (0x7)
+#define    CN23XX_PKT_INPUT_CTL_PF_NUM_POS              (45)
+/** These bits[43:32] select the function number within the PF */
+#define    CN23XX_PKT_INPUT_CTL_VF_NUM_MASK             (0x1FFF)
+#define    CN23XX_PKT_INPUT_CTL_VF_NUM_POS              (32)
+#define    CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK            (0x3)
+#define    CN23XX_PKT_INPUT_CTL_MAC_NUM_POS             (29)
+#define    CN23XX_PKT_IN_DONE_WMARK_MASK                (0xFFFFULL)
+#define    CN23XX_PKT_IN_DONE_WMARK_BIT_POS             (32)
+#define    CN23XX_PKT_IN_DONE_CNT_MASK                  (0x00000000FFFFFFFFULL)
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+#define    CN23XX_PKT_INPUT_CTL_MASK				\
+		(CN23XX_PKT_INPUT_CTL_RDSIZE		|	\
+		 CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP	|	\
+		 CN23XX_PKT_INPUT_CTL_USE_CSR)
+#else
+#define    CN23XX_PKT_INPUT_CTL_MASK				\
+		(CN23XX_PKT_INPUT_CTL_RDSIZE		|	\
+		 CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP	|	\
+		 CN23XX_PKT_INPUT_CTL_USE_CSR		|	\
+		 CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
+/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */
+#define    CN23XX_IN_DONE_CNTS_PI_INT               BIT_ULL(62)
+#define    CN23XX_IN_DONE_CNTS_CINT_ENB             BIT_ULL(48)
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define    CN23XX_SLI_OQ_PKT_CONTROL_START       0x10050
+
+/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define    CN23XX_SLI_OQ0_BUFF_INFO_SIZE         0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define    CN23XX_SLI_OQ_BASE_ADDR_START64       0x10070
+
+/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define    CN23XX_SLI_OQ_PKT_CREDITS_START       0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define    CN23XX_SLI_OQ_SIZE_START              0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define    CN23XX_SLI_OQ_PKT_SENT_START          0x100B0
+
+/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */
+#define    CN23XX_SLI_OQ_PKT_INT_LEVELS_START64   0x100A0
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define    CN23XX_OQ_OFFSET                      0x20000
+
+/* 1 (64-bit register) for Output Queue backpressure across all rings. */
+#define    CN23XX_SLI_OQ_WMARK                   0x29180
+
+/* Global pkt control register */
+#define    CN23XX_SLI_GBL_CONTROL                0x29210
+
+/* Backpressure enable register for PF0  */
+#define    CN23XX_SLI_OUT_BP_EN_W1S              0x29260
+
+/* Backpressure enable register for PF1  */
+#define    CN23XX_SLI_OUT_BP_EN2_W1S             0x29270
+
+/* Backpressure disable register for PF0  */
+#define    CN23XX_SLI_OUT_BP_EN_W1C              0x29280
+
+/* Backpressure disable register for PF1  */
+#define    CN23XX_SLI_OUT_BP_EN2_W1C             0x29290
+
+/*------- Output Queue Macros ---------*/
+
+#define    CN23XX_SLI_OQ_PKT_CONTROL(oq)          \
+		(CN23XX_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define    CN23XX_SLI_OQ_BASE_ADDR64(oq)          \
+		(CN23XX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))
+
+#define    CN23XX_SLI_OQ_SIZE(oq)                 \
+		(CN23XX_SLI_OQ_SIZE_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define    CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq)                 \
+		(CN23XX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_OQ_OFFSET))
+
+#define    CN23XX_SLI_OQ_PKTS_SENT(oq)            \
+		(CN23XX_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define    CN23XX_SLI_OQ_PKTS_CREDIT(oq)          \
+		(CN23XX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define    CN23XX_SLI_OQ_PKT_INT_LEVELS(oq)		\
+		(CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 +	\
+		 ((oq) * CN23XX_OQ_OFFSET))
+
+/* Macros for accessing CNT and TIME separately from INT_LEVELS */
+#define    CN23XX_SLI_OQ_PKT_INT_LEVELS_CNT(oq)		\
+		(CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+		 ((oq) * CN23XX_OQ_OFFSET))
+
+#define    CN23XX_SLI_OQ_PKT_INT_LEVELS_TIME(oq)	\
+		(CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 +	\
+		 ((oq) * CN23XX_OQ_OFFSET) + 4)
+
+/*------------------ Masks ----------------*/
+#define    CN23XX_PKT_OUTPUT_CTL_TENB                  BIT(13)
+#define    CN23XX_PKT_OUTPUT_CTL_CENB                  BIT(12)
+#define    CN23XX_PKT_OUTPUT_CTL_IPTR                  BIT(11)
+#define    CN23XX_PKT_OUTPUT_CTL_ES                    BIT(9)
+#define    CN23XX_PKT_OUTPUT_CTL_NSR                   BIT(8)
+#define    CN23XX_PKT_OUTPUT_CTL_ROR                   BIT(7)
+#define    CN23XX_PKT_OUTPUT_CTL_DPTR                  BIT(6)
+#define    CN23XX_PKT_OUTPUT_CTL_BMODE                 BIT(5)
+#define    CN23XX_PKT_OUTPUT_CTL_ES_P                  BIT(3)
+#define    CN23XX_PKT_OUTPUT_CTL_NSR_P                 BIT(2)
+#define    CN23XX_PKT_OUTPUT_CTL_ROR_P                 BIT(1)
+#define    CN23XX_PKT_OUTPUT_CTL_RING_ENB              BIT(0)
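
To make the layout concrete: every per-ring CSR in this block is just its base address plus the ring number times CN23XX_OQ_OFFSET. A standalone sketch (base addresses copied from the defines above, ring number illustrative):

#include <stdio.h>

int main(void)
{
	const unsigned long oq_ctl_base = 0x10050;  /* ..._PKT_CONTROL_START */
	const unsigned long oq_sent_base = 0x100B0; /* ..._PKT_SENT_START */
	const unsigned long oq_stride = 0x20000;    /* CN23XX_OQ_OFFSET */
	unsigned int oq = 3;                        /* illustrative ring */

	printf("SLI_PKT%u_OUTPUT_CONTROL @ BAR0+0x%lx\n",
	       oq, oq_ctl_base + oq * oq_stride);
	printf("SLI_PKT%u_CNTS           @ BAR0+0x%lx\n",
	       oq, oq_sent_base + oq * oq_stride);
	return 0;
}
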
+
+/*######################### Mailbox Reg Macros ########################*/
+#define    CN23XX_SLI_PKT_MBOX_INT_START             0x10210
+#define    CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START       0x10200
+#define    CN23XX_SLI_MAC_PF_MBOX_INT_START          0x27380
+
+#define    CN23XX_SLI_MBOX_OFFSET		     0x20000
+#define    CN23XX_SLI_MBOX_SIG_IDX_OFFSET	     0x8
+
+#define    CN23XX_SLI_PKT_MBOX_INT(q)          \
+		(CN23XX_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#define    CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx)		\
+		(CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START +		\
+		 ((q) * CN23XX_SLI_MBOX_OFFSET +		\
+		  (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
+
+#define    CN23XX_SLI_MAC_PF_MBOX_INT(mac, pf)		\
+		(CN23XX_SLI_MAC_PF_MBOX_INT_START +	\
+		 ((mac) * CN23XX_MAC_INT_OFFSET +	\
+		  (pf) * CN23XX_PF_INT_OFFSET))
+
+/*######################### DMA Counters #########################*/
+
+/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. */
+#define    CN23XX_DMA_CNT_START                   0x28400
+
+/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values */
+/* SLI_DMA_0_TIM */
+#define    CN23XX_DMA_TIM_START                   0x28420
+
+/* 2 registers (64-bit) - DMA count & Time Interrupt threshold -
+ * SLI_DMA_0_INT_LEVEL
+ */
+#define    CN23XX_DMA_INT_LEVEL_START             0x283E0
+
+/* Each DMA register is at a 16-byte Offset in BAR0 */
+#define    CN23XX_DMA_OFFSET                      0x10
+
+/*---------- DMA Counter Macros ---------*/
+#define    CN23XX_DMA_CNT(dq)                      \
+		(CN23XX_DMA_CNT_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define    CN23XX_DMA_INT_LEVEL(dq)                \
+		(CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define    CN23XX_DMA_PKT_INT_LEVEL(dq)            \
+		(CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define    CN23XX_DMA_TIME_INT_LEVEL(dq)           \
+		(CN23XX_DMA_INT_LEVEL_START + 4 + ((dq) * CN23XX_DMA_OFFSET))
+
+#define    CN23XX_DMA_TIM(dq)                     \
+		(CN23XX_DMA_TIM_START + ((dq) * CN23XX_DMA_OFFSET))
+
+/*######################## MSIX TABLE #########################*/
+
+#define	CN23XX_MSIX_TABLE_ADDR_START		0x0
+#define	CN23XX_MSIX_TABLE_DATA_START		0x8
+
+#define	CN23XX_MSIX_TABLE_SIZE			0x10
+#define	CN23XX_MSIX_TABLE_ENTRIES		0x41
+
+#define CN23XX_MSIX_ENTRY_VECTOR_CTL	BIT_ULL(32)
+
+#define	CN23XX_MSIX_TABLE_ADDR(idx)		\
+	(CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+#define	CN23XX_MSIX_TABLE_DATA(idx)		\
+	(CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
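
Note that 0x41 is 65 entries, which appears to line up with 64 ring vectors plus the one non-queue vector requested in octeon_setup_interrupt() later in this patch. A hedged sketch of the entry offset arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned int size = 0x10;     /* CN23XX_MSIX_TABLE_SIZE */
	const unsigned int entries = 0x41;  /* 65 = 64 rings + 1 extra */
	unsigned int idx = 64;              /* last entry, illustrative */

	printf("msix table entries: %u\n", entries);
	printf("entry %u: addr @ 0x%x, data @ 0x%x\n",
	       idx, 0x0 + idx * size, 0x8 + idx * size);
	return 0;
}
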
+/*######################## INTERRUPTS #########################*/
+#define CN23XX_MAC_INT_OFFSET   0x20
+#define CN23XX_PF_INT_OFFSET    0x10
+
+/* 1 register (64-bit) for Interrupt Summary */
+#define    CN23XX_SLI_INT_SUM64            0x27000
+
+/* 4 registers (64-bit) for Interrupt Enable for each Port */
+#define    CN23XX_SLI_INT_ENB64            0x27080
+
+#define    CN23XX_SLI_MAC_PF_INT_SUM64(mac, pf)			\
+		(CN23XX_SLI_INT_SUM64 +				\
+		 ((mac) * CN23XX_MAC_INT_OFFSET) +		\
+		 ((pf) * CN23XX_PF_INT_OFFSET))
+
+#define    CN23XX_SLI_MAC_PF_INT_ENB64(mac, pf)		\
+		(CN23XX_SLI_INT_ENB64 +			\
+		 ((mac) * CN23XX_MAC_INT_OFFSET) +	\
+		 ((pf) * CN23XX_PF_INT_OFFSET))
+
+/* 1 register (64-bit) to indicate which Output Queue reached pkt threshold */
+#define    CN23XX_SLI_PKT_CNT_INT                0x29130
+
+/* 1 register (64-bit) to indicate which Output Queue reached time threshold */
+#define    CN23XX_SLI_PKT_TIME_INT               0x29140
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define    CN23XX_INTR_PO_INT			BIT_ULL(63)
+#define    CN23XX_INTR_PI_INT			BIT_ULL(62)
+#define    CN23XX_INTR_MBOX_INT			BIT_ULL(61)
+#define    CN23XX_INTR_RESEND			BIT_ULL(60)
+
+#define    CN23XX_INTR_CINT_ENB                 BIT_ULL(48)
+#define    CN23XX_INTR_MBOX_ENB                 BIT(0)
+
+#define    CN23XX_INTR_RML_TIMEOUT_ERR           (1)
+
+#define    CN23XX_INTR_MIO_INT                   BIT(1)
+
+#define    CN23XX_INTR_RESERVED1                 (3 << 2)
+
+#define    CN23XX_INTR_PKT_COUNT                 BIT(4)
+#define    CN23XX_INTR_PKT_TIME                  BIT(5)
+
+#define    CN23XX_INTR_RESERVED2                 (3 << 6)
+
+#define    CN23XX_INTR_M0UPB0_ERR                BIT(8)
+#define    CN23XX_INTR_M0UPWI_ERR                BIT(9)
+#define    CN23XX_INTR_M0UNB0_ERR                BIT(10)
+#define    CN23XX_INTR_M0UNWI_ERR                BIT(11)
+
+#define    CN23XX_INTR_RESERVED3                 (0xFFFFFULL << 12)
+
+#define    CN23XX_INTR_DMA0_FORCE                BIT_ULL(32)
+#define    CN23XX_INTR_DMA1_FORCE                BIT_ULL(33)
+
+#define    CN23XX_INTR_DMA0_COUNT                BIT_ULL(34)
+#define    CN23XX_INTR_DMA1_COUNT                BIT_ULL(35)
+
+#define    CN23XX_INTR_DMA0_TIME                 BIT_ULL(36)
+#define    CN23XX_INTR_DMA1_TIME                 BIT_ULL(37)
+
+#define    CN23XX_INTR_RESERVED4                 (0x7FFFFULL << 38)
+
+#define    CN23XX_INTR_VF_MBOX                   BIT_ULL(57)
+#define    CN23XX_INTR_DMAVF_ERR                 BIT_ULL(58)
+#define    CN23XX_INTR_DMAPF_ERR                 BIT_ULL(59)
+
+#define    CN23XX_INTR_PKTVF_ERR                 BIT_ULL(60)
+#define    CN23XX_INTR_PKTPF_ERR                 BIT_ULL(61)
+#define    CN23XX_INTR_PPVF_ERR                  BIT_ULL(62)
+#define    CN23XX_INTR_PPPF_ERR                  BIT_ULL(63)
+
+#define    CN23XX_INTR_DMA0_DATA                 (CN23XX_INTR_DMA0_TIME)
+#define    CN23XX_INTR_DMA1_DATA                 (CN23XX_INTR_DMA1_TIME)
+
+#define    CN23XX_INTR_DMA_DATA                  \
+		(CN23XX_INTR_DMA0_DATA | CN23XX_INTR_DMA1_DATA)
+
+/* By default only TIME based */
+#define    CN23XX_INTR_PKT_DATA                  (CN23XX_INTR_PKT_TIME)
+/* For both COUNT and TIME based */
+/* #define    CN23XX_INTR_PKT_DATA                  \
+ * (CN23XX_INTR_PKT_COUNT | CN23XX_INTR_PKT_TIME)
+ */
+
+/* Sum of interrupts for all PCI-Express Data Interrupts */
+#define    CN23XX_INTR_PCIE_DATA                 \
+		(CN23XX_INTR_DMA_DATA | CN23XX_INTR_PKT_DATA)
+
+/* Sum of interrupts for error events */
+#define    CN23XX_INTR_ERR			\
+		(CN23XX_INTR_M0UPB0_ERR	|	\
+		 CN23XX_INTR_M0UPWI_ERR	|	\
+		 CN23XX_INTR_M0UNB0_ERR	|	\
+		 CN23XX_INTR_M0UNWI_ERR	|	\
+		 CN23XX_INTR_DMAVF_ERR	|	\
+		 CN23XX_INTR_DMAPF_ERR	|	\
+		 CN23XX_INTR_PKTPF_ERR	|	\
+		 CN23XX_INTR_PPPF_ERR	|	\
+		 CN23XX_INTR_PPVF_ERR)
+
+/* Programmed Mask for Interrupt Sum */
+#define    CN23XX_INTR_MASK			\
+		(CN23XX_INTR_DMA_DATA	|	\
+		 CN23XX_INTR_DMA0_FORCE	|	\
+		 CN23XX_INTR_DMA1_FORCE	|	\
+		 CN23XX_INTR_MIO_INT	|	\
+		 CN23XX_INTR_ERR)
+
+/* 4 Registers (64-bit) */
+#define    CN23XX_SLI_S2M_PORT_CTL_START         0x23D80
+#define    CN23XX_SLI_S2M_PORTX_CTL(port)	\
+		(CN23XX_SLI_S2M_PORT_CTL_START + ((port) * 0x10))
+
+#define    CN23XX_SLI_MAC_NUMBER                 0x20050
+
+/** PEM(0..3)_BAR1_INDEX(0..15) address is defined as
+ *  addr = (0x00011800C0000100 | port << 24 | idx << 3)
+ *  Here, port is PEM(0..3) and idx is INDEX(0..15)
+ */
+#define    CN23XX_PEM_BAR1_INDEX_START             0x00011800C0000100ULL
+#define    CN23XX_PEM_OFFSET                       24
+#define    CN23XX_BAR1_INDEX_OFFSET                3
+
+#define    CN23XX_PEM_BAR1_INDEX_REG(port, idx)		\
+		(CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
+		 ((idx) << CN23XX_BAR1_INDEX_OFFSET))
+
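
A standalone sketch of the address formula in the comment above (the port and index values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t base = 0x00011800C0000100ULL;
	unsigned int port = 2, idx = 5;	/* illustrative */
	uint64_t addr = base | ((uint64_t)port << 24) | ((uint64_t)idx << 3);

	printf("PEM%u_BAR1_INDEX%u @ 0x%016llx\n",
	       port, idx, (unsigned long long)addr);
	return 0;
}
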
+/*############################ DPI #########################*/
+
+/* 1 register (64-bit) - provides DMA Enable */
+#define    CN23XX_DPI_CTL                 0x0001df0000000040ULL
+
+/* 1 register (64-bit) - Controls the DMA IO Operation */
+#define    CN23XX_DPI_DMA_CONTROL         0x0001df0000000048ULL
+
+/* 1 register (64-bit) - Provides DMA Instr'n Queue Enable  */
+#define    CN23XX_DPI_REQ_GBL_ENB         0x0001df0000000050ULL
+
+/* 1 register (64-bit) - DPI_REQ_ERR_RSP
+ * Indicates which Instr'n Queue received error response from the IO sub-system
+ */
+#define    CN23XX_DPI_REQ_ERR_RSP         0x0001df0000000058ULL
+
+/* 1 register (64-bit) - DPI_REQ_ERR_RST
+ * Indicates which Instr'n Queue dropped an Instr'n
+ */
+#define    CN23XX_DPI_REQ_ERR_RST         0x0001df0000000060ULL
+
+/* 6 registers (64-bit) - DPI_DMA_ENG(0..5)_EN
+ * Provides DMA Engine Queue Enable
+ */
+#define    CN23XX_DPI_DMA_ENG0_ENB        0x0001df0000000080ULL
+#define    CN23XX_DPI_DMA_ENG_ENB(eng) (CN23XX_DPI_DMA_ENG0_ENB + ((eng) * 8))
+
+/* 8 registers (64-bit) - DPI_DMA(0..7)_REQQ_CTL
+ * Provides control bits for transactions on 8 Queues
+ */
+#define    CN23XX_DPI_DMA_REQQ0_CTL       0x0001df0000000180ULL
+#define    CN23XX_DPI_DMA_REQQ_CTL(q_no)	\
+		(CN23XX_DPI_DMA_REQQ0_CTL + ((q_no) * 8))
+
+/* 6 registers (64-bit) - DPI_ENG(0..5)_BUF
+ * Provides DMA Engine FIFO (Queue) Size
+ */
+#define    CN23XX_DPI_DMA_ENG0_BUF        0x0001df0000000880ULL
+#define    CN23XX_DPI_DMA_ENG_BUF(eng)   \
+		(CN23XX_DPI_DMA_ENG0_BUF + ((eng) * 8))
+
+/* 4 Registers (64-bit) */
+#define    CN23XX_DPI_SLI_PRT_CFG_START   0x0001df0000000900ULL
+#define    CN23XX_DPI_SLI_PRTX_CFG(port)        \
+		(CN23XX_DPI_SLI_PRT_CFG_START + ((port) * 0x8))
+
+/* Masks for DPI_DMA_CONTROL Register */
+#define    CN23XX_DPI_DMA_COMMIT_MODE     BIT_ULL(58)
+#define    CN23XX_DPI_DMA_PKT_EN          BIT_ULL(56)
+#define    CN23XX_DPI_DMA_ENB             (0x0FULL << 48)
+/* Set DMA Control to update the packet count (not the byte count) sent
+ * by DMA when we use interrupt coalescing (CA mode)
+ */
+#define    CN23XX_DPI_DMA_O_ADD1          BIT(19)
+/* Selecting 64-bit byte swap mode */
+#define    CN23XX_DPI_DMA_O_ES            BIT(15)
+#define    CN23XX_DPI_DMA_O_MODE          BIT(14)
+
+#define    CN23XX_DPI_DMA_CTL_MASK			\
+		(CN23XX_DPI_DMA_COMMIT_MODE	|	\
+		 CN23XX_DPI_DMA_PKT_EN		|	\
+		 CN23XX_DPI_DMA_O_ES		|	\
+		 CN23XX_DPI_DMA_O_MODE)
+
+/*############################ RST #########################*/
+
+#define    CN23XX_RST_BOOT            0x0001180006001600ULL
+#define    CN23XX_RST_SOFT_RST        0x0001180006001680ULL
+
+#define    CN23XX_LMC0_RESET_CTL               0x0001180088000180ULL
+#define    CN23XX_LMC0_RESET_CTL_DDR3RST_MASK  0x0000000000000001ULL
+
+#endif

+ 9 - 36
drivers/net/ethernet/cavium/liquidio/cn66xx_device.c

@@ -338,7 +338,7 @@ void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
 	octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
 }
 
-void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
+int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
 {
 	u32 mask;
 
@@ -353,6 +353,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
 	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
 	mask |= oct->io_qmask.oq;
 	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+
+	return 0;
 }
 
 void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
@@ -418,36 +420,6 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
 		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
 }
 
-void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
-{
-	int i;
-
-	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
-		if (!(oct->io_qmask.iq & (1ULL << i)))
-			continue;
-		oct->fn_list.setup_iq_regs(oct, i);
-	}
-
-	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
-		if (!(oct->io_qmask.oq & (1ULL << i)))
-			continue;
-		oct->fn_list.setup_oq_regs(oct, i);
-	}
-
-	oct->fn_list.setup_device_regs(oct);
-
-	oct->fn_list.enable_interrupt(oct->chip);
-
-	oct->fn_list.enable_io_queues(oct);
-
-	/* for (i = 0; i < oct->num_oqs; i++) { */
-	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
-		if (!(oct->io_qmask.oq & (1ULL << i)))
-			continue;
-		writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
-	}
-}
-
 void
 lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
 			  u64 core_addr,
@@ -507,18 +479,20 @@ lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
 	return new_idx;
 }
 
-void lio_cn6xxx_enable_interrupt(void *chip)
+void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
+				 u8 unused __attribute__((unused)))
 {
-	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
 	u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;
 
 	/* Enable Interrupt */
 	writeq(mask, cn6xxx->intr_enb_reg64);
 }
 
-void lio_cn6xxx_disable_interrupt(void *chip)
+void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
+				  u8 unused __attribute__((unused)))
 {
-	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
 
 	/* Disable Interrupts */
 	writeq(0, cn6xxx->intr_enb_reg64);
@@ -714,7 +688,6 @@ int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
 
 
 	oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
 	oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
-	oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
 	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
 
 	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;

+ 3 - 4
drivers/net/ethernet/cavium/liquidio/cn66xx_device.h

@@ -80,18 +80,17 @@ void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
 void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
 void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
 void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
-void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
+int lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
 void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
 irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
-void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
 void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
 			       u32 idx, int valid);
 void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
 u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
 u32
 lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
-void lio_cn6xxx_enable_interrupt(void *chip);
-void lio_cn6xxx_disable_interrupt(void *chip);
+void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused);
+void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused);
 void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
 void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
 				  struct octeon_reg_list *reg_list);

+ 0 - 1
drivers/net/ethernet/cavium/liquidio/cn68xx_device.c

@@ -148,7 +148,6 @@ int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
 	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
 	oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
 	oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
-	oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
 	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
 
 	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;

+ 261 - 0
drivers/net/ethernet/cavium/liquidio/lio_core.c

@@ -0,0 +1,261 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+*          Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT.  See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octnic_ctrl_pkt nctrl;
+	int ret = 0;
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = cmd;
+	nctrl.ncmd.s.param1 = param1;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+	nctrl.wait_time = 100;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
+			ret);
+	}
+	return ret;
+}
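
A hedged sketch of how a caller elsewhere in the driver can use this helper; the wrapper function and the param1 value here are hypothetical, while OCTNET_CMD_LRO_ENABLE is one of the commands handled by the completion callback below.

/* Hypothetical caller: request LRO enable; the result is reported
 * asynchronously via liquidio_link_ctrl_cmd_completion() below.
 */
static int example_enable_lro(struct net_device *netdev)
{
	return liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 0);
}
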
+
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+					unsigned int bytes_compl)
+{
+	struct netdev_queue *netdev_queue = txq;
+
+	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
+}
+
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+					  unsigned int *pkts_compl,
+					  unsigned int *bytes_compl)
+{
+	struct octnet_buf_free_info *finfo;
+	struct sk_buff *skb = NULL;
+	struct octeon_soft_command *sc;
+
+	switch (reqtype) {
+	case REQTYPE_NORESP_NET:
+	case REQTYPE_NORESP_NET_SG:
+		finfo = buf;
+		skb = finfo->skb;
+		break;
+
+	case REQTYPE_RESP_NET_SG:
+	case REQTYPE_RESP_NET:
+		sc = buf;
+		skb = sc->callback_arg;
+		break;
+
+	default:
+		return;
+	}
+
+	(*pkts_compl)++;
+/* TODO: use some other define to indicate that IQs are not tied to
+ * netdevs and can take traffic from different netdevs; this is why BQL
+ * reporting is done per packet rather than in bulk. The use of NO_NAPI
+ * in txq completion is a little confusing.
+ */
+	*bytes_compl += skb->len;
+}
+
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
+{
+	struct octnet_buf_free_info *finfo;
+	struct sk_buff *skb;
+	struct octeon_soft_command *sc;
+	struct netdev_queue *txq;
+
+	switch (reqtype) {
+	case REQTYPE_NORESP_NET:
+	case REQTYPE_NORESP_NET_SG:
+		finfo = buf;
+		skb = finfo->skb;
+		break;
+
+	case REQTYPE_RESP_NET_SG:
+	case REQTYPE_RESP_NET:
+		sc = buf;
+		skb = sc->callback_arg;
+		break;
+
+	default:
+		return;
+	}
+
+	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
+	netdev_tx_sent_queue(txq, skb->len);
+}
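
The two helpers above form the usual BQL pairing: bytes are charged to the tx queue when a packet is handed to hardware and credited back on completion, letting the stack bound in-flight bytes. A hedged, illustrative round trip (the real call sites are the driver's xmit and completion paths):

/* Illustrative BQL round trip for one skb on one tx queue. */
static void example_bql_roundtrip(struct netdev_queue *txq,
				  struct sk_buff *skb)
{
	netdev_tx_sent_queue(txq, skb->len);		/* at xmit time */
	/* ... hardware fetches and completes the packet ... */
	netdev_tx_completed_queue(txq, 1, skb->len);	/* at completion */
}
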
+
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
+{
+	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
+	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	u8 *mac;
+
+	switch (nctrl->ncmd.s.cmd) {
+	case OCTNET_CMD_CHANGE_DEVFLAGS:
+	case OCTNET_CMD_SET_MULTI_LIST:
+		break;
+
+	case OCTNET_CMD_CHANGE_MACADDR:
+		mac = ((u8 *)&nctrl->udd[0]) + 2;
+		netif_info(lio, probe, lio->netdev,
+			   "MACAddr changed to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+			   mac[0], mac[1],
+			   mac[2], mac[3],
+			   mac[4], mac[5]);
+		break;
+
+	case OCTNET_CMD_CHANGE_MTU:
+		/* If command is successful, change the MTU. */
+		netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
+			   netdev->mtu, nctrl->ncmd.s.param1);
+		dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
+			 netdev->name, netdev->mtu,
+			 nctrl->ncmd.s.param1);
+		rtnl_lock();
+		netdev->mtu = nctrl->ncmd.s.param1;
+		call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
+		rtnl_unlock();
+		break;
+
+	case OCTNET_CMD_GPIO_ACCESS:
+		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+		break;
+
+	case OCTNET_CMD_LRO_ENABLE:
+		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+		break;
+
+	case OCTNET_CMD_LRO_DISABLE:
+		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+			 netdev->name);
+		break;
+
+	case OCTNET_CMD_VERBOSE_ENABLE:
+		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
+			 netdev->name);
+		break;
+
+	case OCTNET_CMD_VERBOSE_DISABLE:
+		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
+			 netdev->name);
+		break;
+
+	case OCTNET_CMD_ENABLE_VLAN_FILTER:
+		dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
+			 netdev->name);
+		break;
+
+	case OCTNET_CMD_ADD_VLAN_FILTER:
+		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
+			 netdev->name, nctrl->ncmd.s.param1);
+		break;
+
+	case OCTNET_CMD_DEL_VLAN_FILTER:
+		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
+			 netdev->name, nctrl->ncmd.s.param1);
+		break;
+
+	case OCTNET_CMD_SET_SETTINGS:
+		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
+			 netdev->name);
+
+		break;
+
+	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
+	 * Command passed by NIC driver
+	 */
+	case OCTNET_CMD_TNL_RX_CSUM_CTL:
+		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
+			netif_info(lio, probe, lio->netdev,
+				   "RX Checksum Offload Enabled\n");
+		} else if (nctrl->ncmd.s.param1 ==
+			   OCTNET_CMD_RXCSUM_DISABLE) {
+			netif_info(lio, probe, lio->netdev,
+				   "RX Checksum Offload Disabled\n");
+		}
+		break;
+
+		/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
+		 * Command passed by NIC driver
+		 */
+	case OCTNET_CMD_TNL_TX_CSUM_CTL:
+		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
+			netif_info(lio, probe, lio->netdev,
+				   "TX Checksum Offload Enabled\n");
+		} else if (nctrl->ncmd.s.param1 ==
+			   OCTNET_CMD_TXCSUM_DISABLE) {
+			netif_info(lio, probe, lio->netdev,
+				   "TX Checksum Offload Disabled\n");
+		}
+		break;
+
+		/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
+		 * Command passed by NIC driver
+		 */
+	case OCTNET_CMD_VXLAN_PORT_CONFIG:
+		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
+			netif_info(lio, probe, lio->netdev,
+				   "VxLAN Destination UDP PORT:%d ADDED\n",
+				   nctrl->ncmd.s.param1);
+		} else if (nctrl->ncmd.s.more ==
+			   OCTNET_CMD_VXLAN_PORT_DEL) {
+			netif_info(lio, probe, lio->netdev,
+				   "VxLAN Destination UDP PORT:%d DELETED\n",
+				   nctrl->ncmd.s.param1);
+		}
+		break;
+
+	case OCTNET_CMD_SET_FLOW_CTL:
+		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
+		break;
+
+	default:
+		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
+			nctrl->ncmd.s.cmd);
+	}
+}

+ 8 - 10
drivers/net/ethernet/cavium/liquidio/lio_ethtool.c

@@ -290,18 +290,16 @@ lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct_dev = lio->oct_dev;
 	struct octeon_board_info *board_info;
-	int len;
 
-	if (eeprom->offset != 0)
+	if (eeprom->offset)
 		return -EINVAL;
 
 	eeprom->magic = oct_dev->pci_dev->vendor;
 	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
-	len =
-		sprintf((char *)bytes,
-			"boardname:%s serialnum:%s maj:%lld min:%lld\n",
-			board_info->name, board_info->serial_number,
-			board_info->major, board_info->minor);
+	sprintf((char *)bytes,
+		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
+		board_info->name, board_info->serial_number,
+		board_info->major, board_info->minor);
 
 
 	return 0;
 }
@@ -406,7 +404,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
 		dev_err(&oct_dev->pci_dev->dev,
 			"octnet_mdio45_access instruction failed status: %x\n",
 			retval);
-		retval =  -EBUSY;
+		retval = -EBUSY;
 	} else {
 		/* Sleep on a wait queue till the cond flag indicates that the
 		 * response arrived
@@ -1320,8 +1318,8 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
 	return 0;
 }
 
-static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
-			       *intr_coal)
+static int oct_cfg_rx_intrtime(struct lio *lio,
+			       struct ethtool_coalesce *intr_coal)
 {
 	struct octeon_device *oct = lio->oct_dev;
 	u32 time_threshold, rx_coalesce_usecs;

+ 376 - 390
drivers/net/ethernet/cavium/liquidio/lio_main.c

@@ -21,8 +21,6 @@
 **********************************************************************/
 #include <linux/version.h>
 #include <linux/pci.h>
-#include <linux/net_tstamp.h>
-#include <linux/if_vlan.h>
 #include <linux/firmware.h>
 #include <linux/ptp_clock_kernel.h>
 #include <net/vxlan.h>
@@ -37,6 +35,7 @@
 #include "cn66xx_regs.h"
 #include "cn66xx_device.h"
 #include "cn68xx_device.h"
+#include "cn23xx_pf_device.h"
 #include "liquidio_image.h"
 
 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
@@ -52,11 +51,6 @@ module_param(ddr_timeout, int, 0644);
 MODULE_PARM_DESC(ddr_timeout,
 		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
 
-static u32 console_bitmask;
-module_param(console_bitmask, int, 0644);
-MODULE_PARM_DESC(console_bitmask,
-		 "Bitmask indicating which consoles have debug output redirected to syslog.");
-
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 
 #define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
@@ -139,7 +133,8 @@ union tx_info {
 #define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)
 
 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
-#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
+#define OCTNIC_GSO_MAX_SIZE                                                    \
+	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
 
 /** Structure of a node in list of gather components maintained by
  * NIC driver for each network device.
@@ -162,27 +157,6 @@ struct octnic_gather {
 	u64 sg_dma_ptr;
 };
 
-/** This structure is used by NIC driver to store information required
- * to free the sk_buff when the packet has been fetched by Octeon.
- * Bytes offset below assume worst-case of a 64-bit system.
- */
-struct octnet_buf_free_info {
-	/** Bytes 1-8.  Pointer to network device private structure. */
-	struct lio *lio;
-
-	/** Bytes 9-16.  Pointer to sk_buff. */
-	struct sk_buff *skb;
-
-	/** Bytes 17-24.  Pointer to gather list. */
-	struct octnic_gather *g;
-
-	/** Bytes 25-32. Physical address of skb->data or gather list. */
-	u64 dptr;
-
-	/** Bytes 33-47. Piggybacked soft command, if any */
-	struct octeon_soft_command *sc;
-};
-
 struct handshake {
 	struct completion init;
 	struct completion started;
@@ -198,6 +172,7 @@ struct octeon_device_priv {
 };
 
 static int octeon_device_init(struct octeon_device *);
+static int liquidio_stop(struct net_device *netdev);
 static void liquidio_remove(struct pci_dev *pdev);
 static int liquidio_probe(struct pci_dev *pdev,
 			  const struct pci_device_id *ent);
@@ -219,6 +194,20 @@ static void octeon_droq_bh(unsigned long pdev)
 			continue;
 		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
 							  MAX_PACKET_BUDGET);
+		lio_enable_irq(oct->droq[q_no], NULL);
+
+		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
+			/* set time and cnt interrupt thresholds for this DROQ
+			 * for NAPI
+			 */
+			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
+
+			octeon_write_csr64(
+			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
+			    0x5700000040ULL);
+			octeon_write_csr64(
+			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
+		}
 	}
 
 	if (reschedule)
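
About the magic number above: following the CNT/TIME macro pair at offsets +0/+4 in the new cn23xx_pf_regs.h, the low 32 bits of INT_LEVELS hold the packet-count threshold and the upper bits the time threshold, so 0x5700000040 sets a count of 64 and a time value of 0x57 (the unit of the time field is an assumption here, not stated in this patch). A standalone decode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t int_levels = 0x5700000040ULL;		/* value written above */
	uint32_t cnt = (uint32_t)int_levels;		/* low word: CNT */
	uint32_t time = (uint32_t)(int_levels >> 32);	/* high word: TIME */

	printf("pkt count threshold = %u\n", cnt);	/* 64 */
	printf("time threshold      = 0x%x\n", time);	/* 0x57 */
	return 0;
}
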
@@ -252,76 +241,6 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
 	return pkt_cnt;
 }
 
-void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
-					unsigned int bytes_compl)
-{
-	struct netdev_queue *netdev_queue = txq;
-
-	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
-}
-
-void octeon_update_tx_completion_counters(void *buf, int reqtype,
-					  unsigned int *pkts_compl,
-					  unsigned int *bytes_compl)
-{
-	struct octnet_buf_free_info *finfo;
-	struct sk_buff *skb = NULL;
-	struct octeon_soft_command *sc;
-
-	switch (reqtype) {
-	case REQTYPE_NORESP_NET:
-	case REQTYPE_NORESP_NET_SG:
-		finfo = buf;
-		skb = finfo->skb;
-		break;
-
-	case REQTYPE_RESP_NET_SG:
-	case REQTYPE_RESP_NET:
-		sc = buf;
-		skb = sc->callback_arg;
-		break;
-
-	default:
-		return;
-	}
-
-	(*pkts_compl)++;
-	*bytes_compl += skb->len;
-}
-
-void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
-{
-	struct octnet_buf_free_info *finfo;
-	struct sk_buff *skb;
-	struct octeon_soft_command *sc;
-	struct netdev_queue *txq;
-
-	switch (reqtype) {
-	case REQTYPE_NORESP_NET:
-	case REQTYPE_NORESP_NET_SG:
-		finfo = buf;
-		skb = finfo->skb;
-		break;
-
-	case REQTYPE_RESP_NET_SG:
-	case REQTYPE_RESP_NET:
-		sc = buf;
-		skb = sc->callback_arg;
-		break;
-
-	default:
-		return;
-	}
-
-	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
-	netdev_tx_sent_queue(txq, skb->len);
-}
-
-int octeon_console_debug_enabled(u32 console)
-{
-	return (console_bitmask >> (console)) & 0x1;
-}
-
 /**
  * \brief Forces all IO queues off on a given device
  * @param oct Pointer to Octeon device
@@ -441,7 +360,7 @@ static void stop_pci_io(struct octeon_device *oct)
 	pci_disable_device(oct->pci_dev);
 
 	/* Disable interrupts  */
-	oct->fn_list.disable_interrupt(oct->chip);
+	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
 
 	pcierror_quiesce_device(oct);
 
@@ -570,6 +489,9 @@ static const struct pci_device_id liquidio_pci_tbl[] = {
 	{       /* 66xx */
 		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
 	},
+	{       /* 23xx pf */
+		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+	},
 	{
 		0, 0, 0, 0, 0, 0, 0
 	}
@@ -587,7 +509,6 @@ static struct pci_driver liquidio_pci_driver = {
 	.suspend	= liquidio_suspend,
 	.resume		= liquidio_resume,
 #endif
-
 };
 
 /**
@@ -1002,6 +923,27 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
 	}
 }
 
+static
+int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+{
+	struct octeon_device *oct = droq->oct_dev;
+	struct octeon_device_priv *oct_priv =
+	    (struct octeon_device_priv *)oct->priv;
+
+	if (droq->ops.poll_mode) {
+		droq->ops.napi_fn(droq);
+	} else {
+		if (ret & MSIX_PO_INT) {
+			tasklet_schedule(&oct_priv->droq_tasklet);
+			return 1;
+		}
+		/* this will be flushed periodically by check iq db */
+		if (ret & MSIX_PI_INT)
+			return 0;
+	}
+	return 0;
+}
+
 /**
 /**
  * \brief Droq packet processor scheduler
  * @param oct octeon device
@@ -1032,19 +974,36 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
 	}
 }
 
+static irqreturn_t
+liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+	u64 ret;
+	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+	struct octeon_device *oct = ioq_vector->oct_dev;
+	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+
+	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+
+	if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
+		liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+
+	return IRQ_HANDLED;
+}
+
 /**
  * \brief Interrupt handler for octeon
  * @param irq unused
  * @param dev octeon device
  */
 static
-irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
+irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
+					 void *dev)
 {
 	struct octeon_device *oct = (struct octeon_device *)dev;
 	irqreturn_t ret;
 
 	/* Disable our interrupts for the duration of ISR */
-	oct->fn_list.disable_interrupt(oct->chip);
+	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
 
 	ret = oct->fn_list.process_interrupt_regs(oct);
 
@@ -1053,7 +1012,7 @@ irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
 
 
 	/* Re-enable our interrupts  */
 	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
-		oct->fn_list.enable_interrupt(oct->chip);
+		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
 
 	return ret;
 }
@@ -1067,24 +1026,110 @@ irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
 static int octeon_setup_interrupt(struct octeon_device *oct)
 {
 	int irqret, err;
+	struct msix_entry *msix_entries;
+	int i;
+	int num_ioq_vectors;
+	int num_alloc_ioq_vectors;
 
-	err = pci_enable_msi(oct->pci_dev);
-	if (err)
-		dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
-			 err);
-	else
-		oct->flags |= LIO_FLAG_MSI_ENABLED;
-
-	irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
-			     IRQF_SHARED, "octeon", oct);
-	if (irqret) {
-		if (oct->flags & LIO_FLAG_MSI_ENABLED)
-			pci_disable_msi(oct->pci_dev);
-		dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
-			irqret);
-		return 1;
-	}
+	if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
+		oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
+		/* one non ioq interrupt for handling sli_mac_pf_int_sum */
+		oct->num_msix_irqs += 1;
+
+		oct->msix_entries = kcalloc(
+		    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
+		if (!oct->msix_entries)
+			return 1;
 
 
+		msix_entries = (struct msix_entry *)oct->msix_entries;
+		/* Assumption is that PF MSI-X vectors run from pf_srn up to
+		 * trs, not from 0; if not, change this code.
+		 */
+		for (i = 0; i < oct->num_msix_irqs - 1; i++)
+			msix_entries[i].entry = oct->sriov_info.pf_srn + i;
+		msix_entries[oct->num_msix_irqs - 1].entry =
+		    oct->sriov_info.trs;
+		num_alloc_ioq_vectors = pci_enable_msix_range(
+						oct->pci_dev, msix_entries,
+						oct->num_msix_irqs,
+						oct->num_msix_irqs);
+		if (num_alloc_ioq_vectors < 0) {
+			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
+			kfree(oct->msix_entries);
+			oct->msix_entries = NULL;
+			return 1;
+		}
+		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+
+		num_ioq_vectors = oct->num_msix_irqs;
+
+		/** For PF, there is one non-ioq interrupt handler */
+		num_ioq_vectors -= 1;
+		irqret = request_irq(msix_entries[num_ioq_vectors].vector,
+				     liquidio_legacy_intr_handler, 0, "octeon",
+				     oct);
+		if (irqret) {
+			dev_err(&oct->pci_dev->dev,
+				"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
+				irqret);
+			pci_disable_msix(oct->pci_dev);
+			kfree(oct->msix_entries);
+			oct->msix_entries = NULL;
+			return 1;
+		}
+
+		for (i = 0; i < num_ioq_vectors; i++) {
+			irqret = request_irq(msix_entries[i].vector,
+					     liquidio_msix_intr_handler, 0,
+					     "octeon", &oct->ioq_vector[i]);
+			if (irqret) {
+				dev_err(&oct->pci_dev->dev,
+					"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
+					irqret);
+				/** Freeing the non-ioq irq vector here . */
+				free_irq(msix_entries[num_ioq_vectors].vector,
+					 oct);
+
+				while (i) {
+					i--;
+					/** clearing affinity mask. */
+					irq_set_affinity_hint(
+						msix_entries[i].vector, NULL);
+					free_irq(msix_entries[i].vector,
+						 &oct->ioq_vector[i]);
+				}
+				pci_disable_msix(oct->pci_dev);
+				kfree(oct->msix_entries);
+				oct->msix_entries = NULL;
+				return 1;
+			}
+			oct->ioq_vector[i].vector = msix_entries[i].vector;
+			/* assign the cpu mask for this msix interrupt vector */
+			irq_set_affinity_hint(
+					msix_entries[i].vector,
+					(&oct->ioq_vector[i].affinity_mask));
+		}
+		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
+			oct->octeon_id);
+	} else {
+		err = pci_enable_msi(oct->pci_dev);
+		if (err)
+			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+				 err);
+		else
+			oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+		irqret = request_irq(oct->pci_dev->irq,
+				     liquidio_legacy_intr_handler, IRQF_SHARED,
+				     "octeon", oct);
+		if (irqret) {
+			if (oct->flags & LIO_FLAG_MSI_ENABLED)
+				pci_disable_msi(oct->pci_dev);
+			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+				irqret);
+			return 1;
+		}
+	}
 	return 0;
 	return 0;
 }
 
@@ -1107,6 +1152,9 @@ liquidio_probe(struct pci_dev *pdev,
 		return -ENOMEM;
 	}
 
+	if (pdev->device == OCTEON_CN23XX_PF_VID)
+		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
+
 	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
 		 (u32)pdev->vendor, (u32)pdev->device);
 
@@ -1146,6 +1194,7 @@ liquidio_probe(struct pci_dev *pdev,
 static void octeon_destroy_resources(struct octeon_device *oct)
 {
 	int i;
+	struct msix_entry *msix_entries;
 	struct octeon_device_priv *oct_priv =
 		(struct octeon_device_priv *)oct->priv;
 
@@ -1190,21 +1239,40 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
 
 		/* Disable interrupts  */
-		oct->fn_list.disable_interrupt(oct->chip);
+		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+		if (oct->msix_on) {
+			msix_entries = (struct msix_entry *)oct->msix_entries;
+			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
+				/* clear the affinity_cpumask */
+				irq_set_affinity_hint(msix_entries[i].vector,
+						      NULL);
+				free_irq(msix_entries[i].vector,
+					 &oct->ioq_vector[i]);
+			}
+			/* non-iov vector's argument is oct struct */
+			free_irq(msix_entries[i].vector, oct);
 
 
-		/* Release the interrupt line */
-		free_irq(oct->pci_dev->irq, oct);
+			pci_disable_msix(oct->pci_dev);
+			kfree(oct->msix_entries);
+			oct->msix_entries = NULL;
+		} else {
+			/* Release the interrupt line */
+			free_irq(oct->pci_dev->irq, oct);
 
 
-		if (oct->flags & LIO_FLAG_MSI_ENABLED)
-			pci_disable_msi(oct->pci_dev);
+			if (oct->flags & LIO_FLAG_MSI_ENABLED)
+				pci_disable_msi(oct->pci_dev);
+		}
 
 
-		/* fallthrough */
+		if (OCTEON_CN23XX_PF(oct))
+			octeon_free_ioq_vector(oct);
+	/* fallthrough */
 	case OCT_DEV_IN_RESET:
 	case OCT_DEV_DROQ_INIT_DONE:
 		/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
 		mdelay(100);
 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
-			if (!(oct->io_qmask.oq & (1ULL << i)))
+			if (!(oct->io_qmask.oq & BIT_ULL(i)))
 				continue;
 			octeon_delete_droq(oct, i);
 		}
@@ -1244,9 +1312,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 
 
 		/* fallthrough */
 	case OCT_DEV_PCI_MAP_DONE:
-
 		/* Soft reset the octeon device before exiting */
-		oct->fn_list.soft_reset(oct);
+		if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
+			oct->fn_list.soft_reset(oct);
 
 		octeon_unmap_pci_barx(oct, 0);
 		octeon_unmap_pci_barx(oct, 1);
@@ -1417,6 +1485,12 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
 		s = "CN66XX";
 		break;
 
+	case OCTEON_CN23XX_PCIID_PF:
+		oct->chip_id = OCTEON_CN23XX_PF_VID;
+		ret = setup_cn23xx_octeon_pf_device(oct);
+		s = "CN23XX";
+		break;
+
 	default:
 		s = "?";
 		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
@@ -2173,17 +2247,15 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
 						   lio->ifidx), NULL);
 		if (retval) {
 			dev_err(&octeon_dev->pci_dev->dev,
-				" %s : Runtime DROQ(RxQ) creation failed.\n",
+				"%s : Runtime DROQ(RxQ) creation failed.\n",
 				__func__);
 			return 1;
 		}
 
 		droq = octeon_dev->droq[q_no];
 		napi = &droq->napi;
-		dev_dbg(&octeon_dev->pci_dev->dev,
-			"netif_napi_add netdev:%llx oct:%llx\n",
-			(u64)netdev,
-			(u64)octeon_dev);
+		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
+			(u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
 		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
 
 		/* designate a CPU for this droq */
@@ -2235,7 +2307,7 @@ static void octnet_poll_check_txq_status(struct work_struct *work)
  * \brief Sets up the txq poll check
  * @param netdev network device
  */
-static inline void setup_tx_poll_fn(struct net_device *netdev)
+static inline int setup_tx_poll_fn(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
@@ -2244,21 +2316,24 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
 						WQ_MEM_RECLAIM, 0);
 	if (!lio->txq_status_wq.wq) {
 		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
-		return;
+		return -1;
 	}
 	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
 			  octnet_poll_check_txq_status);
 	lio->txq_status_wq.wk.ctxptr = lio;
 	queue_delayed_work(lio->txq_status_wq.wq,
 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+	return 0;
 }
 
 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
 
-	cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
-	destroy_workqueue(lio->txq_status_wq.wq);
+	if (lio->txq_status_wq.wq) {
+		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+		destroy_workqueue(lio->txq_status_wq.wq);
+	}
 }
 
 /**
@@ -2282,7 +2357,14 @@ static int liquidio_open(struct net_device *netdev)
 
 
 	ifstate_set(lio, LIO_IFSTATE_RUNNING);
 
-	setup_tx_poll_fn(netdev);
+	if (OCTEON_CN23XX_PF(oct)) {
+		if (!oct->msix_on)
+			if (setup_tx_poll_fn(netdev))
+				return -1;
+	} else {
+		if (setup_tx_poll_fn(netdev))
+			return -1;
+	}
 
 	start_txq(netdev);
 
@@ -2328,7 +2410,12 @@ static int liquidio_stop(struct net_device *netdev)
 	/* Now it should be safe to tell Octeon that nic interface is down. */
 	send_rx_ctrl_cmd(lio, 0);
 
-	cleanup_tx_poll_fn(netdev);
+	if (OCTEON_CN23XX_PF(oct)) {
+		if (!oct->msix_on)
+			cleanup_tx_poll_fn(netdev);
+	} else {
+		cleanup_tx_poll_fn(netdev);
+	}
 
 	if (lio->ptp_clock) {
 		ptp_clock_unregister(lio->ptp_clock);
@@ -2340,143 +2427,6 @@ static int liquidio_stop(struct net_device *netdev)
 	return 0;
 }
 
-void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
-{
-	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
-	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
-	struct lio *lio = GET_LIO(netdev);
-	struct octeon_device *oct = lio->oct_dev;
-	u8 *mac;
-
-	switch (nctrl->ncmd.s.cmd) {
-	case OCTNET_CMD_CHANGE_DEVFLAGS:
-	case OCTNET_CMD_SET_MULTI_LIST:
-		break;
-
-	case OCTNET_CMD_CHANGE_MACADDR:
-		mac = ((u8 *)&nctrl->udd[0]) + 2;
-		netif_info(lio, probe, lio->netdev,
-			   "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
-			   "MACAddr changed to", mac[0], mac[1],
-			   mac[2], mac[3], mac[4], mac[5]);
-		break;
-
-	case OCTNET_CMD_CHANGE_MTU:
-		/* If command is successful, change the MTU. */
-		netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
-			   netdev->mtu, nctrl->ncmd.s.param1);
-		dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
-			 netdev->name, netdev->mtu,
-			 nctrl->ncmd.s.param1);
-		rtnl_lock();
-		netdev->mtu = nctrl->ncmd.s.param1;
-		call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
-		rtnl_unlock();
-		break;
-
-	case OCTNET_CMD_GPIO_ACCESS:
-		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
-
-		break;
-
-	case OCTNET_CMD_LRO_ENABLE:
-		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
-		break;
-
-	case OCTNET_CMD_LRO_DISABLE:
-		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
-			 netdev->name);
-		break;
-
-	case OCTNET_CMD_VERBOSE_ENABLE:
-		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
-		break;
-
-	case OCTNET_CMD_VERBOSE_DISABLE:
-		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
-			 netdev->name);
-		break;
-
-	case OCTNET_CMD_ENABLE_VLAN_FILTER:
-		dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
-			 netdev->name);
-		break;
-
-	case OCTNET_CMD_ADD_VLAN_FILTER:
-		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
-			 netdev->name, nctrl->ncmd.s.param1);
-		break;
-
-	case OCTNET_CMD_DEL_VLAN_FILTER:
-		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
-			 netdev->name, nctrl->ncmd.s.param1);
-		break;
-
-	case OCTNET_CMD_SET_SETTINGS:
-		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
-			 netdev->name);
-
-		break;
-		/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
-		 * Command passed by NIC driver
-		 */
-	case OCTNET_CMD_TNL_RX_CSUM_CTL:
-		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
-			netif_info(lio, probe, lio->netdev,
-				   "%s RX Checksum Offload Enabled\n",
-				   netdev->name);
-		} else if (nctrl->ncmd.s.param1 ==
-			   OCTNET_CMD_RXCSUM_DISABLE) {
-			netif_info(lio, probe, lio->netdev,
-				   "%s RX Checksum Offload Disabled\n",
-				   netdev->name);
-		}
-		break;
-
-		/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
-		 * Command passed by NIC driver
-		 */
-	case OCTNET_CMD_TNL_TX_CSUM_CTL:
-		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
-			netif_info(lio, probe, lio->netdev,
-				   "%s TX Checksum Offload Enabled\n",
-				   netdev->name);
-		} else if (nctrl->ncmd.s.param1 ==
-			   OCTNET_CMD_TXCSUM_DISABLE) {
-			netif_info(lio, probe, lio->netdev,
-				   "%s TX Checksum Offload Disabled\n",
-				   netdev->name);
-		}
-		break;
-
-		/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
-		 * Command passed by NIC driver
-		 */
-	case OCTNET_CMD_VXLAN_PORT_CONFIG:
-		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
-			netif_info(lio, probe, lio->netdev,
-				   "%s VxLAN Destination UDP PORT:%d ADDED\n",
-				   netdev->name,
-				   nctrl->ncmd.s.param1);
-		} else if (nctrl->ncmd.s.more ==
-			   OCTNET_CMD_VXLAN_PORT_DEL) {
-			netif_info(lio, probe, lio->netdev,
-				   "%s VxLAN Destination UDP PORT:%d DELETED\n",
-				   netdev->name,
-				   nctrl->ncmd.s.param1);
-		}
-		break;
-
-	case OCTNET_CMD_SET_FLOW_CTL:
-		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
-		break;
-
-	default:
-		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
-			nctrl->ncmd.s.cmd);
-	}
-}
-
 /**
  * \brief Converts a mask based on net device flags
  * @param netdev network device
@@ -2817,8 +2767,7 @@ static void handle_timestamp(struct octeon_device *oct,
  */
 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
 					 struct octnic_data_pkt *ndata,
-					 struct octnet_buf_free_info *finfo,
-					 int xmit_more)
+					 struct octnet_buf_free_info *finfo)
 {
 	int retval;
 	struct octeon_soft_command *sc;
@@ -2848,7 +2797,7 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
 
 
 	len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
 
-	ring_doorbell = !xmit_more;
+	ring_doorbell = 1;
 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
 				     sc, len, ndata->reqtype);
 
@@ -2881,7 +2830,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	union tx_info *tx_info;
 	int status = 0;
 	int q_idx = 0, iq_no = 0;
-	int xmit_more, j;
+	int j;
 	u64 dptr = 0;
 	u32 tag = 0;
 
@@ -3077,12 +3026,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
 	}
 
-	xmit_more = skb->xmit_more;
-
 	if (unlikely(cmdsetup.s.timestamp))
-		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
+		status = send_nic_timestamp_pkt(oct, &ndata, finfo);
 	else
-		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
+		status = octnet_send_nic_data_pkt(oct, &ndata);
 	if (status == IQ_SEND_FAILED)
 		goto lio_xmit_failed;
 
@@ -3249,31 +3196,6 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
 	return ret;
 }
 
-int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
-{
-	struct lio *lio = GET_LIO(netdev);
-	struct octeon_device *oct = lio->oct_dev;
-	struct octnic_ctrl_pkt nctrl;
-	int ret = 0;
-
-	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
-
-	nctrl.ncmd.u64 = 0;
-	nctrl.ncmd.s.cmd = cmd;
-	nctrl.ncmd.s.param1 = param1;
-	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
-	nctrl.wait_time = 100;
-	nctrl.netpndev = (u64)netdev;
-	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
-
-	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
-	if (ret < 0) {
-		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
-			ret);
-	}
-	return ret;
-}
-
 /** \brief Net device fix features
  * @param netdev  pointer to network device
  * @param request features requested
@@ -3492,8 +3414,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 	union oct_nic_if_cfg if_cfg;
 	unsigned int base_queue;
 	unsigned int gmx_port_id;
-	u32 resp_size, ctx_size;
+	u32 resp_size, ctx_size, data_size;
 	u32 ifidx_or_pfnum;
+	struct lio_version *vdata;
 
 	/* This is to handle link status changes */
 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
@@ -3515,21 +3438,37 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 	for (i = 0; i < octeon_dev->ifcount; i++) {
 		resp_size = sizeof(struct liquidio_if_cfg_resp);
 		ctx_size = sizeof(struct liquidio_if_cfg_context);
+		data_size = sizeof(struct lio_version);
 		sc = (struct octeon_soft_command *)
-			octeon_alloc_soft_command(octeon_dev, 0,
+			octeon_alloc_soft_command(octeon_dev, data_size,
 						  resp_size, ctx_size);
 		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
 		ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
+		vdata = (struct lio_version *)sc->virtdptr;
 
-		num_iqueues =
-			CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
-		num_oqueues =
-			CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
-		base_queue =
-			CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
-		gmx_port_id =
-			CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
-		ifidx_or_pfnum = i;
+		*((u64 *)vdata) = 0;
+		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
+
+		if (OCTEON_CN23XX_PF(octeon_dev)) {
+			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
+			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
+			base_queue = octeon_dev->sriov_info.pf_srn;
+
+			gmx_port_id = octeon_dev->pf_num;
+			ifidx_or_pfnum = octeon_dev->pf_num;
+		} else {
+			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
+						octeon_get_conf(octeon_dev), i);
+			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
+						octeon_get_conf(octeon_dev), i);
+			base_queue = CFG_GET_BASE_QUE_NIC_IF(
+						octeon_get_conf(octeon_dev), i);
+			gmx_port_id = CFG_GET_GMXID_NIC_IF(
+						octeon_get_conf(octeon_dev), i);
+			ifidx_or_pfnum = i;
+		}
 
 		dev_dbg(&octeon_dev->pci_dev->dev,
 			"requesting config for interface %d, iqs %d, oqs %d\n",
@@ -3633,12 +3572,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 
 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
-		lio->dev_capability = NETIF_F_HIGHDMA
-				| NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-				| NETIF_F_SG | NETIF_F_RXCSUM
-				| NETIF_F_GRO
-				| NETIF_F_TSO | NETIF_F_TSO6
-				| NETIF_F_LRO;
+		if (OCTEON_CN23XX_PF(octeon_dev) ||
+		    OCTEON_CN6XXX(octeon_dev)) {
+			lio->dev_capability = NETIF_F_HIGHDMA
+					      | NETIF_F_IP_CSUM
+					      | NETIF_F_IPV6_CSUM
+					      | NETIF_F_SG | NETIF_F_RXCSUM
+					      | NETIF_F_GRO
+					      | NETIF_F_TSO | NETIF_F_TSO6
+					      | NETIF_F_LRO;
+		}
 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
 
 		/*  Copy of transmit encapsulation capabilities:
@@ -3880,6 +3823,7 @@ static void nic_starter(struct work_struct *work)
 static int octeon_device_init(struct octeon_device *octeon_dev)
 {
 	int j, ret;
+	int fw_loaded = 0;
 	char bootcmd[] = "\n";
 	struct octeon_device_priv *oct_priv =
 		(struct octeon_device_priv *)octeon_dev->priv;
@@ -3901,9 +3845,23 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 
 	octeon_dev->app_mode = CVM_DRV_INVALID_APP;
 
-	/* Do a soft reset of the Octeon device. */
-	if (octeon_dev->fn_list.soft_reset(octeon_dev))
+	if (OCTEON_CN23XX_PF(octeon_dev)) {
+		if (!cn23xx_fw_loaded(octeon_dev)) {
+			fw_loaded = 0;
+			/* Do a soft reset of the Octeon device. */
+			if (octeon_dev->fn_list.soft_reset(octeon_dev))
+				return 1;
+			/* things might have changed */
+			if (!cn23xx_fw_loaded(octeon_dev))
+				fw_loaded = 0;
+			else
+				fw_loaded = 1;
+		} else {
+			fw_loaded = 1;
+		}
+	} else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
 		return 1;
+	}
 
 	/* Initialize the dispatch mechanism used to push packets arriving on
 	 * Octeon Output queues.
@@ -3925,6 +3883,22 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 
 	octeon_set_io_queues_off(octeon_dev);
 
+	if (OCTEON_CN23XX_PF(octeon_dev)) {
+		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+		if (ret) {
+			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
+			return ret;
+		}
+	}
+
+	/* Initialize soft command buffer pool
+	 */
+	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
+		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
+		return 1;
+	}
+	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
 	/*  Setup the data structures that manage this Octeon's Input queues. */
 	if (octeon_setup_instr_queues(octeon_dev)) {
 		dev_err(&octeon_dev->pci_dev->dev,
@@ -3936,14 +3910,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 	}
 	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
 
-	/* Initialize soft command buffer pool
-	 */
-	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
-		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
-		return 1;
-	}
-	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
-
 	/* Initialize lists to manage the requests of different types that
 	 * arrive from user & kernel applications for this octeon device.
 	 */
@@ -3963,15 +3929,23 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 
 	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
 
-	/* The input and output queue registers were setup earlier (the queues
-	 * were not enabled). Any additional registers that need to be
-	 * programmed should be done now.
-	 */
-	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
-	if (ret) {
-		dev_err(&octeon_dev->pci_dev->dev,
-			"Failed to configure device registers\n");
-		return ret;
+	if (OCTEON_CN23XX_PF(octeon_dev)) {
+		if (octeon_allocate_ioq_vector(octeon_dev)) {
+			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
+			return 1;
+		}
+
+	} else {
+		/* The input and output queue registers were setup earlier (the
+		 * queues were not enabled). Any additional registers
+		 * that need to be programmed should be done now.
+		 */
+		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+		if (ret) {
+			dev_err(&octeon_dev->pci_dev->dev,
+				"Failed to configure device registers\n");
+			return ret;
+		}
 	}
 
 	/* Initialize the tasklet that handles output queue packet processing.*/
@@ -3985,63 +3959,76 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 		return 1;
 
 	/* Enable Octeon device interrupts */
-	octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
+	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
 
 	/* Enable the input and output queues for this Octeon device */
-	octeon_dev->fn_list.enable_io_queues(octeon_dev);
+	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
+	if (ret) {
+		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
+		return ret;
+	}
 
 	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
 
-	dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
-
-	if (ddr_timeout == 0)
-		dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+	if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
+		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
+		if (!ddr_timeout) {
+			dev_info(&octeon_dev->pci_dev->dev,
+				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+		}
 
-	schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
+		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
 
-	/* Wait for the octeon to initialize DDR after the soft-reset. */
-	while (ddr_timeout == 0) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (schedule_timeout(HZ / 10)) {
-			/* user probably pressed Control-C */
+		/* Wait for the octeon to initialize DDR after the soft-reset.*/
+		while (!ddr_timeout) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			if (schedule_timeout(HZ / 10)) {
+				/* user probably pressed Control-C */
+				return 1;
+			}
+		}
+		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
+		if (ret) {
+			dev_err(&octeon_dev->pci_dev->dev,
+				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
+				ret);
 			return 1;
 		}
-	}
-	ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
-	if (ret) {
-		dev_err(&octeon_dev->pci_dev->dev,
-			"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
-			ret);
-		return 1;
-	}
 
-	if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
-		dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
-		return 1;
-	}
+		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
+			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
+			return 1;
+		}
 
-	/* Divert uboot to take commands from host instead. */
-	ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
+		/* Divert uboot to take commands from host instead. */
+		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
 
-	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
-	ret = octeon_init_consoles(octeon_dev);
-	if (ret) {
-		dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
-		return 1;
-	}
-	ret = octeon_add_console(octeon_dev, 0);
-	if (ret) {
-		dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
-		return 1;
-	}
+		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
+		ret = octeon_init_consoles(octeon_dev);
+		if (ret) {
+			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
+			return 1;
+		}
+		ret = octeon_add_console(octeon_dev, 0);
+		if (ret) {
+			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
+			return 1;
+		}
 
-	atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
+		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
 
-	dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
-	ret = load_firmware(octeon_dev);
-	if (ret) {
-		dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
-		return 1;
+		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
+		ret = load_firmware(octeon_dev);
+		if (ret) {
+			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
+			return 1;
+		}
+		/* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
+		 * loaded
+		 */
+		if (OCTEON_CN23XX_PF(octeon_dev))
+			octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
+					   2ULL);
 	}
 
 	handshake[octeon_dev->octeon_id].init_ok = 1;
@@ -4057,7 +4044,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 		       octeon_dev->droq[j]->pkts_credit_reg);
 
 	/* Packets can start arriving on the output queues from this point. */
-
 	return 0;
 }
 

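Note: the lio_main.c hunks above drop the skb->xmit_more plumbing, so the queue doorbell is now rung on every submission. A minimal sketch of the resulting send path (illustrative only; lio_submit_sketch is a hypothetical name, the called function is the driver's real octeon_send_command()):

	/* Sketch: doorbell policy after this patch. */
	static int lio_submit_sketch(struct octeon_device *oct,
				     struct octnic_data_pkt *ndata)
	{
		int ring_doorbell = 1;	/* was: !skb->xmit_more */

		return octeon_send_command(oct, ndata->q_no, ring_doorbell,
					   &ndata->cmd, ndata->buf,
					   ndata->datasize, ndata->reqtype);
	}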
+ 18 - 4
drivers/net/ethernet/cavium/liquidio/liquidio_common.h

@@ -30,10 +30,24 @@
 
 #include "octeon_config.h"
 
-#define LIQUIDIO_BASE_VERSION   "1.4"
-#define LIQUIDIO_MICRO_VERSION  ".1"
 #define LIQUIDIO_PACKAGE ""
-#define LIQUIDIO_VERSION  "1.4.1"
+#define LIQUIDIO_BASE_MAJOR_VERSION 1
+#define LIQUIDIO_BASE_MINOR_VERSION 4
+#define LIQUIDIO_BASE_MICRO_VERSION 1
+#define LIQUIDIO_BASE_VERSION   __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
+				__stringify(LIQUIDIO_BASE_MINOR_VERSION)
+#define LIQUIDIO_MICRO_VERSION  "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
+#define LIQUIDIO_VERSION        LIQUIDIO_PACKAGE \
+				__stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
+				__stringify(LIQUIDIO_BASE_MINOR_VERSION) \
+				"." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
+
+struct lio_version {
+	u16  major;
+	u16  minor;
+	u16  micro;
+	u16  reserved;
+};
 
 #define CONTROL_IQ 0
 /** Tag types used by Octeon cores in its work. */
@@ -832,7 +846,7 @@ struct oct_mdio_cmd {
 /* intrmod: max. packets to trigger interrupt */
 #define LIO_INTRMOD_RXMAXCNT_TRIGGER	384
 /* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMINCNT_TRIGGER	1
+#define LIO_INTRMOD_RXMINCNT_TRIGGER	0
 /* intrmod: max. time to trigger interrupt */
 #define LIO_INTRMOD_RXMAXTMR_TRIGGER	128
 /* 66xx:intrmod: min. time to trigger interrupt

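The version rework above replaces fixed string literals with numeric components composed via __stringify(), so the same numbers feed both the version string and the new struct lio_version handshake sent to firmware. A hedged illustration of the expansion (assuming the kernel's <linux/stringify.h> semantics):

	/* Illustration only: how the macros above expand.
	 *   LIQUIDIO_BASE_VERSION  -> "1" "." "4"           -> "1.4"
	 *   LIQUIDIO_MICRO_VERSION -> "." "1"               -> ".1"
	 *   LIQUIDIO_VERSION       -> "" "1" "." "4" "." "1" -> "1.4.1"
	 * Adjacent string literals are concatenated by the C compiler, so the
	 * composed strings match the literals they replace.
	 */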
+ 51 - 8
drivers/net/ethernet/cavium/liquidio/octeon_config.h

@@ -64,6 +64,34 @@
 #define   DEFAULT_NUM_NIC_PORTS_68XX   4
 #define   DEFAULT_NUM_NIC_PORTS_68XX_210NV  2
 
+/* CN23xx  IQ configuration macros */
+#define   CN23XX_MAX_RINGS_PER_PF_PASS_1_0 12
+#define   CN23XX_MAX_RINGS_PER_PF_PASS_1_1 32
+#define   CN23XX_MAX_RINGS_PER_PF          64
+
+#define   CN23XX_MAX_INPUT_QUEUES	CN23XX_MAX_RINGS_PER_PF
+#define   CN23XX_MAX_IQ_DESCRIPTORS	2048
+#define   CN23XX_DB_MIN                 1
+#define   CN23XX_DB_MAX                 8
+#define   CN23XX_DB_TIMEOUT             1
+
+#define   CN23XX_MAX_OUTPUT_QUEUES	CN23XX_MAX_RINGS_PER_PF
+#define   CN23XX_MAX_OQ_DESCRIPTORS	2048
+#define   CN23XX_OQ_BUF_SIZE		1536
+#define   CN23XX_OQ_PKTSPER_INTR	128
+/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
+#define   CN23XX_OQ_REFIL_THRESHOLD	128
+
+#define   CN23XX_OQ_INTR_PKT		64
+#define   CN23XX_OQ_INTR_TIME		100
+#define   DEFAULT_NUM_NIC_PORTS_23XX	1
+
+#define   CN23XX_CFG_IO_QUEUES		CN23XX_MAX_RINGS_PER_PF
+/* PEMs count */
+#define   CN23XX_MAX_MACS		4
+
+#define   CN23XX_DEF_IQ_INTR_THRESHOLD	32
+#define   CN23XX_DEF_IQ_INTR_BYTE_THRESHOLD   (64 * 1024)
 /* common OCTEON configuration macros */
 #define   CN6XXX_CFG_IO_QUEUES         32
 #define   OCTEON_32BYTE_INSTR          32
@@ -92,6 +120,9 @@
 #define CFG_GET_IQ_DB_MIN(cfg)                   ((cfg)->iq.db_min)
 #define CFG_GET_IQ_DB_TIMEOUT(cfg)               ((cfg)->iq.db_timeout)
 
+#define CFG_GET_IQ_INTR_PKT(cfg)                 ((cfg)->iq.iq_intr_pkt)
+#define CFG_SET_IQ_INTR_PKT(cfg, val)            (cfg)->iq.iq_intr_pkt = val
+
 #define CFG_GET_OQ_MAX_Q(cfg)                    ((cfg)->oq.max_oqs)
 #define CFG_GET_OQ_INFO_PTR(cfg)                 ((cfg)->oq.info_ptr)
 #define CFG_GET_OQ_PKTS_PER_INTR(cfg)            ((cfg)->oq.pkts_per_intr)
@@ -140,19 +171,24 @@
 enum lio_card_type {
 	LIO_210SV = 0, /* Two port, 66xx */
 	LIO_210NV,     /* Two port, 68xx */
-	LIO_410NV      /* Four port, 68xx */
+	LIO_410NV,     /* Four port, 68xx */
+	LIO_23XX       /* 23xx */
 };
 
 #define LIO_210SV_NAME "210sv"
 #define LIO_210NV_NAME "210nv"
 #define LIO_410NV_NAME "410nv"
+#define LIO_23XX_NAME  "23xx"
 
 /** Structure to define the configuration attributes for each Input queue.
  *  Applicable to all Octeon processors
  **/
 struct octeon_iq_config {
 #ifdef __BIG_ENDIAN_BITFIELD
-	u64 reserved:32;
+	u64 reserved:16;
+
+	/** Tx interrupt packets. Applicable to 23xx only */
+	u64 iq_intr_pkt:16;
 
 	/** Minimum ticks to wait before checking for pending instructions. */
 	u64 db_timeout:16;
@@ -192,7 +228,10 @@ struct octeon_iq_config {
 	/** Minimum ticks to wait before checking for pending instructions. */
 	u64 db_timeout:16;
 
-	u64 reserved:32;
+	/** Tx interrupt packets. Applicable to 23xx only */
+	u64 iq_intr_pkt:16;
+
+	u64 reserved:16;
 #endif
 };
 
@@ -416,11 +455,15 @@ struct octeon_config {
 #define DISPATCH_LIST_SIZE                      BIT(OPCODE_MASK_BITS)
 
 /* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_INSTR_QUEUES(oct)         CN6XXX_MAX_INPUT_QUEUES
-/* Maximum number of Octeon Output queues */
-#define MAX_OCTEON_OUTPUT_QUEUES(oct)         CN6XXX_MAX_OUTPUT_QUEUES
+#define MAX_OCTEON_INSTR_QUEUES(oct)		\
+		(OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_INPUT_QUEUES : \
+					CN6XXX_MAX_INPUT_QUEUES)
 
-#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES       CN6XXX_MAX_INPUT_QUEUES
-#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES      CN6XXX_MAX_OUTPUT_QUEUES
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES(oct)		\
+		(OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_OUTPUT_QUEUES : \
+					CN6XXX_MAX_OUTPUT_QUEUES)
 
+#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES	CN23XX_MAX_INPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES	CN23XX_MAX_OUTPUT_QUEUES
 #endif /* __OCTEON_CONFIG_H__  */

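With the macros above, the per-device queue limit becomes chip-aware while the static worst-case limits move up to the CN23XX values (64 rings vs. 32 on CN6XXX). A short usage sketch (illustrative, not part of the patch):

	/* Arrays shared by all chips are sized for the static worst case... */
	struct octeon_instr_queue *instr_queue[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];

	/* ...while per-device iteration uses the runtime, chip-aware bound. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (oct->instr_queue[i])
			/* operate on queue i */;
	}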
+ 116 - 1
drivers/net/ethernet/cavium/liquidio/octeon_console.c

@@ -25,12 +25,13 @@
  */
 #include <linux/pci.h>
 #include <linux/netdevice.h>
+#include <linux/crc32.h>
 #include "liquidio_common.h"
 #include "octeon_droq.h"
 #include "octeon_iq.h"
 #include "response_manager.h"
 #include "octeon_device.h"
-#include "octeon_main.h"
+#include "liquidio_image.h"
 #include "octeon_mem_ops.h"
 
 static void octeon_remote_lock(void);
@@ -40,6 +41,10 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
 					     u32 flags);
 static int octeon_console_read(struct octeon_device *oct, u32 console_num,
 			       char *buffer, u32 buf_size);
+static u32 console_bitmask;
+module_param(console_bitmask, int, 0644);
+MODULE_PARM_DESC(console_bitmask,
+		 "Bitmask indicating which consoles have debug output redirected to syslog.");
 
 #define MIN(a, b) min((a), (b))
 #define CAST_ULL(v) ((u64)(v))
@@ -177,6 +182,15 @@ struct octeon_pci_console_desc {
 	__cvmx_bootmem_desc_get(oct, addr,                               \
 		offsetof(struct cvmx_bootmem_named_block_desc, field),   \
 		SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+/**
+ * \brief determines if a given console has debug enabled.
+ * @param console console to check
+ * @returns  1 = enabled. 0 otherwise
+ */
+static int octeon_console_debug_enabled(u32 console)
+{
+	return (console_bitmask >> (console)) & 0x1;
+}
 
 /**
  * This function is the implementation of the get macros defined
@@ -709,3 +723,104 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num,
 
 	return bytes_to_read;
 }
+
+#define FBUF_SIZE	(4 * 1024 * 1024)
+u8 fbuf[FBUF_SIZE];
+
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+			     size_t size)
+{
+	int ret = 0;
+	u8 *p = fbuf;
+	u32 crc32_result;
+	u64 load_addr;
+	u32 image_len;
+	struct octeon_firmware_file_header *h;
+	u32 i, rem;
+
+	if (size < sizeof(struct octeon_firmware_file_header)) {
+		dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
+			(u32)size,
+			(u32)sizeof(struct octeon_firmware_file_header));
+		return -EINVAL;
+	}
+
+	h = (struct octeon_firmware_file_header *)data;
+
+	if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
+		dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
+		return -EINVAL;
+	}
+
+	crc32_result = crc32((unsigned int)~0, data,
+			     sizeof(struct octeon_firmware_file_header) -
+			     sizeof(u32)) ^ ~0U;
+	if (crc32_result != be32_to_cpu(h->crc32)) {
+		dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
+			crc32_result, be32_to_cpu(h->crc32));
+		return -EINVAL;
+	}
+
+	if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
+		dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
+			LIQUIDIO_PACKAGE, h->version);
+		return -EINVAL;
+	}
+
+	if (memcmp(LIQUIDIO_BASE_VERSION, h->version + strlen(LIQUIDIO_PACKAGE),
+		   strlen(LIQUIDIO_BASE_VERSION))) {
+		dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
+			LIQUIDIO_BASE_VERSION,
+			h->version + strlen(LIQUIDIO_PACKAGE));
+		return -EINVAL;
+	}
+
+	if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
+		dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
+			be32_to_cpu(h->num_images));
+		return -EINVAL;
+	}
+
+	dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
+	snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
+		 h->version);
+
+	data += sizeof(struct octeon_firmware_file_header);
+
+	dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
+		 be32_to_cpu(h->num_images));
+	/* load all images */
+	for (i = 0; i < be32_to_cpu(h->num_images); i++) {
+		load_addr = be64_to_cpu(h->desc[i].addr);
+		image_len = be32_to_cpu(h->desc[i].len);
+
+		dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
+			 image_len, load_addr);
+
+		/* Write in 4MB chunks*/
+		rem = image_len;
+
+		while (rem) {
+			if (rem < FBUF_SIZE)
+				size = rem;
+			else
+				size = FBUF_SIZE;
+
+			memcpy(p, data, size);
+
+			/* download the image */
+			octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
+
+			data += size;
+			rem -= (u32)size;
+			load_addr += size;
+		}
+	}
+	dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
+		 h->bootcmd);
+
+	/* Invoke the bootcmd */
+	ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+
+	return 0;
+}

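octeon_download_firmware() above validates the header (magic, CRC32 over the header minus its trailing checksum field, package and base-version strings), then streams each image to device memory through the static 4 MB bounce buffer. A condensed sketch of the chunking idea (illustrative; lio_write_chunked is a hypothetical helper, the real loop is in the function above):

	/* Sketch: copy 'len' bytes to device memory in bounce-buffer chunks. */
	static void lio_write_chunked(struct octeon_device *oct, u64 load_addr,
				      const u8 *data, u32 len)
	{
		while (len) {
			u32 n = min_t(u32, len, FBUF_SIZE);

			memcpy(fbuf, data, n);
			octeon_pci_write_core_mem(oct, load_addr, fbuf, n);
			data += n;
			load_addr += n;
			len -= n;
		}
	}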
+ 192 - 110
drivers/net/ethernet/cavium/liquidio/octeon_device.c

@@ -20,7 +20,6 @@
 * Contact Cavium, Inc. for more information
 **********************************************************************/
 #include <linux/pci.h>
-#include <linux/crc32.h>
 #include <linux/netdevice.h>
 #include <linux/vmalloc.h>
 #include "liquidio_common.h"
@@ -32,8 +31,7 @@
 #include "octeon_network.h"
 #include "cn66xx_regs.h"
 #include "cn66xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
+#include "cn23xx_pf_device.h"
 
 /** Default configuration
  *  for CN66XX OCTEON Models.
@@ -420,6 +418,108 @@ static struct octeon_config default_cn68xx_210nv_conf = {
 	,
 };
 
+static struct octeon_config default_cn23xx_conf = {
+	.card_type                              = LIO_23XX,
+	.card_name                              = LIO_23XX_NAME,
+	/** IQ attributes */
+	.iq = {
+		.max_iqs		= CN23XX_CFG_IO_QUEUES,
+		.pending_list_size	= (CN23XX_MAX_IQ_DESCRIPTORS *
+					   CN23XX_CFG_IO_QUEUES),
+		.instr_type		= OCTEON_64BYTE_INSTR,
+		.db_min			= CN23XX_DB_MIN,
+		.db_timeout		= CN23XX_DB_TIMEOUT,
+		.iq_intr_pkt		= CN23XX_DEF_IQ_INTR_THRESHOLD,
+	},
+
+	/** OQ attributes */
+	.oq = {
+		.max_oqs		= CN23XX_CFG_IO_QUEUES,
+		.info_ptr		= OCTEON_OQ_INFOPTR_MODE,
+		.pkts_per_intr	= CN23XX_OQ_PKTSPER_INTR,
+		.refill_threshold	= CN23XX_OQ_REFIL_THRESHOLD,
+		.oq_intr_pkt	= CN23XX_OQ_INTR_PKT,
+		.oq_intr_time	= CN23XX_OQ_INTR_TIME,
+	},
+
+	.num_nic_ports				= DEFAULT_NUM_NIC_PORTS_23XX,
+	.num_def_rx_descs			= CN23XX_MAX_OQ_DESCRIPTORS,
+	.num_def_tx_descs			= CN23XX_MAX_IQ_DESCRIPTORS,
+	.def_rx_buf_size			= CN23XX_OQ_BUF_SIZE,
+
+	/* For ethernet interface 0:  Port cfg Attributes */
+	.nic_if_cfg[0] = {
+		/* Max Txqs: Half for each of the two ports :max_iq/2 */
+		.max_txqs			= MAX_TXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_txqs */
+		.num_txqs			= DEF_TXQS_PER_INTF,
+
+		/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+		.max_rxqs			= MAX_RXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_rxqs */
+		.num_rxqs			= DEF_RXQS_PER_INTF,
+
+		/* Num of desc for rx rings */
+		.num_rx_descs			= CN23XX_MAX_OQ_DESCRIPTORS,
+
+		/* Num of desc for tx rings */
+		.num_tx_descs			= CN23XX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size, We need not change buf size even for Jumbo frames.
+		 * Octeon can send jumbo frames in 4 consecutive descriptors,
+		 */
+		.rx_buf_size			= CN23XX_OQ_BUF_SIZE,
+
+		.base_queue			= BASE_QUEUE_NOT_REQUESTED,
+
+		.gmx_port_id			= 0,
+	},
+
+	.nic_if_cfg[1] = {
+		/* Max Txqs: Half for each of the two ports :max_iq/2 */
+		.max_txqs			= MAX_TXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_txqs */
+		.num_txqs			= DEF_TXQS_PER_INTF,
+
+		/* Max Rxqs: Half for each of the two ports :max_oq/2  */
+		.max_rxqs			= MAX_RXQS_PER_INTF,
+
+		/* Actual configured value. Range could be: 1...max_rxqs */
+		.num_rxqs			= DEF_RXQS_PER_INTF,
+
+		/* Num of desc for rx rings */
+		.num_rx_descs			= CN23XX_MAX_OQ_DESCRIPTORS,
+
+		/* Num of desc for tx rings */
+		.num_tx_descs			= CN23XX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size, We need not change buf size even for Jumbo frames.
+		 * Octeon can send jumbo frames in 4 consecutive descriptors,
+		 */
+		.rx_buf_size			= CN23XX_OQ_BUF_SIZE,
+
+		.base_queue			= BASE_QUEUE_NOT_REQUESTED,
+
+		.gmx_port_id			= 1,
+	},
+
+	.misc					= {
+		/* Host driver link query interval */
+		.oct_link_query_interval	= 100,
+
+		/* Octeon link query interval */
+		.host_link_query_interval	= 500,
+
+		.enable_sli_oq_bp		= 0,
+
+		/* Control queue group */
+		.ctrlq_grp			= 1,
+	}
+};
+
 enum {
 	OCTEON_CONFIG_TYPE_DEFAULT = 0,
 	NUM_OCTEON_CONFS,
@@ -487,6 +587,8 @@ static void *__retrieve_octeon_config_info(struct octeon_device *oct,
 		} else if ((oct->chip_id == OCTEON_CN68XX) &&
 			   (card_type == LIO_410NV)) {
 			ret =  (void *)&default_cn68xx_conf;
+		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+			ret =  (void *)&default_cn23xx_conf;
 		}
 		break;
 	default:
@@ -501,7 +603,8 @@ static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
 	case OCTEON_CN66XX:
 	case OCTEON_CN68XX:
 		return lio_validate_cn6xxx_config_info(oct, conf);
-
+	case OCTEON_CN23XX_PF_VID:
+		return 0;
 	default:
 		break;
 	}
@@ -541,107 +644,6 @@ static char *get_oct_app_string(u32 app_mode)
 	return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
 }
 
-u8 fbuf[4 * 1024 * 1024];
-
-int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
-			     size_t size)
-{
-	int ret = 0;
-	u8 *p = fbuf;
-	u32 crc32_result;
-	u64 load_addr;
-	u32 image_len;
-	struct octeon_firmware_file_header *h;
-	u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION);
-	char *base;
-
-	if (size < sizeof(struct octeon_firmware_file_header)) {
-		dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
-			(u32)size,
-			(u32)sizeof(struct octeon_firmware_file_header));
-		return -EINVAL;
-	}
-
-	h = (struct octeon_firmware_file_header *)data;
-
-	if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
-		dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
-		return -EINVAL;
-	}
-
-	crc32_result = crc32((unsigned int)~0, data,
-			     sizeof(struct octeon_firmware_file_header) -
-			     sizeof(u32)) ^ ~0U;
-	if (crc32_result != be32_to_cpu(h->crc32)) {
-		dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
-			crc32_result, be32_to_cpu(h->crc32));
-		return -EINVAL;
-	}
-
-	if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
-		dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
-			LIQUIDIO_PACKAGE, h->version);
-		return -EINVAL;
-	}
-
-	base = h->version + strlen(LIQUIDIO_PACKAGE);
-	ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len);
-	if (ret) {
-		dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
-			LIQUIDIO_BASE_VERSION, base);
-		return -EINVAL;
-	}
-
-	if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
-		dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
-			be32_to_cpu(h->num_images));
-		return -EINVAL;
-	}
-
-	dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
-	snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
-		 h->version);
-
-	data += sizeof(struct octeon_firmware_file_header);
-
-	dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
-		 be32_to_cpu(h->num_images));
-	/* load all images */
-	for (i = 0; i < be32_to_cpu(h->num_images); i++) {
-		load_addr = be64_to_cpu(h->desc[i].addr);
-		image_len = be32_to_cpu(h->desc[i].len);
-
-		dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
-			 image_len, load_addr);
-
-		/* Write in 4MB chunks*/
-		rem = image_len;
-
-		while (rem) {
-			if (rem < (4 * 1024 * 1024))
-				size = rem;
-			else
-				size = 4 * 1024 * 1024;
-
-			memcpy(p, data, size);
-
-			/* download the image */
-			octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
-
-			data += size;
-			rem -= (u32)size;
-			load_addr += size;
-		}
-	}
-	dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
-		 h->bootcmd);
-
-	/* Invoke the bootcmd */
-	ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
-
-	return 0;
-}
-
 void octeon_free_device_mem(struct octeon_device *oct)
 {
 	int i;
@@ -676,6 +678,9 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
 		configsize = sizeof(struct octeon_cn6xxx);
 		break;
 
+	case OCTEON_CN23XX_PF_VID:
+		configsize = sizeof(struct octeon_cn23xx_pf);
+		break;
 	default:
 		pr_err("%s: Unknown PCI Device: 0x%x\n",
 		       __func__,
@@ -741,6 +746,45 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
 	return oct;
 }
 
+int
+octeon_allocate_ioq_vector(struct octeon_device  *oct)
+{
+	int i, num_ioqs = 0;
+	struct octeon_ioq_vector *ioq_vector;
+	int cpu_num;
+	int size;
+
+	if (OCTEON_CN23XX_PF(oct))
+		num_ioqs = oct->sriov_info.num_pf_rings;
+	size = sizeof(struct octeon_ioq_vector) * num_ioqs;
+
+	oct->ioq_vector = vmalloc(size);
+	if (!oct->ioq_vector)
+		return 1;
+	memset(oct->ioq_vector, 0, size);
+	for (i = 0; i < num_ioqs; i++) {
+		ioq_vector		= &oct->ioq_vector[i];
+		ioq_vector->oct_dev	= oct;
+		ioq_vector->iq_index	= i;
+		ioq_vector->droq_index	= i;
+
+		cpu_num = i % num_online_cpus();
+		cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask);
+
+		if (oct->chip_id == OCTEON_CN23XX_PF_VID)
+			ioq_vector->ioq_num	= i + oct->sriov_info.pf_srn;
+		else
+			ioq_vector->ioq_num	= i;
+	}
+	return 0;
+}
+
+void
+octeon_free_ioq_vector(struct octeon_device *oct)
+{
+	vfree(oct->ioq_vector);
+}
+
 /* this function is only for setting up the first queue */
 int octeon_setup_instr_queues(struct octeon_device *oct)
 {
@@ -753,6 +797,9 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
 	if (OCTEON_CN6XXX(oct))
 		num_descs =
 			CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+	else if (OCTEON_CN23XX_PF(oct))
+		num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
+								conf));
 
 	oct->num_iqs = 0;
 
@@ -794,8 +841,12 @@ int octeon_setup_output_queues(struct octeon_device *oct)
 			CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
 		desc_size =
 			CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
+	} else if (OCTEON_CN23XX_PF(oct)) {
+		num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
+								conf));
+		desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn23xx_pf,
+							       conf));
 	}
-
 	oct->num_oqs = 0;
 	oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
 	if (!oct->droq[0])
@@ -1019,6 +1070,9 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
 	if (OCTEON_CN6XXX(oct))
 		num_nic_ports =
 			CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
+	else if (OCTEON_CN23XX_PF(oct))
+		num_nic_ports =
+			CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn23xx_pf, conf));
 
 	if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
 		dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
@@ -1108,8 +1162,10 @@ struct octeon_config *octeon_get_conf(struct octeon_device *oct)
 	if (OCTEON_CN6XXX(oct)) {
 		default_oct_conf =
 			(struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
+	} else if (OCTEON_CN23XX_PF(oct)) {
+		default_oct_conf = (struct octeon_config *)
+			(CHIP_FIELD(oct, cn23xx_pf, conf));
 	}
-
 	return default_oct_conf;
 }
 
@@ -1141,7 +1197,9 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
 	 * So write MSB first
 	 */
 	addrhi = (addr >> 32);
-	if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
+	if ((oct->chip_id == OCTEON_CN66XX) ||
+	    (oct->chip_id == OCTEON_CN68XX) ||
+	    (oct->chip_id == OCTEON_CN23XX_PF_VID))
 		addrhi |= 0x00060000;
 	writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
 
@@ -1185,8 +1243,15 @@ int octeon_mem_access_ok(struct octeon_device *oct)
 	u64 lmc0_reset_ctl;
 
 	/* Check to make sure a DDR interface is enabled */
-	lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
-	access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+	if (OCTEON_CN23XX_PF(oct)) {
+		lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
+		access_okay =
+			(lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
+	} else {
+		lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+		access_okay =
+			(lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+	}
 
 	return access_okay ? 0 : 1;
 }
@@ -1226,3 +1291,20 @@ int lio_get_device_id(void *dev)
 			return octeon_dev->octeon_id;
 	return -1;
 }
+
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
+{
+	/* the whole thing needs to be atomic, ideally */
+	if (droq) {
+		spin_lock_bh(&droq->lock);
+		writel(droq->pkt_count, droq->pkts_sent_reg);
+		droq->pkt_count = 0;
+		spin_unlock_bh(&droq->lock);
+	}
+	if (iq) {
+		spin_lock_bh(&iq->lock);
+		writel(iq->pkt_in_done, iq->inst_cnt_reg);
+		iq->pkt_in_done = 0;
+		spin_unlock_bh(&iq->lock);
+	}
+}

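lio_enable_irq() above pairs with the new droq->pkt_count / iq->pkt_in_done bookkeeping: counts read from hardware are only acknowledged (written back to the count registers) when interrupts are re-enabled, not on every poll. Illustrative call ordering, not a new API:

	/* Sketched NAPI poll path:
	 *   octeon_droq_check_hw_for_pkts(droq);   // read count, queue delta
	 *   ...process packets up to budget...
	 *   if (work_done < budget)
	 *           lio_enable_irq(droq, iq);      // writel() acks + resets
	 */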
+ 93 - 7
drivers/net/ethernet/cavium/liquidio/octeon_device.h

@@ -30,13 +30,19 @@
 /** PCI VendorId Device Id */
 #define  OCTEON_CN68XX_PCIID          0x91177d
 #define  OCTEON_CN66XX_PCIID          0x92177d
-
+#define  OCTEON_CN23XX_PCIID_PF       0x9702177d
 /** Driver identifies chips by these Ids, created by clubbing together
  *  DeviceId+RevisionId; Where Revision Id is not used to distinguish
  *  between chips, a value of 0 is used for revision id.
  */
 #define  OCTEON_CN68XX                0x0091
 #define  OCTEON_CN66XX                0x0092
+#define  OCTEON_CN23XX_PF_VID         0x9702
+
+/**RevisionId for the chips */
+#define  OCTEON_CN23XX_REV_1_0        0x00
+#define  OCTEON_CN23XX_REV_1_1        0x01
+#define  OCTEON_CN23XX_REV_2_0        0x80
 
 /** Endian-swap modes supported by Octeon. */
 enum octeon_pci_swap_mode {
@@ -46,6 +52,9 @@ enum octeon_pci_swap_mode {
 	OCTEON_PCI_32BIT_LW_SWAP = 3
 };
 
+#define  OCTEON_OUTPUT_INTR   (2)
+#define  OCTEON_ALL_INTR      0xff
+
 /*---------------   PCI BAR1 index registers -------------*/
 
 /* BAR1 Mask */
@@ -198,9 +207,9 @@ struct octeon_fn_list {
 	void (*setup_oq_regs)(struct octeon_device *, u32);
 
 	irqreturn_t (*process_interrupt_regs)(void *);
+	u64 (*msix_interrupt_handler)(void *);
 	int (*soft_reset)(struct octeon_device *);
 	int (*setup_device_regs)(struct octeon_device *);
-	void (*reinit_regs)(struct octeon_device *);
 	void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
 	void (*bar1_idx_write)(struct octeon_device *, u32, u32);
 	u32 (*bar1_idx_read)(struct octeon_device *, u32);
@@ -209,10 +218,10 @@ struct octeon_fn_list {
 	void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
 	void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
 
-	void (*enable_interrupt)(void *);
-	void (*disable_interrupt)(void *);
+	void (*enable_interrupt)(struct octeon_device *, u8);
+	void (*disable_interrupt)(struct octeon_device *, u8);
 
-	void (*enable_io_queues)(struct octeon_device *);
+	int (*enable_io_queues)(struct octeon_device *);
 	void (*disable_io_queues)(struct octeon_device *);
 };
 
@@ -271,6 +280,66 @@ struct octdev_props {
 	struct net_device *netdev;
 };
 
+#define LIO_FLAG_MSIX_ENABLED	0x1
+#define MSIX_PO_INT		0x1
+#define MSIX_PI_INT		0x2
+
+struct octeon_pf_vf_hs_word {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+	/** PKIND value assigned for the DPI interface */
+	u64        pkind : 8;
+
+	/** OCTEON core clock multiplier   */
+	u64        core_tics_per_us : 16;
+
+	/** OCTEON coprocessor clock multiplier  */
+	u64        coproc_tics_per_us : 16;
+
+	/** app that currently running on OCTEON  */
+	u64        app_mode : 8;
+
+	/** RESERVED */
+	u64 reserved : 16;
+
+#else
+
+	/** RESERVED */
+	u64 reserved : 16;
+
+	/** app that currently running on OCTEON  */
+	u64        app_mode : 8;
+
+	/** OCTEON coprocessor clock multiplier  */
+	u64        coproc_tics_per_us : 16;
+
+	/** OCTEON core clock multiplier   */
+	u64        core_tics_per_us : 16;
+
+	/** PKIND value assigned for the DPI interface */
+	u64        pkind : 8;
+#endif
+};
+
+struct octeon_sriov_info {
+	/* Actual rings left for PF device */
+	u32	num_pf_rings;
+
+	/* SRN of PF usable IO queues   */
+	u32	pf_srn;
+	/* total pf rings */
+	u32	trs;
+
+};
+
+struct octeon_ioq_vector {
+	struct octeon_device   *oct_dev;
+	int		        iq_index;
+	int		        droq_index;
+	int			vector;
+	struct cpumask		affinity_mask;
+	u32			ioq_num;
+};
+
 /** The Octeon device.
  *  Each Octeon device has this structure to represent all its
  *  components.
@@ -296,7 +365,7 @@ struct octeon_device {
 	/** Octeon Chip type. */
 	u16 chip_id;
 	u16 rev_id;
-
+	u16 pf_num;
 	/** This device's id - set by the driver. */
 	u32 octeon_id;
 
@@ -305,7 +374,6 @@ struct octeon_device {
 
 	u16 flags;
 #define LIO_FLAG_MSI_ENABLED                  (u32)(1 << 1)
-#define LIO_FLAG_MSIX_ENABLED                 (u32)(1 << 2)
 
 	/** The state of this device */
 	atomic_t status;
@@ -395,6 +463,19 @@ struct octeon_device {
 
 	void *priv;
 
+	int num_msix_irqs;
+
+	void *msix_entries;
+
+	struct octeon_sriov_info sriov_info;
+
+	struct octeon_pf_vf_hs_word pfvf_hsword;
+
+	int msix_on;
+
+	/** IOq information of it's corresponding MSI-X interrupt. */
+	struct octeon_ioq_vector    *ioq_vector;
+
 	int rx_pause;
 	int tx_pause;
 
@@ -408,6 +489,7 @@ struct octeon_device {
 #define  OCT_DRV_OFFLINE 2
 #define  OCTEON_CN6XXX(oct)           ((oct->chip_id == OCTEON_CN66XX) || \
 				       (oct->chip_id == OCTEON_CN68XX))
+#define  OCTEON_CN23XX_PF(oct)        (oct->chip_id == OCTEON_CN23XX_PF_VID)
 #define CHIP_FIELD(oct, TYPE, field)             \
 	(((struct octeon_ ## TYPE  *)(oct->chip))->field)
 
@@ -661,6 +743,10 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
  */
 struct octeon_config *octeon_get_conf(struct octeon_device *oct);
 
+void octeon_free_ioq_vector(struct octeon_device *oct);
+int octeon_allocate_ioq_vector(struct octeon_device  *oct);
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
+
 /* LiquidIO driver private flags */
 enum {
 	OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */

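The octeon_ioq_vector array added above gives each MSI-X vector its own iq/droq pair plus a CPU affinity mask; octeon_allocate_ioq_vector() in octeon_device.c fills one entry per PF ring. A hypothetical handler body showing how a vector maps back to its queues (sketch only; field names from the struct above, NAPI wiring assumed):

	static irqreturn_t lio_msix_sketch(int irq, void *dev)
	{
		struct octeon_ioq_vector *ioq_vector = dev;
		struct octeon_device *oct = ioq_vector->oct_dev;
		struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

		if (droq)
			napi_schedule(&droq->napi); /* defer work to NAPI poll */
		return IRQ_HANDLED;
	}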
+ 20 - 13
drivers/net/ethernet/cavium/liquidio/octeon_droq.c

@@ -92,22 +92,25 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
 	return fn_arg;
 }
 
-/** Check for packets on Droq. This function should be called with
- * lock held.
+/** Check for packets on Droq. This function should be called with lock held.
  *  @param  droq - Droq on which count is checked.
  *  @return Returns packet count.
  */
 u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
 {
 	u32 pkt_count = 0;
+	u32 last_count;
 
 	pkt_count = readl(droq->pkts_sent_reg);
-	if (pkt_count) {
-		atomic_add(pkt_count, &droq->pkts_pending);
-		writel(pkt_count, droq->pkts_sent_reg);
-	}
 
-	return pkt_count;
+	last_count = pkt_count - droq->pkt_count;
+	droq->pkt_count = pkt_count;
+
+	/* we shall write to cnts  at napi irq enable or end of droq tasklet */
+	if (last_count)
+		atomic_add(last_count, &droq->pkts_pending);
+
+	return last_count;
 }
 
 static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
@@ -735,16 +738,20 @@ octeon_droq_process_packets(struct octeon_device *oct,
 	u32 pkt_count = 0, pkts_processed = 0;
 	struct list_head *tmp, *tmp2;
 
+	/* Grab the droq lock */
+	spin_lock(&droq->lock);
+
+	octeon_droq_check_hw_for_pkts(droq);
 	pkt_count = atomic_read(&droq->pkts_pending);
-	if (!pkt_count)
+
+	if (!pkt_count) {
+		spin_unlock(&droq->lock);
 		return 0;
+	}
 
 	if (pkt_count > budget)
 		pkt_count = budget;
 
-	/* Grab the droq lock */
-	spin_lock(&droq->lock);
-
 	pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
 
 	atomic_sub(pkts_processed, &droq->pkts_pending);
@@ -789,6 +796,8 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
 	spin_lock(&droq->lock);
 
 	while (total_pkts_processed < budget) {
+		octeon_droq_check_hw_for_pkts(droq);
+
 		pkts_available =
 			CVM_MIN((budget - total_pkts_processed),
 				(u32)(atomic_read(&droq->pkts_pending)));
@@ -803,8 +812,6 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
 		atomic_sub(pkts_processed, &droq->pkts_pending);
 
 		total_pkts_processed += pkts_processed;
-
-		octeon_droq_check_hw_for_pkts(droq);
 	}
 
 	spin_unlock(&droq->lock);

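The droq changes above switch octeon_droq_check_hw_for_pkts() from clear-on-read to delta accounting against the new pkt_count field, and move the hardware check under the droq lock. A standalone sketch of the delta logic (illustrative; mirrors the function above):

	/* Sketch: queue only the packets that arrived since the last read. */
	static u32 droq_new_pkts_sketch(struct octeon_droq *droq)
	{
		u32 hw_count = readl(droq->pkts_sent_reg);
		u32 delta = hw_count - droq->pkt_count; /* u32 wrap-safe */

		droq->pkt_count = hw_count;
		if (delta)
			atomic_add(delta, &droq->pkts_pending);
		return delta;
	}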
+ 2 - 0
drivers/net/ethernet/cavium/liquidio/octeon_droq.h

@@ -261,6 +261,8 @@ struct octeon_droq {
 
 	u32 q_no;
 
+	u32 pkt_count;
+
 	struct octeon_droq_ops ops;
 
 	struct octeon_device *oct_dev;

+ 2 - 0
drivers/net/ethernet/cavium/liquidio/octeon_iq.h

@@ -88,6 +88,8 @@ struct octeon_instr_queue {
 	/** A spinlock to protect while posting on the ring.  */
 	spinlock_t post_lock;
 
+	u32 pkt_in_done;
+
 	/** A spinlock to protect access to the input ring.*/
 	spinlock_t iq_flush_running_lock;
 

+ 19 - 5
drivers/net/ethernet/cavium/liquidio/octeon_main.h

@@ -38,12 +38,26 @@
 
 #define DRV_NAME "LiquidIO"
 
-/**
- * \brief determines if a given console has debug enabled.
- * @param console console to check
- * @returns  1 = enabled. 0 otherwise
+/** This structure is used by NIC driver to store information required
+ * to free the sk_buff when the packet has been fetched by Octeon.
+ * Bytes offset below assume worst-case of a 64-bit system.
  */
-int octeon_console_debug_enabled(u32 console);
+struct octnet_buf_free_info {
+	/** Bytes 1-8.  Pointer to network device private structure. */
+	struct lio *lio;
+
+	/** Bytes 9-16.  Pointer to sk_buff. */
+	struct sk_buff *skb;
+
+	/** Bytes 17-24.  Pointer to gather list. */
+	struct octnic_gather *g;
+
+	/** Bytes 25-32. Physical address of skb->data or gather list. */
+	u64 dptr;
+
+	/** Bytes 33-47. Piggybacked soft command, if any */
+	struct octeon_soft_command *sc;
+};
 
 /* BQL-related functions */
 void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);

+ 0 - 1
drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c

@@ -19,7 +19,6 @@
  * This file may also be available under a different license from Cavium.
  * Contact Cavium, Inc. for more information
  **********************************************************************/
-#include <linux/pci.h>
 #include <linux/netdevice.h>
 #include "liquidio_common.h"
 #include "octeon_droq.h"

+ 0 - 2
drivers/net/ethernet/cavium/liquidio/octeon_network.h

@@ -26,8 +26,6 @@
 
 #ifndef __OCTEON_NETWORK_H__
 #define __OCTEON_NETWORK_H__
-#include <linux/version.h>
-#include <linux/dma-mapping.h>
 #include <linux/ptp_clock_kernel.h>
 
 #define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)

+ 2 - 6
drivers/net/ethernet/cavium/liquidio/octeon_nic.c

@@ -19,7 +19,6 @@
  * This file may also be available under a different license from Cavium.
  * Contact Cavium, Inc. for more information
  **********************************************************************/
-#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include "liquidio_common.h"
@@ -73,12 +72,9 @@ octeon_alloc_soft_command_resp(struct octeon_device    *oct,
 }
 
 int octnet_send_nic_data_pkt(struct octeon_device *oct,
-			     struct octnic_data_pkt *ndata,
-			     u32 xmit_more)
+			     struct octnic_data_pkt *ndata)
 {
-	int ring_doorbell;
-
-	ring_doorbell = !xmit_more;
+	int ring_doorbell = 1;
 
 	return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
 				   ndata->buf, ndata->datasize,

+ 1 - 1
drivers/net/ethernet/cavium/liquidio/octeon_nic.h

@@ -278,7 +278,7 @@ octeon_alloc_soft_command_resp(struct octeon_device    *oct,
  * queue should be stopped, and IQ_SEND_OK if it sent okay.
  */
 int octnet_send_nic_data_pkt(struct octeon_device *oct,
-			     struct octnic_data_pkt *ndata, u32 xmit_more);
+			     struct octnic_data_pkt *ndata);
 
 /** Send a NIC control packet to the device
  * @param oct - octeon device pointer

+ 3 - 0
drivers/net/ethernet/cavium/liquidio/request_manager.c

@@ -499,6 +499,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
 
 	if (!oct)
 		return;
+
 	iq = oct->instr_queue[iq_no];
 	if (!iq)
 		return;
@@ -514,6 +515,8 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
 
 	/* Flush the instruction queue */
 	octeon_flush_iq(oct, iq, 1, 0);
+
+	lio_enable_irq(NULL, iq);
 }
 
 /* Called by the Poll thread at regular intervals to check the instruction