@@ -1736,10 +1736,11 @@ EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
 
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
- struct adapter *adap;
- u32 offset, memtype, memaddr;
u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
u32 edc0_end, edc1_end, mc0_end, mc1_end;
+ u32 offset, memtype, memaddr;
+ struct adapter *adap;
+ u32 hma_size = 0;
int ret;
 
adap = netdev2adap(dev);
@@ -1759,6 +1760,10 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
mc0_size = EXT_MEM0_SIZE_G(size) << 20;
 
+ if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
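+ /* When HMA is muxed in, its size is advertised by the EXT_MEMORY1 (MC1) BAR */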
+ size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ hma_size = EXT_MEM1_SIZE_G(size) << 20;
+ }
edc0_end = edc0_size;
edc1_end = edc0_end + edc1_size;
mc0_end = edc1_end + mc0_size;
@@ -1770,7 +1775,10 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
memtype = MEM_EDC1;
memaddr = offset - edc0_end;
} else {
- if (offset < mc0_end) {
+ if (hma_size && (offset < (edc1_end + hma_size))) {
+ memtype = MEM_HMA;
+ memaddr = offset - edc1_end;
+ } else if (offset < mc0_end) {
memtype = MEM_MC0;
memaddr = offset - edc1_end;
} else if (is_t5(adap->params.chip)) {
@@ -3301,6 +3309,206 @@ static void setup_memwin_rdma(struct adapter *adap)
}
}
 
+/* HMA Definitions */
+
+/* The maximum number of addresses that can be sent in a single FW cmd */
+#define HMA_MAX_ADDR_IN_CMD 5
+
+#define HMA_PAGE_SIZE PAGE_SIZE
+
+#define HMA_MAX_NO_FW_ADDRESS (16 << 10) /* FW supports 16K addresses */
+
+#define HMA_PAGE_ORDER \
+ ((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ? \
+ ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
+
+/* The minimum and maximum possible HMA sizes that can be specified in the FW
+ * configuration (in units of MB).
+ */
+#define HMA_MIN_TOTAL_SIZE 1
+#define HMA_MAX_TOTAL_SIZE \
+ (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) * \
+ HMA_MAX_NO_FW_ADDRESS) >> 20)
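+/* e.g. with 4KB pages, HMA_PAGE_ORDER evaluates to 2, each scatterlist
+ * chunk spans 16KB and HMA_MAX_TOTAL_SIZE works out to 256MB.
+ */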
+
+static void adap_free_hma_mem(struct adapter *adapter)
+{
+ struct scatterlist *iter;
+ struct page *page;
+ int i;
+
+ if (!adapter->hma.sgt)
+ return;
+
+ if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
+ dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
+ adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
+ adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
+ }
+
+ for_each_sg(adapter->hma.sgt->sgl, iter,
+ adapter->hma.sgt->orig_nents, i) {
+ page = sg_page(iter);
+ if (page)
+ __free_pages(page, HMA_PAGE_ORDER);
+ }
+
+ kfree(adapter->hma.phy_addr);
+ sg_free_table(adapter->hma.sgt);
+ kfree(adapter->hma.sgt);
+ adapter->hma.sgt = NULL;
+}
+
+static int adap_config_hma(struct adapter *adapter)
+{
+ struct scatterlist *sgl, *iter;
+ struct sg_table *sgt;
+ struct page *newpage;
+ unsigned int i, j, k;
+ u32 param, hma_size;
+ unsigned int ncmds;
+ size_t page_size;
+ u32 page_order;
+ int node, ret;
+
+ /* HMA is supported only for T6+ cards.
+ * Avoid initializing HMA in kdump kernels.
+ */
+ if (is_kdump_kernel() ||
+ CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+ return 0;
+
+ /* Get the HMA region size required by fw */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
+ ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
+ 1, &param, &hma_size);
+ /* An error means the card has its own memory or HMA is not supported by
+ * the firmware. Return without any errors.
+ */
+ if (ret || !hma_size)
+ return 0;
+
+ if (hma_size < HMA_MIN_TOTAL_SIZE ||
+ hma_size > HMA_MAX_TOTAL_SIZE) {
+ dev_err(adapter->pdev_dev,
+ "HMA size %uMB beyond bounds (%u-%lu)MB\n",
+ hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
+ return -EINVAL;
+ }
+
+ page_size = HMA_PAGE_SIZE;
+ page_order = HMA_PAGE_ORDER;
+ adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
+ if (unlikely(!adapter->hma.sgt)) {
+ dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
+ return -ENOMEM;
+ }
+ sgt = adapter->hma.sgt;
+ /* The HMA size returned by FW is in units of MB.
+ */
+ sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
+ if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
+ dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
+ kfree(adapter->hma.sgt);
+ adapter->hma.sgt = NULL;
+ return -ENOMEM;
+ }
+
+ sgl = adapter->hma.sgt->sgl;
+ node = dev_to_node(adapter->pdev_dev);
+ for_each_sg(sgl, iter, sgt->orig_nents, i) {
+ newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL,
+ page_order);
+ if (!newpage) {
+ dev_err(adapter->pdev_dev,
+ "Not enough memory for HMA page allocation\n");
+ ret = -ENOMEM;
+ goto free_hma;
+ }
+ sg_set_page(iter, newpage, page_size << page_order, 0);
+ }
+
+ sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
+ DMA_BIDIRECTIONAL);
+ if (!sgt->nents) {
+ dev_err(adapter->pdev_dev,
+ "Not enough memory for HMA DMA mapping\n");
+ ret = -ENOMEM;
+ goto free_hma;
+ }
+ adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
+
+ adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (unlikely(!adapter->hma.phy_addr)) {
+ ret = -ENOMEM;
+ goto free_hma;
+ }
+
+ for_each_sg(sgl, iter, sgt->nents, i) {
+ newpage = sg_page(iter);
+ adapter->hma.phy_addr[i] = sg_dma_address(iter);
+ }
+
+ ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
+ /* Pass on the addresses to firmware */
+ for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
+ struct fw_hma_cmd hma_cmd;
+ u8 naddr = HMA_MAX_ADDR_IN_CMD;
+ u8 soc = 0, eoc = 0;
+ u8 hma_mode = 1; /* Presently we support only Page table mode */
+
+ soc = (i == 0) ? 1 : 0;
+ eoc = (i == ncmds - 1) ? 1 : 0;
+
+ /* For last cmd, set naddr corresponding to remaining
+ * addresses
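+ * (e.g. 12 mapped chunks need 3 commands carrying 5, 5 and 2 addresses)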
+ */
+ if (i == ncmds - 1) {
+ naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
+ naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
+ }
+ memset(&hma_cmd, 0, sizeof(hma_cmd));
+ hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
+
+ hma_cmd.mode_to_pcie_params =
+ htonl(FW_HMA_CMD_MODE_V(hma_mode) |
+ FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
+
+ /* HMA cmd size is specified in MB */
+ hma_cmd.naddr_size =
+ htonl(FW_HMA_CMD_SIZE_V(hma_size) |
+ FW_HMA_CMD_NADDR_V(naddr));
+
+ /* Total Page size specified in units of 4K */
+ hma_cmd.addr_size_pkd =
+ htonl(FW_HMA_CMD_ADDR_SIZE_V
+ ((page_size << page_order) >> 12));
+
+ /* Fill the naddr addresses for this command */
+ for (j = 0; j < naddr; j++) {
+ hma_cmd.phy_address[j] =
+ cpu_to_be64(adapter->hma.phy_addr[j + k]);
+ }
+ ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
+ sizeof(hma_cmd), &hma_cmd);
+ if (ret) {
+ dev_err(adapter->pdev_dev,
+ "HMA FW command failed with err %d\n", ret);
+ goto free_hma;
+ }
+ }
+
+ if (!ret)
+ dev_info(adapter->pdev_dev,
+ "Reserved %uMB host memory for HMA\n", hma_size);
+ return ret;
+
+free_hma:
+ adap_free_hma_mem(adapter);
+ return ret;
+}
+
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
u32 v;
@@ -3754,6 +3962,12 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
 
+ /* We will proceed even if HMA init fails. */
+ ret = adap_config_hma(adapter);
+ if (ret)
+ dev_err(adapter->pdev_dev,
+ "HMA configuration failed with error %d\n", ret);
+
/*
* And finally tell the firmware to initialize itself using the
* parameters from the Configuration File.
@@ -3960,6 +4174,11 @@ static int adap_init0(struct adapter *adap)
* effect. Otherwise, it's time to try initializing the adapter.
*/
if (state == DEV_STATE_INIT) {
+ ret = adap_config_hma(adap);
+ if (ret)
+ dev_err(adap->pdev_dev,
+ "HMA configuration failed with error %d\n",
+ ret);
dev_info(adap->pdev_dev, "Coming up as %s: "\
"Adapter already initialized\n",
adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
@@ -4349,6 +4568,7 @@ static int adap_init0(struct adapter *adap)
* happened to HW/FW, stop issuing commands.
*/
bye:
+ adap_free_hma_mem(adap);
kfree(adap->sge.egr_map);
kfree(adap->sge.ingr_map);
kfree(adap->sge.starving_fl);
@@ -5576,6 +5796,8 @@ static void remove_one(struct pci_dev *pdev)
t4_uld_clean_up(adapter);
}
 
+ adap_free_hma_mem(adapter);
+
disable_interrupts(adapter);
 
for_each_port(adapter, i)