- /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
- */
- #include "qed_hw.h"
- #include "qed_int.h"
- #include "qed_reg_addr.h"
- #include "qed_sriov.h"
- #include "qed_vf.h"
- bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
- int rel_vf_id, bool b_enabled_only)
- {
- if (!p_hwfn->pf_iov_info) {
- DP_NOTICE(p_hwfn->cdev, "No iov info\n");
- return false;
- }
- if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
- (rel_vf_id < 0))
- return false;
- if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
- b_enabled_only)
- return false;
- return true;
- }
- static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
- {
- struct qed_hw_sriov_info *iov = cdev->p_iov_info;
- int pos = iov->pos;
- DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
- pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
- pci_read_config_word(cdev->pdev,
- pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
- pci_read_config_word(cdev->pdev,
- pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
- pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
- if (iov->num_vfs) {
- DP_VERBOSE(cdev,
- QED_MSG_IOV,
- "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
- iov->num_vfs = 0;
- }
- pci_read_config_word(cdev->pdev,
- pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
- pci_read_config_word(cdev->pdev,
- pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
- pci_read_config_word(cdev->pdev,
- pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
- pci_read_config_dword(cdev->pdev,
- pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
- pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
- pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
- DP_VERBOSE(cdev,
- QED_MSG_IOV,
- "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
- iov->nres,
- iov->cap,
- iov->ctrl,
- iov->total_vfs,
- iov->initial_vfs,
- iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
- /* Some sanity checks */
- if (iov->num_vfs > NUM_OF_VFS(cdev) ||
- iov->total_vfs > NUM_OF_VFS(cdev)) {
- /* This can happen only due to a bug. In this case we set
- * num_vfs to zero to avoid memory corruption in the code that
- * assumes max number of vfs
- */
- DP_NOTICE(cdev,
- "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
- iov->num_vfs);
- iov->num_vfs = 0;
- iov->total_vfs = 0;
- }
- return 0;
- }
- static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
- {
- struct qed_igu_block *p_sb;
- u16 sb_id;
- u32 val;
- if (!p_hwfn->hw_info.p_igu_info) {
- DP_ERR(p_hwfn,
- "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
- return;
- }
- for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
- sb_id++) {
- p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
- if ((p_sb->status & QED_IGU_STATUS_FREE) &&
- !(p_sb->status & QED_IGU_STATUS_PF)) {
- val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sb_id * 4);
- SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
- qed_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
- }
- }
- }
- static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
- {
- struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
- struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
- struct qed_bulletin_content *p_bulletin_virt;
- dma_addr_t req_p, rply_p, bulletin_p;
- union pfvf_tlvs *p_reply_virt_addr;
- union vfpf_tlvs *p_req_virt_addr;
- u8 idx = 0;
- memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
- p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
- req_p = p_iov_info->mbx_msg_phys_addr;
- p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
- rply_p = p_iov_info->mbx_reply_phys_addr;
- p_bulletin_virt = p_iov_info->p_bulletins;
- bulletin_p = p_iov_info->bulletins_phys;
- if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
- DP_ERR(p_hwfn,
- "qed_iov_setup_vfdb called without allocating mem first\n");
- return;
- }
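- /* Carve a mailbox request/reply slot and a bulletin out of the
- * contiguous DMA buffers for each VF.
- */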
- for (idx = 0; idx < p_iov->total_vfs; idx++) {
- struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
- u32 concrete;
- vf->vf_mbx.req_virt = p_req_virt_addr + idx;
- vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
- vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
- vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
- vf->state = VF_STOPPED;
- vf->b_init = false;
- vf->bulletin.phys = idx *
- sizeof(struct qed_bulletin_content) +
- bulletin_p;
- vf->bulletin.p_virt = p_bulletin_virt + idx;
- vf->bulletin.size = sizeof(struct qed_bulletin_content);
- vf->relative_vf_id = idx;
- vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
- concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
- vf->concrete_fid = concrete;
- vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
- (vf->abs_vf_id << 8);
- vf->vport_id = idx + 1;
- }
- }
- static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
- {
- struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
- void **p_v_addr;
- u16 num_vfs = 0;
- num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
- /* Allocate PF Mailbox buffer (per-VF) */
- p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
- p_v_addr = &p_iov_info->mbx_msg_virt_addr;
- *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_iov_info->mbx_msg_size,
- &p_iov_info->mbx_msg_phys_addr,
- GFP_KERNEL);
- if (!*p_v_addr)
- return -ENOMEM;
- /* Allocate PF Mailbox Reply buffer (per-VF) */
- p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
- p_v_addr = &p_iov_info->mbx_reply_virt_addr;
- *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_iov_info->mbx_reply_size,
- &p_iov_info->mbx_reply_phys_addr,
- GFP_KERNEL);
- if (!*p_v_addr)
- return -ENOMEM;
- p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
- num_vfs;
- p_v_addr = &p_iov_info->p_bulletins;
- *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_iov_info->bulletins_size,
- &p_iov_info->bulletins_phys,
- GFP_KERNEL);
- if (!*p_v_addr)
- return -ENOMEM;
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
- "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
- p_iov_info->mbx_msg_virt_addr,
- (u64) p_iov_info->mbx_msg_phys_addr,
- p_iov_info->mbx_reply_virt_addr,
- (u64) p_iov_info->mbx_reply_phys_addr,
- p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
- return 0;
- }
- static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
- {
- struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
- if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_iov_info->mbx_msg_size,
- p_iov_info->mbx_msg_virt_addr,
- p_iov_info->mbx_msg_phys_addr);
- if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_iov_info->mbx_reply_size,
- p_iov_info->mbx_reply_virt_addr,
- p_iov_info->mbx_reply_phys_addr);
- if (p_iov_info->p_bulletins)
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_iov_info->bulletins_size,
- p_iov_info->p_bulletins,
- p_iov_info->bulletins_phys);
- }
- int qed_iov_alloc(struct qed_hwfn *p_hwfn)
- {
- struct qed_pf_iov *p_sriov;
- if (!IS_PF_SRIOV(p_hwfn)) {
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "No SR-IOV - no need for IOV db\n");
- return 0;
- }
- p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
- if (!p_sriov) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
- return -ENOMEM;
- }
- p_hwfn->pf_iov_info = p_sriov;
- return qed_iov_allocate_vfdb(p_hwfn);
- }
- void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
- {
- if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
- return;
- qed_iov_setup_vfdb(p_hwfn);
- qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
- }
- void qed_iov_free(struct qed_hwfn *p_hwfn)
- {
- if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
- qed_iov_free_vfdb(p_hwfn);
- kfree(p_hwfn->pf_iov_info);
- }
- }
- void qed_iov_free_hw_info(struct qed_dev *cdev)
- {
- kfree(cdev->p_iov_info);
- cdev->p_iov_info = NULL;
- }
- int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
- {
- struct qed_dev *cdev = p_hwfn->cdev;
- int pos;
- int rc;
- /* Learn the PCI configuration */
- pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
- PCI_EXT_CAP_ID_SRIOV);
- if (!pos) {
- DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
- return 0;
- }
- /* Allocate a new struct for IOV information */
- cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
- if (!cdev->p_iov_info) {
- DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
- return -ENOMEM;
- }
- cdev->p_iov_info->pos = pos;
- rc = qed_iov_pci_cfg_info(cdev);
- if (rc)
- return rc;
- /* We want PF IOV to be synonymous with the existence of p_iov_info;
- * In case the capability is published but there are no VFs, simply
- * de-allocate the struct.
- */
- if (!cdev->p_iov_info->total_vfs) {
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "IOV capabilities, but no VFs are published\n");
- kfree(cdev->p_iov_info);
- cdev->p_iov_info = NULL;
- return 0;
- }
- /* Calculate the first VF index - this is a bit tricky; basically,
- * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
- * after the first engine's VFs. E.g., with a VF offset of 16 on
- * PF0, the first VF index works out to 16 + 0 - 16 = 0.
- */
- cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
- p_hwfn->abs_pf_id - 16;
- if (QED_PATH_ID(p_hwfn))
- cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "First VF in hwfn 0x%08x\n",
- cdev->p_iov_info->first_vf_in_pf);
- return 0;
- }
- u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
- {
- struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
- u16 i;
- if (!p_iov)
- goto out;
- for (i = rel_vf_id; i < p_iov->total_vfs; i++)
- if (qed_iov_is_valid_vfid(p_hwfn, i, true))
- return i;
- out:
- return MAX_NUM_VFS;
- }