  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015 QLogic Corporation
  3. *
  4. * This software is available under the terms of the GNU General Public License
  5. * (GPL) Version 2, available from the file COPYING in the main directory of
  6. * this source tree.
  7. */
  8. #include "qed_hw.h"
  9. #include "qed_int.h"
  10. #include "qed_reg_addr.h"
  11. #include "qed_sriov.h"
  12. #include "qed_vf.h"
  13. bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
  14. int rel_vf_id, bool b_enabled_only)
  15. {
  16. if (!p_hwfn->pf_iov_info) {
  17. DP_NOTICE(p_hwfn->cdev, "No iov info\n");
  18. return false;
  19. }
  20. if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
  21. (rel_vf_id < 0))
  22. return false;
  23. if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
  24. b_enabled_only)
  25. return false;
  26. return true;
  27. }
  28. static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
  29. {
  30. struct qed_hw_sriov_info *iov = cdev->p_iov_info;
  31. int pos = iov->pos;
  32. DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
  33. pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
  34. pci_read_config_word(cdev->pdev,
  35. pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
  36. pci_read_config_word(cdev->pdev,
  37. pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
  38. pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
  39. if (iov->num_vfs) {
  40. DP_VERBOSE(cdev,
  41. QED_MSG_IOV,
  42. "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
  43. iov->num_vfs = 0;
  44. }
  45. pci_read_config_word(cdev->pdev,
  46. pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
  47. pci_read_config_word(cdev->pdev,
  48. pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
  49. pci_read_config_word(cdev->pdev,
  50. pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
  51. pci_read_config_dword(cdev->pdev,
  52. pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
  53. pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
  54. pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
  55. DP_VERBOSE(cdev,
  56. QED_MSG_IOV,
  57. "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
  58. iov->nres,
  59. iov->cap,
  60. iov->ctrl,
  61. iov->total_vfs,
  62. iov->initial_vfs,
  63. iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
  64. /* Some sanity checks */
  65. if (iov->num_vfs > NUM_OF_VFS(cdev) ||
  66. iov->total_vfs > NUM_OF_VFS(cdev)) {
  67. /* This can happen only due to a bug. In this case we set
  68. * num_vfs to zero to avoid memory corruption in the code that
  69. * assumes max number of vfs
  70. */
  71. DP_NOTICE(cdev,
  72. "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
  73. iov->num_vfs);
  74. iov->num_vfs = 0;
  75. iov->total_vfs = 0;
  76. }
  77. return 0;
  78. }
  79. static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
  80. struct qed_ptt *p_ptt)
  81. {
  82. struct qed_igu_block *p_sb;
  83. u16 sb_id;
  84. u32 val;
  85. if (!p_hwfn->hw_info.p_igu_info) {
  86. DP_ERR(p_hwfn,
  87. "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
  88. return;
  89. }
  90. for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
  91. sb_id++) {
  92. p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
  93. if ((p_sb->status & QED_IGU_STATUS_FREE) &&
  94. !(p_sb->status & QED_IGU_STATUS_PF)) {
  95. val = qed_rd(p_hwfn, p_ptt,
  96. IGU_REG_MAPPING_MEMORY + sb_id * 4);
  97. SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
  98. qed_wr(p_hwfn, p_ptt,
  99. IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
  100. }
  101. }
  102. }
  103. static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
  104. {
  105. struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
  106. struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
  107. struct qed_bulletin_content *p_bulletin_virt;
  108. dma_addr_t req_p, rply_p, bulletin_p;
  109. union pfvf_tlvs *p_reply_virt_addr;
  110. union vfpf_tlvs *p_req_virt_addr;
  111. u8 idx = 0;
  112. memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
  113. p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
  114. req_p = p_iov_info->mbx_msg_phys_addr;
  115. p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
  116. rply_p = p_iov_info->mbx_reply_phys_addr;
  117. p_bulletin_virt = p_iov_info->p_bulletins;
  118. bulletin_p = p_iov_info->bulletins_phys;
  119. if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
  120. DP_ERR(p_hwfn,
  121. "qed_iov_setup_vfdb called without allocating mem first\n");
  122. return;
  123. }
  124. for (idx = 0; idx < p_iov->total_vfs; idx++) {
  125. struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
  126. u32 concrete;
  127. vf->vf_mbx.req_virt = p_req_virt_addr + idx;
  128. vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
  129. vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
  130. vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
  131. vf->state = VF_STOPPED;
  132. vf->b_init = false;
  133. vf->bulletin.phys = idx *
  134. sizeof(struct qed_bulletin_content) +
  135. bulletin_p;
  136. vf->bulletin.p_virt = p_bulletin_virt + idx;
  137. vf->bulletin.size = sizeof(struct qed_bulletin_content);
  138. vf->relative_vf_id = idx;
  139. vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
  140. concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
  141. vf->concrete_fid = concrete;
  142. vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
  143. (vf->abs_vf_id << 8);
  144. vf->vport_id = idx + 1;
  145. }
  146. }
  147. static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
  148. {
  149. struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
  150. void **p_v_addr;
  151. u16 num_vfs = 0;
  152. num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
  153. DP_VERBOSE(p_hwfn, QED_MSG_IOV,
  154. "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
  155. /* Allocate PF Mailbox buffer (per-VF) */
  156. p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
  157. p_v_addr = &p_iov_info->mbx_msg_virt_addr;
  158. *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
  159. p_iov_info->mbx_msg_size,
  160. &p_iov_info->mbx_msg_phys_addr,
  161. GFP_KERNEL);
  162. if (!*p_v_addr)
  163. return -ENOMEM;
  164. /* Allocate PF Mailbox Reply buffer (per-VF) */
  165. p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
  166. p_v_addr = &p_iov_info->mbx_reply_virt_addr;
  167. *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
  168. p_iov_info->mbx_reply_size,
  169. &p_iov_info->mbx_reply_phys_addr,
  170. GFP_KERNEL);
  171. if (!*p_v_addr)
  172. return -ENOMEM;
  173. p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
  174. num_vfs;
  175. p_v_addr = &p_iov_info->p_bulletins;
  176. *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
  177. p_iov_info->bulletins_size,
  178. &p_iov_info->bulletins_phys,
  179. GFP_KERNEL);
  180. if (!*p_v_addr)
  181. return -ENOMEM;
  182. DP_VERBOSE(p_hwfn,
  183. QED_MSG_IOV,
  184. "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
  185. p_iov_info->mbx_msg_virt_addr,
  186. (u64) p_iov_info->mbx_msg_phys_addr,
  187. p_iov_info->mbx_reply_virt_addr,
  188. (u64) p_iov_info->mbx_reply_phys_addr,
  189. p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
  190. return 0;
  191. }
  192. static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
  193. {
  194. struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
  195. if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
  196. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  197. p_iov_info->mbx_msg_size,
  198. p_iov_info->mbx_msg_virt_addr,
  199. p_iov_info->mbx_msg_phys_addr);
  200. if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
  201. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  202. p_iov_info->mbx_reply_size,
  203. p_iov_info->mbx_reply_virt_addr,
  204. p_iov_info->mbx_reply_phys_addr);
  205. if (p_iov_info->p_bulletins)
  206. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  207. p_iov_info->bulletins_size,
  208. p_iov_info->p_bulletins,
  209. p_iov_info->bulletins_phys);
  210. }
  211. int qed_iov_alloc(struct qed_hwfn *p_hwfn)
  212. {
  213. struct qed_pf_iov *p_sriov;
  214. if (!IS_PF_SRIOV(p_hwfn)) {
  215. DP_VERBOSE(p_hwfn, QED_MSG_IOV,
  216. "No SR-IOV - no need for IOV db\n");
  217. return 0;
  218. }
  219. p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
  220. if (!p_sriov) {
  221. DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
  222. return -ENOMEM;
  223. }
  224. p_hwfn->pf_iov_info = p_sriov;
  225. return qed_iov_allocate_vfdb(p_hwfn);
  226. }
  227. void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
  228. {
  229. if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
  230. return;
  231. qed_iov_setup_vfdb(p_hwfn);
  232. qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
  233. }
  234. void qed_iov_free(struct qed_hwfn *p_hwfn)
  235. {
  236. if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
  237. qed_iov_free_vfdb(p_hwfn);
  238. kfree(p_hwfn->pf_iov_info);
  239. }
  240. }
  241. void qed_iov_free_hw_info(struct qed_dev *cdev)
  242. {
  243. kfree(cdev->p_iov_info);
  244. cdev->p_iov_info = NULL;
  245. }
  246. int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
  247. {
  248. struct qed_dev *cdev = p_hwfn->cdev;
  249. int pos;
  250. int rc;
  251. /* Learn the PCI configuration */
  252. pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
  253. PCI_EXT_CAP_ID_SRIOV);
  254. if (!pos) {
  255. DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
  256. return 0;
  257. }
  258. /* Allocate a new struct for IOV information */
  259. cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
  260. if (!cdev->p_iov_info) {
  261. DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
  262. return -ENOMEM;
  263. }
  264. cdev->p_iov_info->pos = pos;
  265. rc = qed_iov_pci_cfg_info(cdev);
  266. if (rc)
  267. return rc;
  268. /* We want PF IOV to be synonemous with the existance of p_iov_info;
  269. * In case the capability is published but there are no VFs, simply
  270. * de-allocate the struct.
  271. */
  272. if (!cdev->p_iov_info->total_vfs) {
  273. DP_VERBOSE(p_hwfn, QED_MSG_IOV,
  274. "IOV capabilities, but no VFs are published\n");
  275. kfree(cdev->p_iov_info);
  276. cdev->p_iov_info = NULL;
  277. return 0;
  278. }
  279. /* Calculate the first VF index - this is a bit tricky; Basically,
  280. * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
  281. * after the first engine's VFs.
  282. */
  283. cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
  284. p_hwfn->abs_pf_id - 16;
  285. if (QED_PATH_ID(p_hwfn))
  286. cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
  287. DP_VERBOSE(p_hwfn, QED_MSG_IOV,
  288. "First VF in hwfn 0x%08x\n",
  289. cdev->p_iov_info->first_vf_in_pf);
  290. return 0;
  291. }
  292. u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
  293. {
  294. struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
  295. u16 i;
  296. if (!p_iov)
  297. goto out;
  298. for (i = rel_vf_id; i < p_iov->total_vfs; i++)
  299. if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true))
  300. return i;
  301. out:
  302. return MAX_NUM_VFS;
  303. }