/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
#include <asm/compat.h>
#include <asm/clp.h>
#include <uapi/asm/clp.h>

bool zpci_unique_uid;

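/* Dump the response code and return code of a failed CLP request to the zpci error trace. */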
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
        struct {
                unsigned int rsp;
                int rc;
        } __packed data = {rsp, rc};

        zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor with c=1, lps=0 and command 1
 * to get the bit mask of installed logical processors
 */
static inline int clp_get_ilp(unsigned long *ilp)
{
        unsigned long mask;
        int cc = 3;

        asm volatile (
                " .insn rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
                "0: ipm %[cc]\n"
                " srl %[cc],28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
                : "cc");
        *ilp = mask;
        return cc;
}

/*
 * Call Logical Processor with c=0, the given constant lps and an lpcb request.
 */
static inline int clp_req(void *data, unsigned int lps)
{
        struct { u8 _[CLP_BLK_SIZE]; } *req = data;
        u64 ignored;
        int cc = 3;

        asm volatile (
                " .insn rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
                "0: ipm %[cc]\n"
                " srl %[cc],28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
                : [req] "a" (req), [lps] "i" (lps)
                : "cc");
        return cc;
}

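/* CLP request/response blocks are handled as one contiguous buffer of CLP_BLK_SIZE bytes. */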
static void *clp_alloc_block(gfp_t gfp_mask)
{
        return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
        free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

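/* Copy the function-group properties from a Query PCI Function Group response into zdev. */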
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
                                      struct clp_rsp_query_pci_grp *response)
{
        zdev->tlb_refresh = response->refresh;
        zdev->dma_mask = response->dasm;
        zdev->msi_addr = response->msia;
        zdev->max_msi = response->noi;
        zdev->fmb_update = response->mui;

        switch (response->version) {
        case 1:
                zdev->max_bus_speed = PCIE_SPEED_5_0GT;
                break;
        default:
                zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
                break;
        }
}

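/* Issue a Query PCI Function Group request for the group @pfgid and store the result in @zdev. */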
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
        struct clp_req_rsp_query_pci_grp *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        memset(rrb, 0, sizeof(*rrb));
        rrb->request.hdr.len = sizeof(rrb->request);
        rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
        rrb->response.hdr.len = sizeof(rrb->response);
        rrb->request.pfgid = pfgid;

        rc = clp_req(rrb, CLP_LPS_PCI);
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                clp_store_query_pci_fngrp(zdev, &rrb->response);
        else {
                zpci_err("Q PCI FGRP:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
        return rc;
}

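/* Copy the per-function properties (BARs, DMA range, IDs, ...) from a Query PCI Function response into zdev. */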
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
                                  struct clp_rsp_query_pci *response)
{
        int i;

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                zdev->bars[i].val = le32_to_cpu(response->bar[i]);
                zdev->bars[i].size = response->bar_size[i];
        }
        zdev->start_dma = response->sdma;
        zdev->end_dma = response->edma;
        zdev->pchid = response->pchid;
        zdev->pfgid = response->pfgid;
        zdev->pft = response->pft;
        zdev->vfn = response->vfn;
        zdev->uid = response->uid;
        zdev->fmb_length = sizeof(u32) * response->fmb_len;

        memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
        if (response->util_str_avail) {
                memcpy(zdev->util_str, response->util_str,
                       sizeof(zdev->util_str));
        }

        return 0;
}

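/*
 * Query the PCI function identified by the handle @fh and its function
 * group, and store both sets of properties in @zdev.
 */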
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
        struct clp_req_rsp_query_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        memset(rrb, 0, sizeof(*rrb));
        rrb->request.hdr.len = sizeof(rrb->request);
        rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
        rrb->response.hdr.len = sizeof(rrb->response);
        rrb->request.fh = fh;

        rc = clp_req(rrb, CLP_LPS_PCI);
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
                rc = clp_store_query_pci_fn(zdev, &rrb->response);
                if (rc)
                        goto out;
                rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
        } else {
                zpci_err("Q PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
out:
        clp_free_block(rrb);
        return rc;
}

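/*
 * Allocate a zpci_dev for the function identified by @fid/@fh, query its
 * properties and register it in the configured or standby state.
 */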
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
        struct zpci_dev *zdev;
        int rc;

        zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
        if (!zdev)
                return -ENOMEM;

        zdev->fh = fh;
        zdev->fid = fid;

        /* Query function properties and update zdev */
        rc = clp_query_pci_fn(zdev, fh);
        if (rc)
                goto error;

        if (configured)
                zdev->state = ZPCI_FN_STATE_CONFIGURED;
        else
                zdev->state = ZPCI_FN_STATE_STANDBY;

        rc = zpci_create_device(zdev);
        if (rc)
                goto error;
        return 0;

error:
        kfree(zdev);
        return rc;
}

/*
 * Enable/Disable a given PCI function defined by its function handle.
 */
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
        struct clp_req_rsp_set_pci *rrb;
        int rc, retries = 100;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        do {
                memset(rrb, 0, sizeof(*rrb));
                rrb->request.hdr.len = sizeof(rrb->request);
                rrb->request.hdr.cmd = CLP_SET_PCI_FN;
                rrb->response.hdr.len = sizeof(rrb->response);
                rrb->request.fh = *fh;
                rrb->request.oc = command;
                rrb->request.ndas = nr_dma_as;

                rc = clp_req(rrb, CLP_LPS_PCI);
                if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
                        retries--;
                        if (retries < 0)
                                break;
                        msleep(20);
                }
        } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                *fh = rrb->response.fh;
        else {
                zpci_err("Set PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
        return rc;
}

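/*
 * Enable the PCI function with @nr_dma_as DMA address spaces and, on success,
 * store the new (enabled) function handle in zdev.
 */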
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
        u32 fh = zdev->fh;
        int rc;

        rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
        if (!rc)
                /* Success -> store enabled handle in zdev */
                zdev->fh = fh;

        zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
        return rc;
}

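/* Disable the PCI function; a no-op if the function is not currently enabled. */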
int clp_disable_fh(struct zpci_dev *zdev)
{
        u32 fh = zdev->fh;
        int rc;

        if (!zdev_enabled(zdev))
                return 0;

        rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
        if (!rc)
                /* Success -> store disabled handle in zdev */
                zdev->fh = fh;

        zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
        return rc;
}

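/*
 * Walk the complete PCI function list, following resume tokens across
 * successive List PCI requests, and invoke @cb for every returned entry.
 */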
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
                        void (*cb)(struct clp_fh_list_entry *entry))
{
        u64 resume_token = 0;
        int entries, i, rc;

        do {
                memset(rrb, 0, sizeof(*rrb));
                rrb->request.hdr.len = sizeof(rrb->request);
                rrb->request.hdr.cmd = CLP_LIST_PCI;
                /* store as many entries as possible */
                rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
                rrb->request.resume_token = resume_token;

                /* Get PCI function handle list */
                rc = clp_req(rrb, CLP_LPS_PCI);
                if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
                        zpci_err("List PCI FN:\n");
                        zpci_err_clp(rrb->response.hdr.rsp, rc);
                        rc = -EIO;
                        goto out;
                }

                zpci_unique_uid = rrb->response.uid_checking;
                WARN_ON_ONCE(rrb->response.entry_size !=
                        sizeof(struct clp_fh_list_entry));

                entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
                        rrb->response.entry_size;

                resume_token = rrb->response.resume_token;
                for (i = 0; i < entries; i++)
                        cb(&rrb->response.fh_list[i]);
        } while (resume_token);
out:
        return rc;
}

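/* List callback: register every populated entry as a new PCI device. */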
static void __clp_add(struct clp_fh_list_entry *entry)
{
        if (!entry->vendor_id)
                return;

        clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

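/*
 * List callback for a full rescan: add functions that are not yet known and
 * tear down known functions that have been deconfigured in the meantime.
 */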
static void __clp_rescan(struct clp_fh_list_entry *entry)
{
        struct zpci_dev *zdev;

        if (!entry->vendor_id)
                return;

        zdev = get_zdev_by_fid(entry->fid);
        if (!zdev) {
                clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
                return;
        }

        if (!entry->config_state) {
                /*
                 * The handle is already disabled, so the iota/irq resources
                 * can no longer be freed via the firmware interfaces. The
                 * resources (DMA memory, debug, sysfs) have to be freed
                 * manually.
                 */
                zpci_stop_device(zdev);
        }
}

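/* List callback: refresh the cached function handle of already known devices. */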
static void __clp_update(struct clp_fh_list_entry *entry)
{
        struct zpci_dev *zdev;

        if (!entry->vendor_id)
                return;

        zdev = get_zdev_by_fid(entry->fid);
        if (!zdev)
                return;

        zdev->fh = entry->fh;
}

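/*
 * The wrappers below run the List PCI walk with different callbacks: the
 * initial scan adds all functions, the full rescan adds new and stops
 * deconfigured ones, and the simple variant only refreshes function handles
 * using a non-blocking (GFP_NOWAIT) allocation.
 */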
int clp_scan_pci_devices(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, __clp_add);

        clp_free_block(rrb);
        return rc;
}

int clp_rescan_pci_devices(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, __clp_rescan);

        clp_free_block(rrb);
        return rc;
}

int clp_rescan_pci_devices_simple(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_NOWAIT);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, __clp_update);

        clp_free_block(rrb);
        return rc;
}

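/*
 * The remainder of this file implements the "clp" misc character device:
 * CLP request blocks submitted by user space are validated and forwarded to
 * firmware via clp_req().
 */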
static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
}

static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
        switch (lpcb->cmd) {
        case 0x0001: /* store logical-processor characteristics */
                return clp_base_slpc(req, (void *) lpcb);
        default:
                return -EINVAL;
        }
}

static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        if (lpcb->request.reserved2 != 0)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query(struct clp_req *req,
                         struct clp_req_rsp_query_pci *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query_grp(struct clp_req *req,
                             struct clp_req_rsp_query_pci_grp *lpcb)
{
        unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

        if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
            lpcb->response.hdr.len > limit)
                return -EINVAL;
        if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
            lpcb->request.reserved4 != 0)
                return -EINVAL;
        return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
        switch (lpcb->cmd) {
        case 0x0001: /* store logical-processor characteristics */
                return clp_pci_slpc(req, (void *) lpcb);
        case 0x0002: /* list PCI functions */
                return clp_pci_list(req, (void *) lpcb);
        case 0x0003: /* query PCI function */
                return clp_pci_query(req, (void *) lpcb);
        case 0x0004: /* query PCI function group */
                return clp_pci_query_grp(req, (void *) lpcb);
        default:
                return -EINVAL;
        }
}

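/*
 * Handle a "normal" (c=0) request: copy the user's CLP block into a kernel
 * buffer, validate and execute it, then copy the result back.
 */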
static int clp_normal_command(struct clp_req *req)
{
        struct clp_req_hdr *lpcb;
        void __user *uptr;
        int rc;

        rc = -EINVAL;
        if (req->lps != 0 && req->lps != 2)
                goto out;

        rc = -ENOMEM;
        lpcb = clp_alloc_block(GFP_KERNEL);
        if (!lpcb)
                goto out;

        rc = -EFAULT;
        uptr = (void __force __user *)(unsigned long) req->data_p;
        if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
                goto out_free;

        rc = -EINVAL;
        if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
                goto out_free;

        switch (req->lps) {
        case 0:
                rc = clp_base_command(req, lpcb);
                break;
        case 2:
                rc = clp_pci_command(req, lpcb);
                break;
        }
        if (rc)
                goto out_free;

        rc = -EFAULT;
        if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
                goto out_free;

        rc = 0;

out_free:
        clp_free_block(lpcb);
out:
        return rc;
}

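/*
 * Handle an "immediate" (c=1) request: answer directly from the installed
 * logical processor mask without building a request block.
 */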
static int clp_immediate_command(struct clp_req *req)
{
        void __user *uptr;
        unsigned long ilp;
        int exists;

        if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
                return -EINVAL;

        uptr = (void __force __user *)(unsigned long) req->data_p;
        if (req->cmd == 0) {
                /* Command code 0: test for a specific processor */
                exists = test_bit_inv(req->lps, &ilp);
                return put_user(exists, (int __user *) uptr);
        }
        /* Command code 1: return bit mask of installed processors */
        return put_user(ilp, (unsigned long __user *) uptr);
}

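/* Single ioctl (CLP_SYNC): copy in the clp_req descriptor and dispatch to the immediate or normal path. */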
static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
                           unsigned long arg)
{
        struct clp_req req;
        void __user *argp;

        if (cmd != CLP_SYNC)
                return -EINVAL;

        argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;
        if (req.r != 0)
                return -EINVAL;

        return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
}

static int clp_misc_release(struct inode *inode, struct file *filp)
{
        return 0;
}

static const struct file_operations clp_misc_fops = {
        .owner = THIS_MODULE,
        .open = nonseekable_open,
        .release = clp_misc_release,
        .unlocked_ioctl = clp_misc_ioctl,
        .compat_ioctl = clp_misc_ioctl,
        .llseek = no_llseek,
};

static struct miscdevice clp_misc_device = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "clp",
        .fops = &clp_misc_fops,
};

static int __init clp_misc_init(void)
{
        return misc_register(&clp_misc_device);
}

device_initcall(clp_misc_init);