// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Device Feature List (DFL) Support
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Zhang Yi <yi.z.zhang@intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */
#include <linux/module.h>

#include "dfl.h"

static DEFINE_MUTEX(dfl_id_mutex);
/*
 * When adding support for a new feature dev in the DFL framework, add a new
 * item to enum dfl_id_type and provide the related information in the
 * dfl_devs table below, which is indexed by dfl_id_type, e.g. the name string
 * used for platform device creation (define name strings in dfl.h, as they
 * could be reused by platform device drivers).
 *
 * If the new feature dev needs chardev support, also add a new item to the
 * dfl_chrdevs table and configure dfl_devs[i].devt_type as the index into the
 * dfl_chrdevs table. If no chardev support is needed, just set devt_type to
 * an invalid index (DFL_FPGA_DEVT_MAX). An illustrative example follows the
 * dfl_chrdevs table below.
 */
enum dfl_id_type {
	FME_ID,		/* fme id allocation and mapping */
	PORT_ID,	/* port id allocation and mapping */
	DFL_ID_MAX,
};

enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,
};

/**
 * struct dfl_dev_info - dfl feature device information.
 * @name: name string of the feature platform device.
 * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
 * @id: idr id of the feature dev.
 * @devt_type: index to dfl_chrdevs[].
 */
struct dfl_dev_info {
	const char *name;
	u32 dfh_id;
	struct idr id;
	enum dfl_fpga_devt_type devt_type;
};

/* it is indexed by dfl_id_type */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};
/**
 * struct dfl_chardev_info - chardev information of dfl feature device
 * @name: name string of the char device.
 * @devt: devt of the char device.
 */
struct dfl_chardev_info {
	const char *name;
	dev_t devt;
};

/* indexed by enum dfl_fpga_devt_type */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};
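
/*
 * Illustrative sketch only (hypothetical, not part of this driver): adding a
 * new feature dev as described above would add an id before DFL_ID_MAX, a
 * devt type before DFL_FPGA_DEVT_MAX (if chardev support is needed), and
 * matching table entries. DFH_ID_FIU_ACCEL and DFL_FPGA_FEATURE_DEV_ACCEL
 * below are made-up names that would have to be defined in dfl.h.
 *
 *	// in enum dfl_id_type:        ACCEL_ID,
 *	// in enum dfl_fpga_devt_type: DFL_FPGA_DEVT_ACCEL,
 *
 *	// new entry in dfl_devs[]:
 *	{.name = DFL_FPGA_FEATURE_DEV_ACCEL, .dfh_id = DFH_ID_FIU_ACCEL,
 *	 .devt_type = DFL_FPGA_DEVT_ACCEL},
 *
 *	// new entry in dfl_chrdevs[]:
 *	{.name = DFL_FPGA_FEATURE_DEV_ACCEL},
 */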
static void dfl_ids_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		idr_init(&dfl_devs[i].id);
}

static void dfl_ids_destroy(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		idr_destroy(&dfl_devs[i].id);
}

static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
{
	int id;

	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
	mutex_unlock(&dfl_id_mutex);

	return id;
}

static void dfl_id_free(enum dfl_id_type type, int id)
{
	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	idr_remove(&dfl_devs[type].id, id);
	mutex_unlock(&dfl_id_mutex);
}

static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		if (!strcmp(dfl_devs[i].name, pdev->name))
			return i;

	return DFL_ID_MAX;
}

static enum dfl_id_type dfh_id_to_type(u32 id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		if (dfl_devs[i].dfh_id == id)
			return i;

	return DFL_ID_MAX;
}
/*
 * Introduce a global port_ops list. It allows port drivers to register their
 * ops in this list, so that other feature devices (e.g. FME) can use the port
 * functions even when the related port platform device is hidden. One example
 * is the virtualization case of a PCIe-based FPGA DFL device: when SRIOV is
 * enabled, the port (and its AFU) is turned into a VF and the port platform
 * device is hidden from the system, but the FME still needs to access the
 * port to finish FPGA reconfiguration.
 */
static DEFINE_MUTEX(dfl_port_ops_mutex);
static LIST_HEAD(dfl_port_ops_list);
/**
 * dfl_fpga_port_ops_get - get matched port ops from the global list
 * @pdev: platform device to match with associated port ops.
 * Return: matched port ops on success, NULL otherwise.
 *
 * Please note that you must call dfl_fpga_port_ops_put() after using the
 * port_ops.
 */
struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
{
	struct dfl_fpga_port_ops *ops = NULL;

	mutex_lock(&dfl_port_ops_mutex);
	if (list_empty(&dfl_port_ops_list))
		goto done;

	list_for_each_entry(ops, &dfl_port_ops_list, node) {
		/* match port_ops using the name of platform device */
		if (!strcmp(pdev->name, ops->name)) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			goto done;
		}
	}

	ops = NULL;
done:
	mutex_unlock(&dfl_port_ops_mutex);
	return ops;
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);

/**
 * dfl_fpga_port_ops_put - put port ops
 * @ops: port ops.
 */
void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
{
	if (ops && ops->owner)
		module_put(ops->owner);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);

/**
 * dfl_fpga_port_ops_add - add port_ops to global list
 * @ops: port ops to add.
 */
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_add_tail(&ops->node, &dfl_port_ops_list);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);

/**
 * dfl_fpga_port_ops_del - remove port_ops from global list
 * @ops: port ops to del.
 */
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_del(&ops->node);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
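
/*
 * Illustrative usage sketch (not part of this file): a port driver would
 * typically register its ops at probe time and remove them at remove time,
 * while another feature device (e.g. FME) looks them up via the port's
 * platform device. The members of struct dfl_fpga_port_ops are defined in
 * dfl.h; only .name, .owner and .node are assumed here.
 *
 *	static struct dfl_fpga_port_ops my_port_ops = {
 *		.name  = DFL_FPGA_FEATURE_DEV_PORT,
 *		.owner = THIS_MODULE,
 *	};
 *
 *	// in the port driver
 *	dfl_fpga_port_ops_add(&my_port_ops);
 *	...
 *	dfl_fpga_port_ops_del(&my_port_ops);
 *
 *	// in a user such as FME
 *	struct dfl_fpga_port_ops *ops = dfl_fpga_port_ops_get(port_pdev);
 *	if (ops) {
 *		// call port callbacks here
 *		dfl_fpga_port_ops_put(ops);
 *	}
 */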
/**
 * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
 * @pdev: feature device.
 */
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature *feature;

	dfl_fpga_dev_for_each_feature(pdata, feature)
		if (feature->ops) {
			feature->ops->uinit(pdev, feature);
			feature->ops = NULL;
		}
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);

static int dfl_feature_instance_init(struct platform_device *pdev,
				     struct dfl_feature_platform_data *pdata,
				     struct dfl_feature *feature,
				     struct dfl_feature_driver *drv)
{
	int ret;

	ret = drv->ops->init(pdev, feature);
	if (ret)
		return ret;

	feature->ops = drv->ops;

	return ret;
}
/**
 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
 * @pdev: feature device.
 * @feature_drvs: drvs for sub features.
 *
 * This function will match sub features with given feature drvs list and
 * use matched drv to init related sub feature.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
			      struct dfl_feature_driver *feature_drvs)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature_driver *drv = feature_drvs;
	struct dfl_feature *feature;
	int ret;

	while (drv->ops) {
		dfl_fpga_dev_for_each_feature(pdata, feature) {
			/* match feature and drv using id */
			if (feature->id == drv->id) {
				ret = dfl_feature_instance_init(pdev, pdata,
								feature, drv);
				if (ret)
					goto exit;
			}
		}
		drv++;
	}

	return 0;
exit:
	dfl_fpga_dev_feature_uinit(pdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
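
/*
 * Illustrative sketch (not part of this file): a feature platform driver
 * (e.g. for FME or PORT) passes a feature_drvs table terminated by an entry
 * with .ops == NULL, since the while (drv->ops) loop above relies on that.
 * The ids, ops and callbacks below are hypothetical; the ops type is assumed
 * to be the dfl_feature_ops structure from dfl.h.
 *
 *	static const struct dfl_feature_ops my_hdr_ops = {
 *		.init  = my_hdr_init,
 *		.uinit = my_hdr_uinit,
 *	};
 *
 *	static struct dfl_feature_driver my_feature_drvs[] = {
 *		{
 *			.id  = FEATURE_ID_FIU_HEADER,
 *			.ops = &my_hdr_ops,
 *		},
 *		{
 *			.ops = NULL,	// sentinel, terminates the table
 *		},
 *	};
 *
 *	// in the feature platform driver's probe()
 *	ret = dfl_fpga_dev_feature_init(pdev, my_feature_drvs);
 */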
static void dfl_chardev_uinit(void)
{
	int i;

	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
		if (MAJOR(dfl_chrdevs[i].devt)) {
			unregister_chrdev_region(dfl_chrdevs[i].devt,
						 MINORMASK);
			dfl_chrdevs[i].devt = MKDEV(0, 0);
		}
}

static int dfl_chardev_init(void)
{
	int i, ret;

	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
		ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, MINORMASK,
					  dfl_chrdevs[i].name);
		if (ret)
			goto exit;
	}

	return 0;

exit:
	dfl_chardev_uinit();
	return ret;
}

static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
{
	if (type >= DFL_FPGA_DEVT_MAX)
		return 0;

	return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
}
/**
 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
 *
 * @pdev: feature dev.
 * @fops: file operations for feature dev's cdev.
 * @owner: owning module/driver.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * set parent to the feature device so that its refcount is
	 * decreased after the last refcount of cdev is gone, that
	 * makes sure the feature device is valid during device
	 * file's life-cycle.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);

/**
 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
 * @pdev: feature dev.
 */
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_del(&pdata->cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
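
/*
 * Illustrative sketch (not part of this file): a feature platform driver that
 * exposes a device node would register its file_operations in probe() and
 * unregister them in remove(). The fops and callbacks below are hypothetical.
 *
 *	static const struct file_operations my_feature_fops = {
 *		.owner = THIS_MODULE,
 *		.open  = my_feature_open,
 *	};
 *
 *	// probe()
 *	ret = dfl_fpga_dev_ops_register(pdev, &my_feature_fops, THIS_MODULE);
 *	if (ret)
 *		return ret;
 *
 *	// remove()
 *	dfl_fpga_dev_ops_unregister(pdev);
 */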
/**
 * struct build_feature_devs_info - info collected during feature dev build.
 *
 * @dev: device to enumerate.
 * @cdev: the container device for all feature devices.
 * @feature_dev: current feature device.
 * @ioaddr: header register region address of feature device in enumeration.
 * @sub_features: a sub features linked list for feature device in enumeration.
 * @feature_num: number of sub features for feature device in enumeration.
 */
struct build_feature_devs_info {
	struct device *dev;
	struct dfl_fpga_cdev *cdev;
	struct platform_device *feature_dev;
	void __iomem *ioaddr;
	struct list_head sub_features;
	int feature_num;
};

/**
 * struct dfl_feature_info - sub feature info collected during feature dev build
 *
 * @fid: id of this sub feature.
 * @mmio_res: mmio resource of this sub feature.
 * @ioaddr: mapped base address of mmio resource.
 * @node: node in sub_features linked list.
 */
struct dfl_feature_info {
	u64 fid;
	struct resource mmio_res;
	void __iomem *ioaddr;
	struct list_head node;
};
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
				       struct platform_device *port)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);

	mutex_lock(&cdev->lock);
	list_add(&pdata->node, &cdev->port_dev_list);
	get_device(&pdata->dev->dev);
	mutex_unlock(&cdev->lock);
}
/*
 * Register the current feature device. It is called when we need to switch to
 * parsing another feature, or when we have parsed all features on the given
 * device feature list.
 */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
	struct platform_device *fdev = binfo->feature_dev;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature_info *finfo, *p;
	int ret, index = 0;

	if (!fdev)
		return 0;

	/*
	 * we do not need to care for the memory which is associated with
	 * the platform device. After calling platform_device_unregister(),
	 * it will be automatically freed by device's release() callback,
	 * platform_device_release().
	 */
	pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
			GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->dev = fdev;
	pdata->num = binfo->feature_num;
	pdata->dfl_cdev = binfo->cdev;
	mutex_init(&pdata->lock);

	/*
	 * the count should be initialized to 0 to make sure
	 * __fpga_port_enable() following __fpga_port_disable()
	 * works properly for the port device,
	 * and it should always be 0 for the fme device.
	 */
	WARN_ON(pdata->disable_count);

	fdev->dev.platform_data = pdata;

	/* each sub feature has one MMIO resource */
	fdev->num_resources = binfo->feature_num;
	fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
				 GFP_KERNEL);
	if (!fdev->resource)
		return -ENOMEM;

	/* fill features and resource information for feature dev */
	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
		struct dfl_feature *feature = &pdata->features[index];

		/* save resource information for each feature */
		feature->id = finfo->fid;
		feature->resource_index = index;
		feature->ioaddr = finfo->ioaddr;
		fdev->resource[index++] = finfo->mmio_res;

		list_del(&finfo->node);
		kfree(finfo);
	}

	ret = platform_device_add(binfo->feature_dev);
	if (!ret) {
		if (feature_dev_id_type(binfo->feature_dev) == PORT_ID)
			dfl_fpga_cdev_add_port_dev(binfo->cdev,
						   binfo->feature_dev);
		else
			binfo->cdev->fme_dev =
					get_device(&binfo->feature_dev->dev);
		/*
		 * reset it to avoid build_info_free() freeing its resources.
		 *
		 * The resources of successfully registered feature devices
		 * will be freed by platform_device_unregister(). See the
		 * comments in build_info_create_dev().
		 */
		binfo->feature_dev = NULL;
	}

	return ret;
}
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
		      enum dfl_id_type type, void __iomem *ioaddr)
{
	struct platform_device *fdev;
	int ret;

	if (type >= DFL_ID_MAX)
		return -EINVAL;

	/* we will create a new device, commit current device first */
	ret = build_info_commit_dev(binfo);
	if (ret)
		return ret;

	/*
	 * we use -ENODEV as the initialization indicator which indicates
	 * whether the id needs to be reclaimed
	 */
	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
	if (!fdev)
		return -ENOMEM;

	binfo->feature_dev = fdev;
	binfo->feature_num = 0;
	binfo->ioaddr = ioaddr;
	INIT_LIST_HEAD(&binfo->sub_features);

	fdev->id = dfl_id_alloc(type, &fdev->dev);
	if (fdev->id < 0)
		return fdev->id;

	fdev->dev.parent = &binfo->cdev->region->dev;
	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);

	return 0;
}
static void build_info_free(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_info *finfo, *p;

	/*
	 * it is a valid id, free it. See comments in
	 * build_info_create_dev()
	 */
	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
			    binfo->feature_dev->id);

		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
			list_del(&finfo->node);
			kfree(finfo);
		}
	}

	platform_device_put(binfo->feature_dev);

	devm_kfree(binfo->dev, binfo);
}
static inline u32 feature_size(void __iomem *start)
{
	u64 v = readq(start + DFH);
	u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

	/* workaround for private features with invalid size, use 4K instead */
	return ofst ? ofst : 4096;
}

static u64 feature_id(void __iomem *start)
{
	u64 v = readq(start + DFH);
	u16 id = FIELD_GET(DFH_ID, v);
	u8 type = FIELD_GET(DFH_TYPE, v);

	if (type == DFH_TYPE_FIU)
		return FEATURE_ID_FIU_HEADER;
	else if (type == DFH_TYPE_PRIVATE)
		return id;
	else if (type == DFH_TYPE_AFU)
		return FEATURE_ID_AFU;

	WARN_ON(1);
	return 0;
}
/*
 * When creating sub feature instances, for private features the caller does
 * not need to provide the resource size and feature id, as they can be read
 * from the DFH register. For the AFU sub feature, its register region only
 * contains user defined registers, so never trust any information from it;
 * just use the resource size information provided by its parent FIU.
 */
static int
create_feature_instance(struct build_feature_devs_info *binfo,
			struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
			resource_size_t size, u64 fid)
{
	struct dfl_feature_info *finfo;

	/* read feature size and id if inputs are invalid */
	size = size ? size : feature_size(dfl->ioaddr + ofst);
	fid = fid ? fid : feature_id(dfl->ioaddr + ofst);

	if (dfl->len - ofst < size)
		return -EINVAL;

	finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
	if (!finfo)
		return -ENOMEM;

	finfo->fid = fid;
	finfo->mmio_res.start = dfl->start + ofst;
	finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
	finfo->mmio_res.flags = IORESOURCE_MEM;
	finfo->ioaddr = dfl->ioaddr + ofst;

	list_add_tail(&finfo->node, &binfo->sub_features);
	binfo->feature_num++;

	return 0;
}
static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
				  struct dfl_fpga_enum_dfl *dfl,
				  resource_size_t ofst)
{
	u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
	u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;

	WARN_ON(!size);

	return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
}

static int parse_feature_afu(struct build_feature_devs_info *binfo,
			     struct dfl_fpga_enum_dfl *dfl,
			     resource_size_t ofst)
{
	if (!binfo->feature_dev) {
		dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
		return -EINVAL;
	}

	switch (feature_dev_id_type(binfo->feature_dev)) {
	case PORT_ID:
		return parse_feature_port_afu(binfo, dfl, ofst);
	default:
		dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
			 binfo->feature_dev->name);
	}

	return 0;
}
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
			     struct dfl_fpga_enum_dfl *dfl,
			     resource_size_t ofst)
{
	u32 id, offset;
	u64 v;
	int ret = 0;

	v = readq(dfl->ioaddr + ofst + DFH);
	id = FIELD_GET(DFH_ID, v);

	/* create platform device for dfl feature dev */
	ret = build_info_create_dev(binfo, dfh_id_to_type(id),
				    dfl->ioaddr + ofst);
	if (ret)
		return ret;

	ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
	if (ret)
		return ret;
	/*
	 * find and parse FIU's child AFU via its NEXT_AFU register.
	 * please note that only Port has valid NEXT_AFU pointer per spec.
	 */
	v = readq(dfl->ioaddr + ofst + NEXT_AFU);

	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
	if (offset)
		return parse_feature_afu(binfo, dfl, ofst + offset);

	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);

	return ret;
}

static int parse_feature_private(struct build_feature_devs_info *binfo,
				 struct dfl_fpga_enum_dfl *dfl,
				 resource_size_t ofst)
{
	if (!binfo->feature_dev) {
		dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
			(unsigned long long)feature_id(dfl->ioaddr + ofst));
		return -EINVAL;
	}

	return create_feature_instance(binfo, dfl, ofst, 0, 0);
}
/**
 * parse_feature - parse a feature on given device feature list
 *
 * @binfo: build feature devices information.
 * @dfl: device feature list to parse
 * @ofst: offset to feature header on this device feature list
 */
static int parse_feature(struct build_feature_devs_info *binfo,
			 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
{
	u64 v;
	u32 type;

	v = readq(dfl->ioaddr + ofst + DFH);
	type = FIELD_GET(DFH_TYPE, v);

	switch (type) {
	case DFH_TYPE_AFU:
		return parse_feature_afu(binfo, dfl, ofst);
	case DFH_TYPE_PRIVATE:
		return parse_feature_private(binfo, dfl, ofst);
	case DFH_TYPE_FIU:
		return parse_feature_fiu(binfo, dfl, ofst);
	default:
		dev_info(binfo->dev,
			 "Feature Type %x is not supported.\n", type);
	}

	return 0;
}
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      struct dfl_fpga_enum_dfl *dfl)
{
	void __iomem *start = dfl->ioaddr;
	void __iomem *end = dfl->ioaddr + dfl->len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	/* walk through the device feature list via DFH's next DFH pointer. */
	for (; start < end; start += ofst) {
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
		if (ret)
			return ret;

		v = readq(start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing if EOL (End of List) is set or offset is 0 */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reaching the end of the list */
	return build_info_commit_dev(binfo);
}
struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
{
	struct dfl_fpga_enum_info *info;

	get_device(dev);

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		put_device(dev);
		return NULL;
	}

	info->dev = dev;
	INIT_LIST_HEAD(&info->dfls);

	return info;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);

void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
{
	struct dfl_fpga_enum_dfl *tmp, *dfl;
	struct device *dev;

	if (!info)
		return;

	dev = info->dev;

	/* remove all device feature lists in the list. */
	list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
		list_del(&dfl->node);
		devm_kfree(dev, dfl);
	}
	devm_kfree(dev, info);

	put_device(dev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
/**
 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @start: mmio resource address of the device feature list.
 * @len: mmio resource length of the device feature list.
 * @ioaddr: mapped mmio resource address of the device feature list.
 *
 * One FPGA device may have one or more Device Feature Lists (DFLs), use this
 * function to add information of each DFL to common data structure for next
 * step enumeration.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
			       resource_size_t start, resource_size_t len,
			       void __iomem *ioaddr)
{
	struct dfl_fpga_enum_dfl *dfl;

	dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
	if (!dfl)
		return -ENOMEM;

	dfl->start = start;
	dfl->len = len;
	dfl->ioaddr = ioaddr;

	list_add_tail(&dfl->node, &info->dfls);

	return 0;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
static int remove_feature_dev(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	enum dfl_id_type type = feature_dev_id_type(pdev);
	int id = pdev->id;

	platform_device_unregister(pdev);

	dfl_id_free(type, id);

	return 0;
}

static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
{
	device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
}
/**
 * dfl_fpga_feature_devs_enumerate - enumerate feature devices
 * @info: information for enumeration.
 *
 * This function creates a container device (base FPGA region), enumerates
 * feature devices based on the enumeration info and creates platform devices
 * under the container device.
 *
 * Return: dfl_fpga_cdev struct on success, -errno on failure
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	cdev->region = fpga_region_create(info->dev, NULL, NULL);
	if (!cdev->region) {
		ret = -ENOMEM;
		goto free_cdev_exit;
	}

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	ret = fpga_region_register(cdev->region);
	if (ret)
		goto free_region_exit;

	/* create and init build info for enumeration */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	binfo->dev = info->dev;
	binfo->cdev = cdev;

	/*
	 * start enumeration for all feature devices based on Device Feature
	 * Lists.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl);
		if (ret) {
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_region_exit:
	fpga_region_free(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
/**
 * dfl_fpga_feature_devs_remove - remove all feature devices
 * @cdev: fpga container device.
 *
 * Remove the container device and all feature devices under the given
 * container device.
 */
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_platform_data *pdata, *ptmp;

	remove_feature_devs(cdev);

	mutex_lock(&cdev->lock);
	if (cdev->fme_dev) {
		/* the fme should be unregistered. */
		WARN_ON(device_is_registered(cdev->fme_dev));
		put_device(cdev->fme_dev);
	}

	list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
		struct platform_device *port_dev = pdata->dev;

		/* the port should be unregistered. */
		WARN_ON(device_is_registered(&port_dev->dev));
		list_del(&pdata->node);
		put_device(&port_dev->dev);
	}
	mutex_unlock(&cdev->lock);

	fpga_region_unregister(cdev->region);
	devm_kfree(cdev->parent, cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
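
/*
 * Illustrative sketch (not part of this file): a DFL bus driver (e.g. a PCIe
 * driver that discovers the DFLs on its device) is expected to drive the
 * enumeration APIs roughly as follows. The variables dev, base, start and len
 * are hypothetical and come from the bus driver's own resource handling.
 *
 *	struct dfl_fpga_enum_info *info;
 *	struct dfl_fpga_cdev *cdev;
 *	int ret;
 *
 *	info = dfl_fpga_enum_info_alloc(dev);
 *	if (!info)
 *		return -ENOMEM;
 *
 *	// one call per Device Feature List found on the device
 *	ret = dfl_fpga_enum_info_add_dfl(info, start, len, base);
 *	if (ret) {
 *		dfl_fpga_enum_info_free(info);
 *		return ret;
 *	}
 *
 *	cdev = dfl_fpga_feature_devs_enumerate(info);
 *	dfl_fpga_enum_info_free(info);	// enum info is no longer needed
 *	if (IS_ERR(cdev))
 *		return PTR_ERR(cdev);
 *	...
 *	// on teardown
 *	dfl_fpga_feature_devs_remove(cdev);
 */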
/**
 * __dfl_fpga_cdev_find_port - find a port under given container device
 *
 * @cdev: container device
 * @data: data passed to match function
 * @match: match function used to find specific port from the port device list
 *
 * Find a port device under container device. This function needs to be
 * invoked with lock held.
 *
 * Return: pointer to port's platform device if successful, NULL otherwise.
 *
 * NOTE: you will need to drop the device reference with put_device() after use.
 */
struct platform_device *
__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
			  int (*match)(struct platform_device *, void *))
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_dev;

	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		port_dev = pdata->dev;

		if (match(port_dev, data) && get_device(&port_dev->dev))
			return port_dev;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
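
/*
 * Illustrative sketch (not part of this file): a caller-supplied match
 * callback might select a port by its platform device id. As documented
 * above, __dfl_fpga_cdev_find_port() must be called with cdev->lock held and
 * the returned reference dropped with put_device() after use.
 *
 *	static int match_port_by_id(struct platform_device *pdev, void *data)
 *	{
 *		return pdev->id == *(int *)data;
 *	}
 *
 *	int id = 0;
 *	struct platform_device *port;
 *
 *	mutex_lock(&cdev->lock);
 *	port = __dfl_fpga_cdev_find_port(cdev, &id, match_port_by_id);
 *	mutex_unlock(&cdev->lock);
 *	if (port) {
 *		// use the port feature device
 *		put_device(&port->dev);
 *	}
 */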
static int __init dfl_fpga_init(void)
{
	int ret;

	dfl_ids_init();

	ret = dfl_chardev_init();
	if (ret)
		dfl_ids_destroy();

	return ret;
}

static void __exit dfl_fpga_exit(void)
{
	dfl_chardev_uinit();
	dfl_ids_destroy();
}

module_init(dfl_fpga_init);
module_exit(dfl_fpga_exit);

MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");