/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/pci_regs.h>

#include "cxl.h"

#define to_afu_chardev_m(d) dev_get_drvdata(d)

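/*
 * This file provides the sysfs attributes for cxl adapters and their AFUs
 * (see Documentation/ABI/testing/sysfs-class-cxl). Each attribute is only
 * created if the active back end reports it as supported through
 * cxl_ops->support_attributes().
 */
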
/********* Adapter attributes **********************************************/

static ssize_t caia_version_show(struct device *device,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
			 adapter->caia_minor);
}

static ssize_t psl_revision_show(struct device *device,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
}

static ssize_t base_image_show(struct device *device,
			       struct device_attribute *attr,
			       char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
}

static ssize_t image_loaded_show(struct device *device,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	if (adapter->user_image_loaded)
		return scnprintf(buf, PAGE_SIZE, "user\n");
	return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t psl_timebase_synced_show(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);
	u64 psl_tb, delta;

	/* Recompute the status only in native mode */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		psl_tb = adapter->native->sl_ops->timebase_read(adapter);
		delta = abs(mftb() - psl_tb);

		/* CORE TB and PSL TB difference <= 16usecs ? */
		adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false;
		pr_devel("PSL timebase %s - delta: 0x%016llx\n",
			 (tb_to_ns(delta) < 16000) ? "synchronized" :
			 "not synchronized", tb_to_ns(delta));
	}
	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}

static ssize_t tunneled_ops_supported_show(struct device *device,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
}

static ssize_t reset_adapter_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	int rc;
	int val;

	rc = sscanf(buf, "%i", &val);
	if ((rc != 1) || (val != 1 && val != -1))
		return -EINVAL;

	/*
	 * See if we can lock the context mapping that's only allowed
	 * when there are no contexts attached to the adapter. Once
	 * taken this will also prevent any context from getting activated.
	 */
	if (val == 1) {
		rc = cxl_adapter_context_lock(adapter);
		if (rc)
			goto out;

		rc = cxl_ops->adapter_reset(adapter);
		/* In case reset failed release context lock */
		if (rc)
			cxl_adapter_context_unlock(adapter);

	} else if (val == -1) {
		/* Perform a forced adapter reset */
		rc = cxl_ops->adapter_reset(adapter);
	}

out:
	return rc ? rc : count;
}

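/*
 * load_image_on_perst accepts "none", "user" or "factory" and selects which
 * image, if any, the adapter reloads when PERST is asserted; the selection
 * is pushed to the hardware via cxl_update_image_control().
 */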
static ssize_t load_image_on_perst_show(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	if (!adapter->perst_loads_image)
		return scnprintf(buf, PAGE_SIZE, "none\n");

	if (adapter->perst_select_user)
		return scnprintf(buf, PAGE_SIZE, "user\n");
	return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t load_image_on_perst_store(struct device *device,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	int rc;

	if (!strncmp(buf, "none", 4))
		adapter->perst_loads_image = false;
	else if (!strncmp(buf, "user", 4)) {
		adapter->perst_select_user = true;
		adapter->perst_loads_image = true;
	} else if (!strncmp(buf, "factory", 7)) {
		adapter->perst_select_user = false;
		adapter->perst_loads_image = true;
	} else
		return -EINVAL;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	return count;
}

static ssize_t perst_reloads_same_image_show(struct device *device,
					     struct device_attribute *attr,
					     char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
}

static ssize_t perst_reloads_same_image_store(struct device *device,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	int rc;
	int val;

	rc = sscanf(buf, "%i", &val);
	if ((rc != 1) || !(val == 1 || val == 0))
		return -EINVAL;

	adapter->perst_same_image = (val == 1 ? true : false);
	return count;
}

static struct device_attribute adapter_attrs[] = {
	__ATTR_RO(caia_version),
	__ATTR_RO(psl_revision),
	__ATTR_RO(base_image),
	__ATTR_RO(image_loaded),
	__ATTR_RO(psl_timebase_synced),
	__ATTR_RO(tunneled_ops_supported),
	__ATTR_RW(load_image_on_perst),
	__ATTR_RW(perst_reloads_same_image),
	__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};

/********* AFU master specific attributes **********************************/

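/*
 * The attributes below live on the AFU master character device
 * (afu->chardev_m), which keeps the struct cxl_afu in its drvdata, rather
 * than on the AFU device itself.
 */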
static ssize_t mmio_size_show_master(struct device *device,
				     struct device_attribute *attr,
				     char *buf)
{
	struct cxl_afu *afu = to_afu_chardev_m(device);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t pp_mmio_off_show(struct device *device,
				struct device_attribute *attr,
				char *buf)
{
	struct cxl_afu *afu = to_afu_chardev_m(device);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
}

static ssize_t pp_mmio_len_show(struct device *device,
				struct device_attribute *attr,
				char *buf)
{
	struct cxl_afu *afu = to_afu_chardev_m(device);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
}

static struct device_attribute afu_master_attrs[] = {
	__ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
	__ATTR_RO(pp_mmio_off),
	__ATTR_RO(pp_mmio_len),
};

/********* AFU attributes **************************************************/

static ssize_t mmio_size_show(struct device *device,
			      struct device_attribute *attr,
			      char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	if (afu->pp_size)
		return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t reset_store_afu(struct device *device,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	int rc;

	/* Not safe to reset if it is currently in use */
	mutex_lock(&afu->contexts_lock);
	if (!idr_is_empty(&afu->contexts_idr)) {
		rc = -EBUSY;
		goto err;
	}

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err;

	rc = count;
err:
	mutex_unlock(&afu->contexts_lock);
	return rc;
}

static ssize_t irqs_min_show(struct device *device,
			     struct device_attribute *attr,
			     char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
}

static ssize_t irqs_max_show(struct device *device,
			     struct device_attribute *attr,
			     char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
}

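/*
 * irqs_max must be at least the AFU's minimum (pp_irqs) and may not exceed
 * the adapter-wide user IRQ pool on bare metal, or the per-AFU limit set by
 * pHyp when running as a guest.
 */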
static ssize_t irqs_max_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	ssize_t ret;
	int irqs_max;

	ret = sscanf(buf, "%i", &irqs_max);
	if (ret != 1)
		return -EINVAL;

	if (irqs_max < afu->pp_irqs)
		return -EINVAL;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (irqs_max > afu->adapter->user_irqs)
			return -EINVAL;
	} else {
		/* pHyp sets a per-AFU limit */
		if (irqs_max > afu->guest->max_ints)
			return -EINVAL;
	}

	afu->irqs_max = irqs_max;
	return count;
}

static ssize_t modes_supported_show(struct device *device,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	char *p = buf, *end = buf + PAGE_SIZE;

	if (afu->modes_supported & CXL_MODE_DEDICATED)
		p += scnprintf(p, end - p, "dedicated_process\n");
	if (afu->modes_supported & CXL_MODE_DIRECTED)
		p += scnprintf(p, end - p, "afu_directed\n");
	return (p - buf);
}

static ssize_t prefault_mode_show(struct device *device,
				  struct device_attribute *attr,
				  char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	switch (afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
	case CXL_PREFAULT_ALL:
		return scnprintf(buf, PAGE_SIZE, "all\n");
	default:
		return scnprintf(buf, PAGE_SIZE, "none\n");
	}
}

static ssize_t prefault_mode_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	enum prefault_modes mode = -1;

	if (!strncmp(buf, "none", 4))
		mode = CXL_PREFAULT_NONE;
	else {
		if (!radix_enabled()) {
			/* only allowed when not in radix mode */
			if (!strncmp(buf, "work_element_descriptor", 23))
				mode = CXL_PREFAULT_WED;
			if (!strncmp(buf, "all", 3))
				mode = CXL_PREFAULT_ALL;
		} else {
			dev_err(device, "Cannot prefault with radix enabled\n");
		}
	}

	if (mode == -1)
		return -EINVAL;

	afu->prefault_mode = mode;
	return count;
}

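/*
 * The "mode" attribute reports and selects the AFU programming model
 * ("dedicated_process", "afu_directed" or "none"). Switching is refused with
 * -EBUSY while any context is attached: the current mode is deactivated and
 * the new one activated only once the AFU is idle.
 */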
static ssize_t mode_show(struct device *device,
			 struct device_attribute *attr,
			 char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	if (afu->current_mode == CXL_MODE_DEDICATED)
		return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
	if (afu->current_mode == CXL_MODE_DIRECTED)
		return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
	return scnprintf(buf, PAGE_SIZE, "none\n");
}

static ssize_t mode_store(struct device *device, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	int old_mode, mode = -1;
	int rc = -EBUSY;

	/* can't change this if we have a user */
	mutex_lock(&afu->contexts_lock);
	if (!idr_is_empty(&afu->contexts_idr))
		goto err;

	if (!strncmp(buf, "dedicated_process", 17))
		mode = CXL_MODE_DEDICATED;
	if (!strncmp(buf, "afu_directed", 12))
		mode = CXL_MODE_DIRECTED;
	if (!strncmp(buf, "none", 4))
		mode = 0;

	if (mode == -1) {
		rc = -EINVAL;
		goto err;
	}

	/*
	 * afu_deactivate_mode needs to be done outside the lock, prevent
	 * other contexts coming in before we are ready:
	 */
	old_mode = afu->current_mode;
	afu->current_mode = 0;
	afu->num_procs = 0;

	mutex_unlock(&afu->contexts_lock);

	if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
		return rc;
	if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
		return rc;

	return count;
err:
	mutex_unlock(&afu->contexts_lock);
	return rc;
}

static ssize_t api_version_show(struct device *device,
				struct device_attribute *attr,
				char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
}

static ssize_t api_version_compatible_show(struct device *device,
					   struct device_attribute *attr,
					   char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
}

static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *bin_attr, char *buf,
			   loff_t off, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));

	return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
}

static struct device_attribute afu_attrs[] = {
	__ATTR_RO(mmio_size),
	__ATTR_RO(irqs_min),
	__ATTR_RW(irqs_max),
	__ATTR_RO(modes_supported),
	__ATTR_RW(mode),
	__ATTR_RW(prefault_mode),
	__ATTR_RO(api_version),
	__ATTR_RO(api_version_compatible),
	__ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};

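/*
 * Adapter attributes are only created if the back end reports them as
 * supported via cxl_ops->support_attributes(); on failure, any attributes
 * already created are removed again before returning the error.
 */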
int cxl_sysfs_adapter_add(struct cxl *adapter)
{
	struct device_attribute *dev_attr;
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
		dev_attr = &adapter_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_ADAPTER_ATTRS)) {
			if ((rc = device_create_file(&adapter->dev, dev_attr)))
				goto err;
		}
	}
	return 0;
err:
	for (i--; i >= 0; i--) {
		dev_attr = &adapter_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_ADAPTER_ATTRS))
			device_remove_file(&adapter->dev, dev_attr);
	}
	return rc;
}

void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
	struct device_attribute *dev_attr;
	int i;

	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
		dev_attr = &adapter_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_ADAPTER_ATTRS))
			device_remove_file(&adapter->dev, dev_attr);
	}
}

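/*
 * Each AFU configuration record is exposed as a "crN" kobject under the AFU
 * device, with vendor/device/class attributes for identification and a raw
 * "config" binary file readable only by root.
 */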
struct afu_config_record {
	struct kobject kobj;
	struct bin_attribute config_attr;
	struct list_head list;
	int cr;
	u16 device;
	u16 vendor;
	u32 class;
};

#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)

static ssize_t vendor_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct afu_config_record *cr = to_cr(kobj);

	return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
}

static ssize_t device_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct afu_config_record *cr = to_cr(kobj);

	return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
}

static ssize_t class_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct afu_config_record *cr = to_cr(kobj);

	return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
}

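/*
 * Reads of the raw config record go through the 64-bit cxl_ops accessor,
 * eight bytes at a time, and unpack the requested bytes into buf; a failed
 * read is returned as all-ones.
 */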
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct afu_config_record *cr = to_cr(kobj);
	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
	u64 i, j, val, rc;

	for (i = 0; i < count;) {
		rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
		if (rc)
			val = ~0ULL;
		for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
			buf[i] = (val >> (j * 8)) & 0xff;
	}

	return count;
}

static struct kobj_attribute vendor_attribute =
	__ATTR_RO(vendor);
static struct kobj_attribute device_attribute =
	__ATTR_RO(device);
static struct kobj_attribute class_attribute =
	__ATTR_RO(class);

static struct attribute *afu_cr_attrs[] = {
	&vendor_attribute.attr,
	&device_attribute.attr,
	&class_attribute.attr,
	NULL,
};

static void release_afu_config_record(struct kobject *kobj)
{
	struct afu_config_record *cr = to_cr(kobj);

	kfree(cr);
}

static struct kobj_type afu_config_record_type = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = release_afu_config_record,
	.default_attrs = afu_cr_attrs,
};

static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
{
	struct afu_config_record *cr;
	int rc;

	cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cr = cr_idx;

	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
	if (rc)
		goto err;
	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
	if (rc)
		goto err;
	rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
	if (rc)
		goto err;
	cr->class >>= 8;

	/*
	 * Export raw AFU PCIe-like config record. For now this is read only by
	 * root - we can expand that later to be readable by non-root and maybe
	 * even writable provided we have a good use-case. Once we support
	 * exposing AFUs through a virtual PHB they will get that for free from
	 * Linux' PCI infrastructure, but until then it's not clear that we
	 * need it for anything since the main use case is just identifying
	 * AFUs, which can be done via the vendor, device and class attributes.
	 */
	sysfs_bin_attr_init(&cr->config_attr);
	cr->config_attr.attr.name = "config";
	cr->config_attr.attr.mode = S_IRUSR;
	cr->config_attr.size = afu->crs_len;
	cr->config_attr.read = afu_read_config;

	rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
				  &afu->dev.kobj, "cr%i", cr->cr);
	if (rc)
		goto err;

	rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
	if (rc)
		goto err1;

	rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
	if (rc)
		goto err2;

	return cr;
err2:
	sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
err1:
	kobject_put(&cr->kobj);
	return ERR_PTR(rc);
err:
	kfree(cr);
	return ERR_PTR(rc);
}

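/*
 * cxl_sysfs_afu_remove() also serves as the unwind path for a partially
 * completed cxl_sysfs_afu_add(): it drops the error buffer binary file (if
 * one was created), the AFU attributes and any config record kobjects.
 */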
void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	struct afu_config_record *cr, *tmp;
	int i;

	/* remove the err buffer bin attribute */
	if (afu->eb_len)
		device_remove_bin_file(&afu->dev, &afu->attr_eb);

	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS))
			device_remove_file(&afu->dev, &afu_attrs[i]);
	}

	list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
		sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
		kobject_put(&cr->kobj);
	}
}

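/*
 * Creation order: AFU attributes first, then the optional "afu_err_buff"
 * binary file, then one kobject per configuration record. Errors before the
 * config records unwind just the attributes; later errors fall back to
 * cxl_sysfs_afu_remove().
 */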
int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	struct afu_config_record *cr;
	int i, rc;

	INIT_LIST_HEAD(&afu->crs);

	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS)) {
			if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
				goto err;
		}
	}

	/* conditionally create the binary file for the error info buffer */
	if (afu->eb_len) {
		sysfs_attr_init(&afu->attr_eb.attr);

		afu->attr_eb.attr.name = "afu_err_buff";
		afu->attr_eb.attr.mode = S_IRUGO;
		afu->attr_eb.size = afu->eb_len;
		afu->attr_eb.read = afu_eb_read;

		rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
		if (rc) {
			dev_err(&afu->dev,
				"Unable to create eb attr for the afu. Err(%d)\n",
				rc);
			goto err;
		}
	}

	for (i = 0; i < afu->crs_num; i++) {
		cr = cxl_sysfs_afu_new_cr(afu, i);
		if (IS_ERR(cr)) {
			rc = PTR_ERR(cr);
			goto err1;
		}
		list_add(&cr->list, &afu->crs);
	}

	return 0;

err1:
	cxl_sysfs_afu_remove(afu);
	return rc;
err:
	/* reset the eb_len as we haven't created the bin attr */
	afu->eb_len = 0;

	for (i--; i >= 0; i--) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS))
			device_remove_file(&afu->dev, &afu_attrs[i]);
	}
	return rc;
}

int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
		dev_attr = &afu_master_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_MASTER_ATTRS)) {
			if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
				goto err;
		}
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		dev_attr = &afu_master_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_MASTER_ATTRS))
			device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
	}
	return rc;
}

void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	int i;

	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
		dev_attr = &afu_master_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_MASTER_ATTRS))
			device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
	}
}