/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/pci_regs.h>

#include "cxl.h"

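/* The AFU master character device keeps its struct cxl_afu pointer in drvdata */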
#define to_afu_chardev_m(d) dev_get_drvdata(d)

/********* Adapter attributes **********************************************/

static ssize_t caia_version_show(struct device *device,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
			 adapter->caia_minor);
}

static ssize_t psl_revision_show(struct device *device,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
}

static ssize_t base_image_show(struct device *device,
			       struct device_attribute *attr,
			       char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
}

static ssize_t image_loaded_show(struct device *device,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	if (adapter->user_image_loaded)
		return scnprintf(buf, PAGE_SIZE, "user\n");
	return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t psl_timebase_synced_show(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);
	u64 psl_tb, delta;

	/* Recompute the status only in native mode */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		psl_tb = adapter->native->sl_ops->timebase_read(adapter);
		delta = abs(mftb() - psl_tb);

		/* CORE TB and PSL TB difference <= 16usecs ? */
		adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false;
		pr_devel("PSL timebase %s - delta: 0x%016llx\n",
			 (tb_to_ns(delta) < 16000) ? "synchronized" :
			 "not synchronized", tb_to_ns(delta));
	}
	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}

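/*
 * Writing 1 resets the adapter only if the context mapping lock can be
 * taken (i.e. no contexts are attached); writing -1 forces the reset
 * regardless.
 */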
static ssize_t reset_adapter_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	int rc;
	int val;

	rc = sscanf(buf, "%i", &val);
	if ((rc != 1) || (val != 1 && val != -1))
		return -EINVAL;

	/*
	 * Try to take the context mapping lock, which is only possible
	 * when no contexts are attached to the adapter. Once taken, the
	 * lock also prevents any new context from being activated.
	 */
	if (val == 1) {
		rc = cxl_adapter_context_lock(adapter);
		if (rc)
			goto out;
		rc = cxl_ops->adapter_reset(adapter);
		/* In case the reset failed, release the context lock */
		if (rc)
			cxl_adapter_context_unlock(adapter);
	} else if (val == -1) {
		/* Perform a forced adapter reset */
		rc = cxl_ops->adapter_reset(adapter);
	}
out:
	return rc ? rc : count;
}

static ssize_t load_image_on_perst_show(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	if (!adapter->perst_loads_image)
		return scnprintf(buf, PAGE_SIZE, "none\n");

	if (adapter->perst_select_user)
		return scnprintf(buf, PAGE_SIZE, "user\n");
	return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t load_image_on_perst_store(struct device *device,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	int rc;

	if (!strncmp(buf, "none", 4))
		adapter->perst_loads_image = false;
	else if (!strncmp(buf, "user", 4)) {
		adapter->perst_select_user = true;
		adapter->perst_loads_image = true;
	} else if (!strncmp(buf, "factory", 7)) {
		adapter->perst_select_user = false;
		adapter->perst_loads_image = true;
	} else
		return -EINVAL;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	return count;
}

static ssize_t perst_reloads_same_image_show(struct device *device,
					     struct device_attribute *attr,
					     char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
}

static ssize_t perst_reloads_same_image_store(struct device *device,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	int rc;
	int val;

	rc = sscanf(buf, "%i", &val);
	if ((rc != 1) || !(val == 1 || val == 0))
		return -EINVAL;

	adapter->perst_same_image = (val == 1 ? true : false);
	return count;
}

static struct device_attribute adapter_attrs[] = {
	__ATTR_RO(caia_version),
	__ATTR_RO(psl_revision),
	__ATTR_RO(base_image),
	__ATTR_RO(image_loaded),
	__ATTR_RO(psl_timebase_synced),
	__ATTR_RW(load_image_on_perst),
	__ATTR_RW(perst_reloads_same_image),
	__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};

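/*
 * The attributes above are created on the adapter device by
 * cxl_sysfs_adapter_add() below, so they appear in the adapter's sysfs
 * directory (e.g. /sys/class/cxl/card0/ on a typical system). For
 * example, "echo 1 > /sys/class/cxl/card0/reset" requests an adapter
 * reset provided no contexts are attached.
 */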
/********* AFU master specific attributes **********************************/

static ssize_t mmio_size_show_master(struct device *device,
				     struct device_attribute *attr,
				     char *buf)
{
	struct cxl_afu *afu = to_afu_chardev_m(device);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t pp_mmio_off_show(struct device *device,
				struct device_attribute *attr,
				char *buf)
{
	struct cxl_afu *afu = to_afu_chardev_m(device);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
}

static ssize_t pp_mmio_len_show(struct device *device,
				struct device_attribute *attr,
				char *buf)
{
	struct cxl_afu *afu = to_afu_chardev_m(device);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
}

static struct device_attribute afu_master_attrs[] = {
	__ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
	__ATTR_RO(pp_mmio_off),
	__ATTR_RO(pp_mmio_len),
};

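/*
 * These attributes are created on the AFU master character device
 * (typically the afu<card>.<afu>m node) rather than on the AFU device
 * itself, which is why the show routines above use to_afu_chardev_m().
 */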
/********* AFU attributes **************************************************/

static ssize_t mmio_size_show(struct device *device,
			      struct device_attribute *attr,
			      char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	if (afu->pp_size)
		return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t reset_store_afu(struct device *device,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	int rc;

	/* Not safe to reset if it is currently in use */
	mutex_lock(&afu->contexts_lock);
	if (!idr_is_empty(&afu->contexts_idr)) {
		rc = -EBUSY;
		goto err;
	}

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err;

	rc = count;
err:
	mutex_unlock(&afu->contexts_lock);
	return rc;
}

static ssize_t irqs_min_show(struct device *device,
			     struct device_attribute *attr,
			     char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
}

static ssize_t irqs_max_show(struct device *device,
			     struct device_attribute *attr,
			     char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
}

static ssize_t irqs_max_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	ssize_t ret;
	int irqs_max;

	ret = sscanf(buf, "%i", &irqs_max);
	if (ret != 1)
		return -EINVAL;

	if (irqs_max < afu->pp_irqs)
		return -EINVAL;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (irqs_max > afu->adapter->user_irqs)
			return -EINVAL;
	} else {
		/* pHyp sets a per-AFU limit */
		if (irqs_max > afu->guest->max_ints)
			return -EINVAL;
	}

	afu->irqs_max = irqs_max;
	return count;
}

static ssize_t modes_supported_show(struct device *device,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	char *p = buf, *end = buf + PAGE_SIZE;

	if (afu->modes_supported & CXL_MODE_DEDICATED)
		p += scnprintf(p, end - p, "dedicated_process\n");
	if (afu->modes_supported & CXL_MODE_DIRECTED)
		p += scnprintf(p, end - p, "afu_directed\n");
	return (p - buf);
}

static ssize_t prefault_mode_show(struct device *device,
				  struct device_attribute *attr,
				  char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	switch (afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
	case CXL_PREFAULT_ALL:
		return scnprintf(buf, PAGE_SIZE, "all\n");
	default:
		return scnprintf(buf, PAGE_SIZE, "none\n");
	}
}

static ssize_t prefault_mode_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	enum prefault_modes mode = -1;

	if (!strncmp(buf, "work_element_descriptor", 23))
		mode = CXL_PREFAULT_WED;
	if (!strncmp(buf, "all", 3))
		mode = CXL_PREFAULT_ALL;
	if (!strncmp(buf, "none", 4))
		mode = CXL_PREFAULT_NONE;

	if (mode == -1)
		return -EINVAL;

	afu->prefault_mode = mode;
	return count;
}

static ssize_t mode_show(struct device *device,
			 struct device_attribute *attr,
			 char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	if (afu->current_mode == CXL_MODE_DEDICATED)
		return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
	if (afu->current_mode == CXL_MODE_DIRECTED)
		return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
	return scnprintf(buf, PAGE_SIZE, "none\n");
}

static ssize_t mode_store(struct device *device, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	int old_mode, mode = -1;
	int rc = -EBUSY;

	/* can't change this if we have a user */
	mutex_lock(&afu->contexts_lock);
	if (!idr_is_empty(&afu->contexts_idr))
		goto err;

	if (!strncmp(buf, "dedicated_process", 17))
		mode = CXL_MODE_DEDICATED;
	if (!strncmp(buf, "afu_directed", 12))
		mode = CXL_MODE_DIRECTED;
	if (!strncmp(buf, "none", 4))
		mode = 0;

	if (mode == -1) {
		rc = -EINVAL;
		goto err;
	}

	/*
	 * afu_deactivate_mode needs to be done outside the lock, prevent
	 * other contexts coming in before we are ready:
	 */
	old_mode = afu->current_mode;
	afu->current_mode = 0;
	afu->num_procs = 0;

	mutex_unlock(&afu->contexts_lock);

	if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
		return rc;
	if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
		return rc;

	return count;
err:
	mutex_unlock(&afu->contexts_lock);
	return rc;
}

static ssize_t api_version_show(struct device *device,
				struct device_attribute *attr,
				char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
}

static ssize_t api_version_compatible_show(struct device *device,
					   struct device_attribute *attr,
					   char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
}

static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *bin_attr, char *buf,
			   loff_t off, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));

	return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
}

static struct device_attribute afu_attrs[] = {
	__ATTR_RO(mmio_size),
	__ATTR_RO(irqs_min),
	__ATTR_RW(irqs_max),
	__ATTR_RO(modes_supported),
	__ATTR_RW(mode),
	__ATTR_RW(prefault_mode),
	__ATTR_RO(api_version),
	__ATTR_RO(api_version_compatible),
	__ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};

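/*
 * Per-AFU attributes, created on each AFU device by cxl_sysfs_afu_add()
 * below (e.g. under /sys/class/cxl/afu0.0/ on a typical system); the
 * master specific attributes go on the master chardev instead.
 */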
int cxl_sysfs_adapter_add(struct cxl *adapter)
{
	struct device_attribute *dev_attr;
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
		dev_attr = &adapter_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_ADAPTER_ATTRS)) {
			if ((rc = device_create_file(&adapter->dev, dev_attr)))
				goto err;
		}
	}
	return 0;
err:
	for (i--; i >= 0; i--) {
		dev_attr = &adapter_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_ADAPTER_ATTRS))
			device_remove_file(&adapter->dev, dev_attr);
	}
	return rc;
}

void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
	struct device_attribute *dev_attr;
	int i;

	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
		dev_attr = &adapter_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_ADAPTER_ATTRS))
			device_remove_file(&adapter->dev, dev_attr);
	}
}

struct afu_config_record {
	struct kobject kobj;
	struct bin_attribute config_attr;
	struct list_head list;
	int cr;
	u16 device;
	u16 vendor;
	u32 class;
};

#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)

static ssize_t vendor_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct afu_config_record *cr = to_cr(kobj);

	return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
}

static ssize_t device_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct afu_config_record *cr = to_cr(kobj);

	return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
}

static ssize_t class_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct afu_config_record *cr = to_cr(kobj);

	return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
}

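/*
 * Reads of the raw config record are serviced with 64-bit aligned
 * accesses: each word is fetched with afu_cr_read64() and the requested
 * bytes copied out, so arbitrary offsets and lengths work. A failed
 * read fills in all-ones data, much like a failed PCI config read.
 */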
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct afu_config_record *cr = to_cr(kobj);
	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
	u64 i, j, val, rc;

	for (i = 0; i < count;) {
		rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
		if (rc)
			val = ~0ULL;

		for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
			buf[i] = (val >> (j * 8)) & 0xff;
	}

	return count;
}

static struct kobj_attribute vendor_attribute =
	__ATTR_RO(vendor);
static struct kobj_attribute device_attribute =
	__ATTR_RO(device);
static struct kobj_attribute class_attribute =
	__ATTR_RO(class);

static struct attribute *afu_cr_attrs[] = {
	&vendor_attribute.attr,
	&device_attribute.attr,
	&class_attribute.attr,
	NULL,
};

static void release_afu_config_record(struct kobject *kobj)
{
	struct afu_config_record *cr = to_cr(kobj);

	kfree(cr);
}

static struct kobj_type afu_config_record_type = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = release_afu_config_record,
	.default_attrs = afu_cr_attrs,
};

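/*
 * Create one "cr<i>" kobject under the AFU device for AFU configuration
 * record i: read the vendor/device/class IDs through cxl_ops and expose
 * the raw record as a root-readable "config" binary attribute.
 */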
static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
{
	struct afu_config_record *cr;
	int rc;

	cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cr = cr_idx;

	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
	if (rc)
		goto err;
	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
	if (rc)
		goto err;
	rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
	if (rc)
		goto err;
	cr->class >>= 8;

	/*
	 * Export the raw AFU PCIe-like config record. For now this is
	 * readable only by root - we can expand that later to be readable by
	 * non-root and maybe even writable provided we have a good use-case.
	 * Once we support exposing AFUs through a virtual PHB they will get
	 * that for free from Linux's PCI infrastructure, but until then it's
	 * not clear that we need it for anything since the main use case is
	 * just identifying AFUs, which can be done via the vendor, device
	 * and class attributes.
	 */
	sysfs_bin_attr_init(&cr->config_attr);
	cr->config_attr.attr.name = "config";
	cr->config_attr.attr.mode = S_IRUSR;
	cr->config_attr.size = afu->crs_len;
	cr->config_attr.read = afu_read_config;

	rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
				  &afu->dev.kobj, "cr%i", cr->cr);
	if (rc)
		goto err;

	rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
	if (rc)
		goto err1;

	rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
	if (rc)
		goto err2;

	return cr;
err2:
	sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
err1:
	kobject_put(&cr->kobj);
	return ERR_PTR(rc);
err:
	kfree(cr);
	return ERR_PTR(rc);
}

void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	struct afu_config_record *cr, *tmp;
	int i;

	/* remove the err buffer bin attribute */
	if (afu->eb_len)
		device_remove_bin_file(&afu->dev, &afu->attr_eb);

	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS))
			device_remove_file(&afu->dev, &afu_attrs[i]);
	}

	list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
		sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
		kobject_put(&cr->kobj);
	}
}

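/*
 * Populate an AFU's sysfs directory: the per-AFU attributes from
 * afu_attrs[] (filtered through cxl_ops->support_attributes), the
 * optional "afu_err_buff" binary attribute when an error buffer is
 * present, and one cr<i> config record kobject per record. Anything
 * already created is torn down again if a later step fails.
 */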
int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	struct afu_config_record *cr;
	int i, rc;

	INIT_LIST_HEAD(&afu->crs);

	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS)) {
			if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
				goto err;
		}
	}

	/* conditionally create the binary file for the error info buffer */
	if (afu->eb_len) {
		sysfs_attr_init(&afu->attr_eb.attr);

		afu->attr_eb.attr.name = "afu_err_buff";
		afu->attr_eb.attr.mode = S_IRUGO;
		afu->attr_eb.size = afu->eb_len;
		afu->attr_eb.read = afu_eb_read;

		rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
		if (rc) {
			dev_err(&afu->dev,
				"Unable to create eb attr for the afu. Err(%d)\n",
				rc);
			goto err;
		}
	}

	for (i = 0; i < afu->crs_num; i++) {
		cr = cxl_sysfs_afu_new_cr(afu, i);
		if (IS_ERR(cr)) {
			rc = PTR_ERR(cr);
			goto err1;
		}
		list_add(&cr->list, &afu->crs);
	}

	return 0;
err1:
	cxl_sysfs_afu_remove(afu);
	return rc;
err:
	/* reset eb_len as we haven't created the bin attr */
	afu->eb_len = 0;

	for (i--; i >= 0; i--) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS))
			device_remove_file(&afu->dev, &afu_attrs[i]);
	}
	return rc;
}

int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
		dev_attr = &afu_master_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_MASTER_ATTRS)) {
			if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
				goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--) {
		dev_attr = &afu_master_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_MASTER_ATTRS))
			device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
	}
	return rc;
}

void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	int i;

	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
		dev_attr = &afu_master_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_MASTER_ATTRS))
			device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
	}
}