qib_sysfs.c

/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/ctype.h>

#include "qib.h"
#include "qib_mad.h"

/* start of per-port functions */
/*
 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
 */
static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
{
	struct qib_devdata *dd = ppd->dd;
	int ret;

	ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
	ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
	return ret;
}

static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
			       size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	int ret;
	u16 val;

	ret = kstrtou16(buf, 0, &val);
	if (ret) {
		qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
		return ret;
	}

	/*
	 * Set the "intentional" heartbeat enable per either of
	 * "Enable" and "Auto", as these are normally set together.
	 * This bit is consulted when leaving loopback mode,
	 * because entering loopback mode overrides it and automatically
	 * disables heartbeat.
	 */
	ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
	return ret < 0 ? ret : count;
}
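/*
 * Illustrative example: per the OR encoding above, a write is taken as
 * 1 (enabled) | 2 (auto), so 3 sets both and 0 clears both, e.g.:
 *
 *   echo 3 > /sys/class/infiniband/qib0/ports/1/linkcontrol/hrtbt_enable
 *
 * The path shown assumes unit 0, port 1; the "linkcontrol" directory is
 * the kobject registered in qib_create_port_files() below.
 */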
static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
			      size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	int ret = count, r;

	r = dd->f_set_ib_loopback(ppd, buf);
	if (r < 0)
		ret = r;

	return ret;
}

static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
				  size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	int ret;
	u16 val;

	ret = kstrtou16(buf, 0, &val);
	if (ret) {
		qib_dev_err(dd, "attempt to set invalid LED override\n");
		return ret;
	}

	qib_set_led_override(ppd, val);
	return count;
}

static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
{
	ssize_t ret;

	if (!ppd->statusp)
		ret = -EINVAL;
	else
		ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
				(unsigned long long) *(ppd->statusp));
	return ret;
}

/*
 * For userland compatibility, these offsets must remain fixed.
 * They are strings for QIB_STATUS_*
 */
static const char * const qib_status_str[] = {
	"Initted",
	"",
	"",
	"",
	"",
	"Present",
	"IB_link_up",
	"IB_configured",
	"",
	"Fatal_Hardware_Error",
	NULL,
};

static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
{
	int i, any;
	u64 s;
	ssize_t ret;

	if (!ppd->statusp) {
		ret = -EINVAL;
		goto bail;
	}

	s = *(ppd->statusp);
	*buf = '\0';
	for (any = i = 0; s && qib_status_str[i]; i++) {
		if (s & 1) {
			/* if overflow */
			if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
				break;
			if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
					PAGE_SIZE)
				break;
			any = 1;
		}
		s >>= 1;
	}
	if (any)
		strlcat(buf, "\n", PAGE_SIZE);

	ret = strlen(buf);

bail:
	return ret;
}
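/*
 * Each set bit in *statusp selects the string at the same index in
 * qib_status_str[]; unnamed bits (empty strings) contribute no text.
 * For example, a port with Initted, Present, IB_link_up and
 * IB_configured set reads back as:
 *
 *   Initted Present IB_link_up IB_configured
 */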
/* end of per-port functions */

/*
 * Start of per-port file structures and support code
 * Because we are fitting into other infrastructure, we have to supply the
 * full set of kobject/sysfs_ops structures and routines.
 */
#define QIB_PORT_ATTR(name, mode, show, store) \
	static struct qib_port_attr qib_port_attr_##name = \
		__ATTR(name, mode, show, store)

struct qib_port_attr {
	struct attribute attr;
	ssize_t (*show)(struct qib_pportdata *, char *);
	ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
};

QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
	      store_hrtbt_enb);
QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);

static struct attribute *port_default_attributes[] = {
	&qib_port_attr_loopback.attr,
	&qib_port_attr_led_override.attr,
	&qib_port_attr_hrtbt_enable.attr,
	&qib_port_attr_status.attr,
	&qib_port_attr_status_str.attr,
	NULL
};
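/*
 * These per-port attributes surface under the "linkcontrol" kobject
 * registered in qib_create_port_files(), typically as:
 *
 *   /sys/class/infiniband/qibN/ports/P/linkcontrol/loopback
 *   /sys/class/infiniband/qibN/ports/P/linkcontrol/led_override
 *   /sys/class/infiniband/qibN/ports/P/linkcontrol/hrtbt_enable
 *   /sys/class/infiniband/qibN/ports/P/linkcontrol/status
 *   /sys/class/infiniband/qibN/ports/P/linkcontrol/status_str
 *
 * (Illustrative paths; the device and port directory names are chosen
 * by the IB core.)
 */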
/*
 * Start of per-port congestion control structures and support code
 */

/*
 * Congestion control table size followed by table entries
 */
static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	int ret;
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_cc_kobj);

	if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
		return -EINVAL;

	ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
		+ sizeof(__be16);

	if (pos > ret)
		return -EINVAL;

	if (count > ret - pos)
		count = ret - pos;

	if (!count)
		return count;

	spin_lock(&ppd->cc_shadow_lock);
	memcpy(buf, ppd->ccti_entries_shadow, count);
	spin_unlock(&ppd->cc_shadow_lock);

	return count;
}

static void qib_port_release(struct kobject *kobj)
{
	/* nothing to do since memory is freed by qib_free_devdata() */
}

static struct kobj_type qib_port_cc_ktype = {
	.release = qib_port_release,
};

static const struct bin_attribute cc_table_bin_attr = {
	.attr = {.name = "cc_table_bin", .mode = 0444},
	.read = read_cc_table_bin,
	.size = PAGE_SIZE,
};

/*
 * Congestion settings: port control, control map and an array of 16
 * entries for the congestion entries - increase, timer, event log
 * trigger threshold and the minimum injection rate delay.
 */
static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buf, loff_t pos, size_t count)
{
	int ret;
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_cc_kobj);

	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return -EINVAL;

	ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);

	if (pos > ret)
		return -EINVAL;

	if (count > ret - pos)
		count = ret - pos;

	if (!count)
		return count;

	spin_lock(&ppd->cc_shadow_lock);
	memcpy(buf, ppd->congestion_entries_shadow, count);
	spin_unlock(&ppd->cc_shadow_lock);

	return count;
}

static const struct bin_attribute cc_setting_bin_attr = {
	.attr = {.name = "cc_settings_bin", .mode = 0444},
	.read = read_cc_setting_bin,
	.size = PAGE_SIZE,
};

static ssize_t qib_portattr_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct qib_port_attr *pattr =
		container_of(attr, struct qib_port_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_kobj);

	return pattr->show(ppd, buf);
}

static ssize_t qib_portattr_store(struct kobject *kobj,
	struct attribute *attr, const char *buf, size_t len)
{
	struct qib_port_attr *pattr =
		container_of(attr, struct qib_port_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_kobj);

	return pattr->store(ppd, buf, len);
}

static const struct sysfs_ops qib_port_ops = {
	.show = qib_portattr_show,
	.store = qib_portattr_store,
};

static struct kobj_type qib_port_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_port_ops,
	.default_attrs = port_default_attributes
};

/* Start sl2vl */

#define QIB_SL2VL_ATTR(N) \
	static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0444 }, \
		.sl = N \
	}

struct qib_sl2vl_attr {
	struct attribute attr;
	int sl;
};

QIB_SL2VL_ATTR(0);
QIB_SL2VL_ATTR(1);
QIB_SL2VL_ATTR(2);
QIB_SL2VL_ATTR(3);
QIB_SL2VL_ATTR(4);
QIB_SL2VL_ATTR(5);
QIB_SL2VL_ATTR(6);
QIB_SL2VL_ATTR(7);
QIB_SL2VL_ATTR(8);
QIB_SL2VL_ATTR(9);
QIB_SL2VL_ATTR(10);
QIB_SL2VL_ATTR(11);
QIB_SL2VL_ATTR(12);
QIB_SL2VL_ATTR(13);
QIB_SL2VL_ATTR(14);
QIB_SL2VL_ATTR(15);

static struct attribute *sl2vl_default_attributes[] = {
	&qib_sl2vl_attr_0.attr,
	&qib_sl2vl_attr_1.attr,
	&qib_sl2vl_attr_2.attr,
	&qib_sl2vl_attr_3.attr,
	&qib_sl2vl_attr_4.attr,
	&qib_sl2vl_attr_5.attr,
	&qib_sl2vl_attr_6.attr,
	&qib_sl2vl_attr_7.attr,
	&qib_sl2vl_attr_8.attr,
	&qib_sl2vl_attr_9.attr,
	&qib_sl2vl_attr_10.attr,
	&qib_sl2vl_attr_11.attr,
	&qib_sl2vl_attr_12.attr,
	&qib_sl2vl_attr_13.attr,
	&qib_sl2vl_attr_14.attr,
	&qib_sl2vl_attr_15.attr,
	NULL
};

static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct qib_sl2vl_attr *sattr =
		container_of(attr, struct qib_sl2vl_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, sl2vl_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;

	return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
}

static const struct sysfs_ops qib_sl2vl_ops = {
	.show = sl2vl_attr_show,
};

static struct kobj_type qib_sl2vl_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_sl2vl_ops,
	.default_attrs = sl2vl_default_attributes
};

/* End sl2vl */

/* Start diag_counters */

#define QIB_DIAGC_ATTR(N) \
	static struct qib_diagc_attr qib_diagc_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0664 }, \
		.counter = offsetof(struct qib_ibport, rvp.n_##N) \
	}

#define QIB_DIAGC_ATTR_PER_CPU(N) \
	static struct qib_diagc_attr qib_diagc_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0664 }, \
		.counter = offsetof(struct qib_ibport, rvp.z_##N) \
	}

struct qib_diagc_attr {
	struct attribute attr;
	size_t counter;
};

QIB_DIAGC_ATTR_PER_CPU(rc_acks);
QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);

QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
QIB_DIAGC_ATTR(other_naks);
QIB_DIAGC_ATTR(rc_timeouts);
QIB_DIAGC_ATTR(loop_pkts);
QIB_DIAGC_ATTR(pkt_drops);
QIB_DIAGC_ATTR(dmawait);
QIB_DIAGC_ATTR(unaligned);
QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);

static struct attribute *diagc_default_attributes[] = {
	&qib_diagc_attr_rc_resends.attr,
	&qib_diagc_attr_rc_acks.attr,
	&qib_diagc_attr_rc_qacks.attr,
	&qib_diagc_attr_rc_delayed_comp.attr,
	&qib_diagc_attr_seq_naks.attr,
	&qib_diagc_attr_rdma_seq.attr,
	&qib_diagc_attr_rnr_naks.attr,
	&qib_diagc_attr_other_naks.attr,
	&qib_diagc_attr_rc_timeouts.attr,
	&qib_diagc_attr_loop_pkts.attr,
	&qib_diagc_attr_pkt_drops.attr,
	&qib_diagc_attr_dmawait.attr,
	&qib_diagc_attr_unaligned.attr,
	&qib_diagc_attr_rc_dupreq.attr,
	&qib_diagc_attr_rc_seqnak.attr,
	NULL
};

static u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}

#define def_write_per_cpu(cntr) \
static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data)	\
{									\
	struct qib_devdata *dd = ppd->dd;				\
	struct qib_ibport *qibp = &ppd->ibport_data;			\
	/* A write can only zero the counter */				\
	if (data == 0)							\
		qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
	else								\
		qib_dev_err(dd, "Per CPU cntrs can only be zeroed");	\
}

def_write_per_cpu(rc_acks)
def_write_per_cpu(rc_qacks)
def_write_per_cpu(rc_delayed_comp)

#define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
			qibp->rvp.z_##cntr)
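/*
 * The per-CPU counters are never reset in place: writing 0 records the
 * current all-CPU total as the z_* baseline, and reads report the
 * all-CPU total minus that baseline.
 */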
static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct qib_diagc_attr *dattr =
		container_of(attr, struct qib_diagc_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, diagc_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;

	if (!strncmp(dattr->attr.name, "rc_acks", 7))
		return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_acks));
	else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
		return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_qacks));
	else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
		return sprintf(buf, "%llu\n",
			       READ_PER_CPU_CNTR(rc_delayed_comp));
	else
		return sprintf(buf, "%u\n",
			       *(u32 *)((char *)qibp + dattr->counter));
}

static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t size)
{
	struct qib_diagc_attr *dattr =
		container_of(attr, struct qib_diagc_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, diagc_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;
	u32 val;
	int ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	if (!strncmp(dattr->attr.name, "rc_acks", 7))
		write_per_cpu_rc_acks(ppd, val);
	else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
		write_per_cpu_rc_qacks(ppd, val);
	else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
		write_per_cpu_rc_delayed_comp(ppd, val);
	else
		*(u32 *)((char *)qibp + dattr->counter) = val;
	return size;
}
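/*
 * Illustrative usage (paths assume unit 0, port 1):
 *
 *   cat /sys/class/infiniband/qib0/ports/1/diag_counters/rc_acks
 *   echo 0 > /sys/class/infiniband/qib0/ports/1/diag_counters/rc_acks
 *
 * The per-CPU counters (rc_acks, rc_qacks, rc_delayed_comp) only accept
 * a write of 0; the remaining counters accept any u32 value.
 */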
static const struct sysfs_ops qib_diagc_ops = {
	.show = diagc_attr_show,
	.store = diagc_attr_store,
};

static struct kobj_type qib_diagc_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_diagc_ops,
	.default_attrs = diagc_default_attributes
};

/* End diag_counters */

/* end of per-port file structures and support code */

/*
 * Start of per-unit (or driver, in some cases, but replicated
 * per unit) functions (these get a device *)
 */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);

	return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int ret;

	if (!dd->boardname)
		ret = -EINVAL;
	else
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
	return ret;
}
static DEVICE_ATTR_RO(hca_type);
static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);

static ssize_t version_show(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	/* The string printed here is already newline-terminated. */
	return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
}
static DEVICE_ATTR_RO(version);

static ssize_t boardversion_show(struct device *device,
				 struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* The string printed here is already newline-terminated. */
	return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
static DEVICE_ATTR_RO(boardversion);

static ssize_t localbus_info_show(struct device *device,
				  struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* The string printed here is already newline-terminated. */
	return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
}
static DEVICE_ATTR_RO(localbus_info);

static ssize_t nctxts_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* Return the number of user ports (contexts) available. */
	/* The calculation below deals with a special case where
	 * cfgctxts is set to 1 on a single-port board. */
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
			 (dd->cfgctxts - dd->first_user_ctxt));
}
static DEVICE_ATTR_RO(nctxts);

static ssize_t nfreectxts_show(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* Return the number of free user ports (contexts) available. */
	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}
static DEVICE_ATTR_RO(nfreectxts);

static ssize_t serial_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);

	buf[sizeof(dd->serial)] = '\0';
	memcpy(buf, dd->serial, sizeof(dd->serial));
	strcat(buf, "\n");
	return strlen(buf);
}
static DEVICE_ATTR_RO(serial);

static ssize_t chip_reset_store(struct device *device,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int ret;

	if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
		ret = -EINVAL;
		goto bail;
	}

	ret = qib_reset_device(dd->unit);
bail:
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_WO(chip_reset);

/*
 * Dump tempsense regs. in decimal, to ease shell-scripts.
 */
static ssize_t tempsense_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int ret;
	int idx;
	u8 regvals[8];

	ret = -ENXIO;
	for (idx = 0; idx < 8; ++idx) {
		if (idx == 6)
			continue;
		ret = dd->f_tempsense_rd(dd, idx);
		if (ret < 0)
			break;
		regvals[idx] = ret;
	}
	if (idx == 8)
		ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
				*(signed char *)(regvals),
				*(signed char *)(regvals + 1),
				regvals[2], regvals[3],
				*(signed char *)(regvals + 5),
				*(signed char *)(regvals + 7));
	return ret;
}
static DEVICE_ATTR_RO(tempsense);
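/*
 * The tempsense output is six space-separated fields taken from
 * registers 0, 1, 2, 3, 5 and 7: registers 2 and 3 are printed in hex
 * (%02X), the rest as signed decimal; registers 4 and 6 are not shown.
 */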
/*
 * end of per-unit (or driver, in some cases, but replicated
 * per unit) functions
 */

/* start of per-unit file structures and support code */
static struct attribute *qib_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	&dev_attr_version.attr,
	&dev_attr_nctxts.attr,
	&dev_attr_nfreectxts.attr,
	&dev_attr_serial.attr,
	&dev_attr_boardversion.attr,
	&dev_attr_tempsense.attr,
	&dev_attr_localbus_info.attr,
	&dev_attr_chip_reset.attr,
	NULL,
};

const struct attribute_group qib_attr_group = {
	.attrs = qib_attributes,
};
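/*
 * qib_attr_group is attached to the IB device itself rather than to a
 * port, so these attributes typically appear directly under
 * /sys/class/infiniband/qibN/.
 */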
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
			  struct kobject *kobj)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (!port_num || port_num > dd->num_pports) {
		qib_dev_err(dd,
			"Skipping infiniband class with invalid port %u\n",
			port_num);
		ret = -ENODEV;
		goto bail;
	}
	ppd = &dd->pport[port_num - 1];

	ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
				   "linkcontrol");
	if (ret) {
		qib_dev_err(dd,
			"Skipping linkcontrol sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail;
	}
	kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
				   "sl2vl");
	if (ret) {
		qib_dev_err(dd,
			"Skipping sl2vl sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_link;
	}
	kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
				   "diag_counters");
	if (ret) {
		qib_dev_err(dd,
			"Skipping diag_counters sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_sl;
	}
	kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);

	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return 0;

	ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
				   kobj, "CCMgtA");
	if (ret) {
		qib_dev_err(dd,
			"Skipping Congestion Control sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_diagc;
	}

	kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);

	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				    &cc_setting_bin_attr);
	if (ret) {
		qib_dev_err(dd,
			"Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_cc;
	}

	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				    &cc_table_bin_attr);
	if (ret) {
		qib_dev_err(dd,
			"Skipping Congestion Control table sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_cc_entry_bin;
	}

	qib_devinfo(dd->pcidev,
		"IB%u: Congestion Control Agent enabled for port %d\n",
		dd->unit, port_num);

	return 0;

bail_cc_entry_bin:
	sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
bail_cc:
	kobject_put(&ppd->pport_cc_kobj);
bail_diagc:
	kobject_put(&ppd->diagc_kobj);
bail_sl:
	kobject_put(&ppd->sl2vl_kobj);
bail_link:
	kobject_put(&ppd->pport_kobj);
bail:
	return ret;
}

/*
 * Unregister and remove our files in /sys/class/infiniband.
 */
void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i;

	for (i = 0; i < dd->num_pports; i++) {
		ppd = &dd->pport[i];
		if (qib_cc_table_size &&
		    ppd->congestion_entries_shadow) {
			sysfs_remove_bin_file(&ppd->pport_cc_kobj,
					      &cc_setting_bin_attr);
			sysfs_remove_bin_file(&ppd->pport_cc_kobj,
					      &cc_table_bin_attr);
			kobject_put(&ppd->pport_cc_kobj);
		}
		kobject_put(&ppd->sl2vl_kobj);
		kobject_put(&ppd->pport_kobj);
	}
}