/* mlxreg-hotplug.c - Mellanox regmap-based hotplug platform driver. */
  1. /*
  2. * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
  3. * Copyright (c) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * 2. Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in the
  12. * documentation and/or other materials provided with the distribution.
  13. * 3. Neither the names of the copyright holders nor the names of its
  14. * contributors may be used to endorse or promote products derived from
  15. * this software without specific prior written permission.
  16. *
  17. * Alternatively, this software may be distributed under the terms of the
  18. * GNU General Public License ("GPL") version 2 as published by the Free
  19. * Software Foundation.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  22. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  23. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  24. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  25. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  26. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  27. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  28. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  29. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  30. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  31. * POSSIBILITY OF SUCH DAMAGE.
  32. */
  33. #include <linux/bitops.h>
  34. #include <linux/device.h>
  35. #include <linux/hwmon.h>
  36. #include <linux/hwmon-sysfs.h>
  37. #include <linux/i2c.h>
  38. #include <linux/interrupt.h>
  39. #include <linux/module.h>
  40. #include <linux/of_device.h>
  41. #include <linux/platform_data/mlxreg.h>
  42. #include <linux/platform_device.h>
  43. #include <linux/spinlock.h>
  44. #include <linux/regmap.h>
  45. #include <linux/workqueue.h>
  46. /* Offset of event and mask registers from status register. */
  47. #define MLXREG_HOTPLUG_EVENT_OFF 1
  48. #define MLXREG_HOTPLUG_MASK_OFF 2
  49. #define MLXREG_HOTPLUG_AGGR_MASK_OFF 1
  50. /* ASIC good health mask. */
  51. #define MLXREG_HOTPLUG_GOOD_HEALTH_MASK 0x02
  52. #define MLXREG_HOTPLUG_ATTRS_MAX 24
  53. #define MLXREG_HOTPLUG_NOT_ASSERT 3
/**
 * struct mlxreg_hotplug_priv_data - platform private data:
 * @irq: platform device interrupt number;
 * @dev: basic device;
 * @pdev: platform device;
 * @plat: platform data;
 * @regmap: register map handle;
 * @dwork_irq: delayed work template;
 * @lock: spin lock;
 * @hwmon: hwmon device;
 * @mlxreg_hotplug_attr: sysfs attributes array;
 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
 * @group: sysfs attribute group;
 * @groups: list of sysfs attribute group for hwmon registration;
 * @cell: location of top aggregation interrupt register;
 * @mask: top aggregation interrupt common mask;
 * @aggr_cache: last value of aggregation register status;
 * @after_probe: flag indication probing completion;
 * @not_asserted: number of entries in workqueue with no signal assertion;
 */
struct mlxreg_hotplug_priv_data {
	int irq;
	struct device *dev;
	struct platform_device *pdev;
	struct mlxreg_hotplug_platform_data *plat;
	struct regmap *regmap;
	struct delayed_work dwork_irq;
	spinlock_t lock; /* sync with interrupt */
	struct device *hwmon;
	/* One extra slot keeps the attribute array NULL-terminated. */
	struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
	struct sensor_device_attribute_2
			mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
	struct attribute_group group;
	const struct attribute_group *groups[2];
	u32 cell;
	u32 mask;
	u32 aggr_cache;
	bool after_probe;
	u8 not_asserted;
};
  94. static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
  95. struct mlxreg_core_data *data)
  96. {
  97. struct mlxreg_core_hotplug_platform_data *pdata;
  98. /* Notify user by sending hwmon uevent. */
  99. kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);
  100. /*
  101. * Return if adapter number is negative. It could be in case hotplug
  102. * event is not associated with hotplug device.
  103. */
  104. if (data->hpdev.nr < 0)
  105. return 0;
  106. pdata = dev_get_platdata(&priv->pdev->dev);
  107. data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
  108. pdata->shift_nr);
  109. if (!data->hpdev.adapter) {
  110. dev_err(priv->dev, "Failed to get adapter for bus %d\n",
  111. data->hpdev.nr + pdata->shift_nr);
  112. return -EFAULT;
  113. }
  114. data->hpdev.client = i2c_new_device(data->hpdev.adapter,
  115. data->hpdev.brdinfo);
  116. if (!data->hpdev.client) {
  117. dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
  118. data->hpdev.brdinfo->type, data->hpdev.nr +
  119. pdata->shift_nr, data->hpdev.brdinfo->addr);
  120. i2c_put_adapter(data->hpdev.adapter);
  121. data->hpdev.adapter = NULL;
  122. return -EFAULT;
  123. }
  124. return 0;
  125. }
/*
 * mlxreg_hotplug_device_destroy() - counterpart of device_create(): notify
 * userspace, unregister the I2C client (if any) and drop the adapter
 * reference. Both pointers are cleared so the routine is idempotent.
 */
static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
			      struct mlxreg_core_data *data)
{
	/* Notify user by sending hwmon uevent. */
	kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);

	/* Client must go before the adapter reference is released. */
	if (data->hpdev.client) {
		i2c_unregister_device(data->hpdev.client);
		data->hpdev.client = NULL;
	}

	if (data->hpdev.adapter) {
		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
	}
}
  141. static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
  142. struct device_attribute *attr,
  143. char *buf)
  144. {
  145. struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
  146. struct mlxreg_core_hotplug_platform_data *pdata;
  147. int index = to_sensor_dev_attr_2(attr)->index;
  148. int nr = to_sensor_dev_attr_2(attr)->nr;
  149. struct mlxreg_core_item *item;
  150. struct mlxreg_core_data *data;
  151. u32 regval;
  152. int ret;
  153. pdata = dev_get_platdata(&priv->pdev->dev);
  154. item = pdata->items + nr;
  155. data = item->data + index;
  156. ret = regmap_read(priv->regmap, data->reg, &regval);
  157. if (ret)
  158. return ret;
  159. if (item->health) {
  160. regval &= data->mask;
  161. } else {
  162. /* Bit = 0 : functional if item->inversed is true. */
  163. if (item->inversed)
  164. regval = !(regval & data->mask);
  165. else
  166. regval = !!(regval & data->mask);
  167. }
  168. return sprintf(buf, "%u\n", regval);
  169. }
  170. #define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
  171. #define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]
  172. static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
  173. {
  174. struct mlxreg_core_hotplug_platform_data *pdata;
  175. struct mlxreg_core_item *item;
  176. struct mlxreg_core_data *data;
  177. int num_attrs = 0, id = 0, i, j;
  178. pdata = dev_get_platdata(&priv->pdev->dev);
  179. item = pdata->items;
  180. /* Go over all kinds of items - psu, pwr, fan. */
  181. for (i = 0; i < pdata->counter; i++, item++) {
  182. num_attrs += item->count;
  183. data = item->data;
  184. /* Go over all units within the item. */
  185. for (j = 0; j < item->count; j++, data++, id++) {
  186. PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
  187. PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
  188. GFP_KERNEL,
  189. data->label);
  190. if (!PRIV_ATTR(id)->name) {
  191. dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
  192. id);
  193. return -ENOMEM;
  194. }
  195. PRIV_DEV_ATTR(id).dev_attr.attr.name =
  196. PRIV_ATTR(id)->name;
  197. PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
  198. PRIV_DEV_ATTR(id).dev_attr.show =
  199. mlxreg_hotplug_attr_show;
  200. PRIV_DEV_ATTR(id).nr = i;
  201. PRIV_DEV_ATTR(id).index = j;
  202. sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
  203. }
  204. }
  205. priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
  206. num_attrs,
  207. sizeof(struct attribute *),
  208. GFP_KERNEL);
  209. if (!priv->group.attrs)
  210. return -ENOMEM;
  211. priv->group.attrs = priv->mlxreg_hotplug_attr;
  212. priv->groups[0] = &priv->group;
  213. priv->groups[1] = NULL;
  214. return 0;
  215. }
  216. static void
  217. mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
  218. struct mlxreg_core_item *item)
  219. {
  220. struct mlxreg_core_data *data;
  221. u32 asserted, regval, bit;
  222. int ret;
  223. /*
  224. * Validate if item related to received signal type is valid.
  225. * It should never happen, excepted the situation when some
  226. * piece of hardware is broken. In such situation just produce
  227. * error message and return. Caller must continue to handle the
  228. * signals from other devices if any.
  229. */
  230. if (unlikely(!item)) {
  231. dev_err(priv->dev, "False signal: at offset:mask 0x%02x:0x%02x.\n",
  232. item->reg, item->mask);
  233. return;
  234. }
  235. /* Mask event. */
  236. ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
  237. 0);
  238. if (ret)
  239. goto out;
  240. /* Read status. */
  241. ret = regmap_read(priv->regmap, item->reg, &regval);
  242. if (ret)
  243. goto out;
  244. /* Set asserted bits and save last status. */
  245. regval &= item->mask;
  246. asserted = item->cache ^ regval;
  247. item->cache = regval;
  248. for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
  249. data = item->data + bit;
  250. if (regval & BIT(bit)) {
  251. if (item->inversed)
  252. mlxreg_hotplug_device_destroy(priv, data);
  253. else
  254. mlxreg_hotplug_device_create(priv, data);
  255. } else {
  256. if (item->inversed)
  257. mlxreg_hotplug_device_create(priv, data);
  258. else
  259. mlxreg_hotplug_device_destroy(priv, data);
  260. }
  261. }
  262. /* Acknowledge event. */
  263. ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
  264. 0);
  265. if (ret)
  266. goto out;
  267. /* Unmask event. */
  268. ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
  269. item->mask);
  270. out:
  271. if (ret)
  272. dev_err(priv->dev, "Failed to complete workqueue.\n");
  273. }
/*
 * mlxreg_hotplug_health_work_helper() - handle ASIC health signals. For
 * each health register: mask its event, read and compare the masked status
 * against the cached value, attach/detach the associated device on a good/
 * bad transition, then acknowledge and unmask. Any regmap failure aborts
 * the loop and is reported once at "out".
 */
static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
				  struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data = item->data;
	u32 regval;
	int i, ret = 0;

	for (i = 0; i < item->count; i++, data++) {
		/* Mask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, 0);
		if (ret)
			goto out;

		/* Read status. */
		ret = regmap_read(priv->regmap, data->reg, &regval);
		if (ret)
			goto out;

		regval &= data->mask;

		/* Unchanged health state: just re-arm the event. */
		if (item->cache == regval)
			goto ack_event;

		/*
		 * ASIC health indication is provided through two bits. Bits
		 * value 0x2 indicates that ASIC reached the good health, value
		 * 0x0 indicates ASIC the bad health or dormant state and value
		 * 0x3 indicates the booting state. During ASIC reset it should
		 * pass the following states: dormant -> booting -> good.
		 */
		if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
			if (!data->attached) {
				/*
				 * ASIC is in steady state. Connect associated
				 * device, if configured.
				 */
				mlxreg_hotplug_device_create(priv, data);
				data->attached = true;
			}
		} else {
			if (data->attached) {
				/*
				 * ASIC health is failed after ASIC has been
				 * in steady state. Disconnect associated
				 * device, if it has been connected.
				 */
				mlxreg_hotplug_device_destroy(priv, data);
				data->attached = false;
				data->health_cntr = 0;
			}
		}
		item->cache = regval;
ack_event:
		/* Acknowledge event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/* Unmask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, data->mask);
		if (ret)
			goto out;
	}

out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}
  339. /*
  340. * mlxreg_hotplug_work_handler - performs traversing of device interrupt
  341. * registers according to the below hierarchy schema:
  342. *
  343. * Aggregation registers (status/mask)
  344. * PSU registers: *---*
  345. * *-----------------* | |
  346. * |status/event/mask|-----> | * |
  347. * *-----------------* | |
  348. * Power registers: | |
  349. * *-----------------* | |
  350. * |status/event/mask|-----> | * |
  351. * *-----------------* | |
  352. * FAN registers: | |--> CPU
  353. * *-----------------* | |
  354. * |status/event/mask|-----> | * |
  355. * *-----------------* | |
  356. * ASIC registers: | |
  357. * *-----------------* | |
  358. * |status/event/mask|-----> | * |
  359. * *-----------------* | |
  360. * *---*
  361. *
  362. * In case some system changed are detected: FAN in/out, PSU in/out, power
  363. * cable attached/detached, ASIC health good/bad, relevant device is created
  364. * or destroyed.
  365. */
static void mlxreg_hotplug_work_handler(struct work_struct *work)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct mlxreg_core_item *item;
	u32 regval, aggr_asserted;
	unsigned long flags;
	int i, ret;

	priv = container_of(work, struct mlxreg_hotplug_priv_data,
			    dwork_irq.work);
	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Mask aggregation event. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
	/*
	 * NOTE(review): other checks here use "if (ret)"; regmap_write
	 * returns 0 or a negative errno, so the behavior is the same.
	 */
	if (ret < 0)
		goto out;

	/* Read aggregation status. */
	ret = regmap_read(priv->regmap, pdata->cell, &regval);
	if (ret)
		goto out;

	/* Newly toggled aggregation bits versus the cached snapshot. */
	regval &= pdata->mask;
	aggr_asserted = priv->aggr_cache ^ regval;
	priv->aggr_cache = regval;

	/*
	 * Handler is invoked, but no assertion is detected at top aggregation
	 * status level. Set aggr_asserted to mask value to allow handler extra
	 * run over all relevant signals to recover any missed signal.
	 */
	if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
		priv->not_asserted = 0;
		aggr_asserted = pdata->mask;
	}
	if (!aggr_asserted)
		goto unmask_event;

	/* Handle topology and health configuration changes. */
	for (i = 0; i < pdata->counter; i++, item++) {
		if (aggr_asserted & item->aggr_mask) {
			if (item->health)
				mlxreg_hotplug_health_work_helper(priv, item);
			else
				mlxreg_hotplug_work_helper(priv, item);
		}
	}

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * It is possible, that some signals have been inserted, while
	 * interrupt has been masked by mlxreg_hotplug_work_handler. In this
	 * case such signals will be missed. In order to handle these signals
	 * delayed work is canceled and work task re-scheduled for immediate
	 * execution. It allows to handle missed signals, if any. In other case
	 * work handler just validates that no new signals have been received
	 * during masking.
	 */
	cancel_delayed_work(&priv->dwork_irq);
	schedule_delayed_work(&priv->dwork_irq, 0);

	spin_unlock_irqrestore(&priv->lock, flags);

	return;

unmask_event:
	priv->not_asserted++;
	/* Unmask aggregation event (no need acknowledge). */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}
/*
 * mlxreg_hotplug_set_irq() - program the event/mask registers for all item
 * groups and the aggregation cell(s), run the work handler once to attach
 * already-present devices, then enable the interrupt line. The interrupt
 * is enabled even on failure (it was disabled by the caller in probe).
 */
static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	int i, ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	for (i = 0; i < pdata->counter; i++, item++) {
		/* Clear group presence event. */
		ret = regmap_write(priv->regmap, item->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/* Set group initial status as mask and unmask group event. */
		if (item->inversed) {
			item->cache = item->mask;
			ret = regmap_write(priv->regmap, item->reg +
					   MLXREG_HOTPLUG_MASK_OFF,
					   item->mask);
			if (ret)
				goto out;
		}
	}

	/* Keep aggregation initial status as zero and unmask events. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
	if (ret)
		goto out;

	/* Keep low aggregation initial status as zero and unmask events. */
	if (pdata->cell_low) {
		ret = regmap_write(priv->regmap, pdata->cell_low +
				   MLXREG_HOTPLUG_AGGR_MASK_OFF,
				   pdata->mask_low);
		if (ret)
			goto out;
	}

	/* Invoke work handler for initializing hot plug devices setting. */
	mlxreg_hotplug_work_handler(&priv->dwork_irq.work);

out:
	if (ret)
		dev_err(priv->dev, "Failed to set interrupts.\n");
	enable_irq(priv->irq);
	return ret;
}
/*
 * mlxreg_hotplug_unset_irq() - reverse of set_irq(): disable the interrupt
 * line, flush pending work, mask/clear all events and detach every device
 * that was created. regmap failures are ignored here - teardown must run
 * to completion regardless.
 */
static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int count, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;
	disable_irq(priv->irq);
	cancel_delayed_work_sync(&priv->dwork_irq);

	/* Mask low aggregation event, if defined. */
	if (pdata->cell_low)
		regmap_write(priv->regmap, pdata->cell_low +
			     MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);

	/* Mask aggregation event. */
	regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
		     0);

	/* Clear topology configurations. */
	for (i = 0; i < pdata->counter; i++, item++) {
		data = item->data;
		/* Mask group presence event. */
		regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
			     0);
		/* Clear group presence event. */
		regmap_write(priv->regmap, data->reg +
			     MLXREG_HOTPLUG_EVENT_OFF, 0);

		/* Remove all the attached devices in group. */
		count = item->count;
		for (j = 0; j < count; j++, data++)
			mlxreg_hotplug_device_destroy(priv, data);
	}
}
  509. static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
  510. {
  511. struct mlxreg_hotplug_priv_data *priv;
  512. priv = (struct mlxreg_hotplug_priv_data *)dev;
  513. /* Schedule work task for immediate execution.*/
  514. schedule_delayed_work(&priv->dwork_irq, 0);
  515. return IRQ_HANDLED;
  516. }
  517. static int mlxreg_hotplug_probe(struct platform_device *pdev)
  518. {
  519. struct mlxreg_core_hotplug_platform_data *pdata;
  520. struct mlxreg_hotplug_priv_data *priv;
  521. struct i2c_adapter *deferred_adap;
  522. int err;
  523. pdata = dev_get_platdata(&pdev->dev);
  524. if (!pdata) {
  525. dev_err(&pdev->dev, "Failed to get platform data.\n");
  526. return -EINVAL;
  527. }
  528. /* Defer probing if the necessary adapter is not configured yet. */
  529. deferred_adap = i2c_get_adapter(pdata->deferred_nr);
  530. if (!deferred_adap)
  531. return -EPROBE_DEFER;
  532. i2c_put_adapter(deferred_adap);
  533. priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
  534. if (!priv)
  535. return -ENOMEM;
  536. if (pdata->irq) {
  537. priv->irq = pdata->irq;
  538. } else {
  539. priv->irq = platform_get_irq(pdev, 0);
  540. if (priv->irq < 0) {
  541. dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
  542. priv->irq);
  543. return priv->irq;
  544. }
  545. }
  546. priv->regmap = pdata->regmap;
  547. priv->dev = pdev->dev.parent;
  548. priv->pdev = pdev;
  549. err = devm_request_irq(&pdev->dev, priv->irq,
  550. mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
  551. | IRQF_SHARED, "mlxreg-hotplug", priv);
  552. if (err) {
  553. dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
  554. return err;
  555. }
  556. disable_irq(priv->irq);
  557. spin_lock_init(&priv->lock);
  558. INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
  559. dev_set_drvdata(&pdev->dev, priv);
  560. err = mlxreg_hotplug_attr_init(priv);
  561. if (err) {
  562. dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
  563. err);
  564. return err;
  565. }
  566. priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
  567. "mlxreg_hotplug", priv, priv->groups);
  568. if (IS_ERR(priv->hwmon)) {
  569. dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
  570. PTR_ERR(priv->hwmon));
  571. return PTR_ERR(priv->hwmon);
  572. }
  573. /* Perform initial interrupts setup. */
  574. mlxreg_hotplug_set_irq(priv);
  575. priv->after_probe = true;
  576. return 0;
  577. }
/*
 * mlxreg_hotplug_remove() - driver removal: mask all events and destroy
 * the attached hotplug devices; devm releases the remaining resources.
 */
static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

	/* Clean interrupts setup. */
	mlxreg_hotplug_unset_irq(priv);

	return 0;
}
/* Platform driver glue: binds to the "mlxreg-hotplug" platform device. */
static struct platform_driver mlxreg_hotplug_driver = {
	.driver = {
		.name = "mlxreg-hotplug",
	},
	.probe = mlxreg_hotplug_probe,
	.remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");