spidev.c
/*
 * Simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <linux/uaccess.h>

/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers.  There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned.  We allocate minor numbers
 * dynamically using a bitmask.  You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
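
/*
 * For illustration only (an editorial sketch, not part of the original
 * driver sources): once a node such as /dev/spidev0.0 exists, a minimal
 * userspace caller might look like the following.  The device path, the
 * mode choice, and the transfer contents are made-up example values.
 *
 *	#include <stdint.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/spi/spidev.h>
 *
 *	int fd = open("/dev/spidev0.0", O_RDWR);
 *
 *	uint8_t mode = SPI_MODE_0;
 *	ioctl(fd, SPI_IOC_WR_MODE, &mode);	// handled by spidev_ioctl() below
 *
 *	uint8_t tx[2] = { 0x90, 0x00 }, rx[2];
 *	struct spi_ioc_transfer xfer = {
 *		.tx_buf = (unsigned long)tx,	// full duplex: tx and rx in one transfer
 *		.rx_buf = (unsigned long)rx,
 *		.len    = sizeof(tx),
 *	};
 *	ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);	// routed to spidev_message()
 */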
#define SPIDEV_MAJOR		153	/* assigned */
#define N_SPI_MINORS		32	/* ... up to 256 */

static DECLARE_BITMAP(minors, N_SPI_MINORS);

/* Bit masks for spi_device.mode management.  Note that incorrect
 * settings for some of these bits can cause *lots* of trouble for other
 * devices on a shared bus:
 *
 *  - CS_HIGH ... this device will be active when it shouldn't be
 *  - 3WIRE ... when active, it won't behave as it should
 *  - NO_CS ... there will be no explicit message boundaries; this
 *	is completely incompatible with the shared bus model
 *  - READY ... transfers may proceed when they shouldn't.
 *
 * REVISIT should changing those flags be privileged?
 */
#define SPI_MODE_MASK		(SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
				| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
				| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
				| SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)

struct spidev_data {
	dev_t			devt;
	spinlock_t		spi_lock;
	struct spi_device	*spi;
	struct list_head	device_entry;

	/* TX/RX buffers are NULL unless this device is open (users > 0) */
	struct mutex		buf_lock;
	unsigned		users;
	u8			*tx_buffer;
	u8			*rx_buffer;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
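
/*
 * Illustrative note (editorial, not from the original sources): since bufsiz
 * is exported read-only (S_IRUGO), a larger per-message limit has to be
 * requested when the module is loaded, for example:
 *
 *	modprobe spidev bufsiz=65536
 */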

/*-------------------------------------------------------------------------*/

/*
 * We can't use the standard synchronous wrappers for file I/O; we
 * need to protect against async removal of the underlying spi_device.
 */
static void spidev_complete(void *arg)
{
	complete(arg);
}

static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;

	message->complete = spidev_complete;
	message->context = &done;

	spin_lock_irq(&spidev->spi_lock);
	if (spidev->spi == NULL)
		status = -ESHUTDOWN;
	else
		status = spi_async(spidev->spi, message);
	spin_unlock_irq(&spidev->spi_lock);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
		if (status == 0)
			status = message->actual_length;
	}
	return status;
}

static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
	struct spi_transfer t = {
		.tx_buf	= spidev->tx_buffer,
		.len	= len,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}

static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
	struct spi_transfer t = {
		.rx_buf	= spidev->rx_buffer,
		.len	= len,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct spidev_data *spidev;
	ssize_t status = 0;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz)
		return -EMSGSIZE;

	spidev = filp->private_data;

	mutex_lock(&spidev->buf_lock);
	status = spidev_sync_read(spidev, count);
	if (status > 0) {
		unsigned long missing;

		missing = copy_to_user(buf, spidev->rx_buffer, status);
		if (missing == status)
			status = -EFAULT;
		else
			status = status - missing;
	}
	mutex_unlock(&spidev->buf_lock);

	return status;
}

/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct spidev_data *spidev;
	ssize_t status = 0;
	unsigned long missing;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz)
		return -EMSGSIZE;

	spidev = filp->private_data;

	mutex_lock(&spidev->buf_lock);
	missing = copy_from_user(spidev->tx_buffer, buf, count);
	if (missing == 0)
		status = spidev_sync_write(spidev, count);
	else
		status = -EFAULT;
	mutex_unlock(&spidev->buf_lock);

	return status;
}

static int spidev_message(struct spidev_data *spidev,
		struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
	struct spi_message msg;
	struct spi_transfer *k_xfers;
	struct spi_transfer *k_tmp;
	struct spi_ioc_transfer *u_tmp;
	unsigned n, total;
	u8 *tx_buf, *rx_buf;
	int status = -EFAULT;

	spi_message_init(&msg);
	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
	if (k_xfers == NULL)
		return -ENOMEM;

	/* Construct spi_message, copying any tx data to bounce buffer.
	 * We walk the array of user-provided transfers, using each one
	 * to initialize a kernel version of the same transfer.
	 */
	tx_buf = spidev->tx_buffer;
	rx_buf = spidev->rx_buffer;
	total = 0;
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		k_tmp->len = u_tmp->len;

		total += k_tmp->len;
		if (total > bufsiz) {
			status = -EMSGSIZE;
			goto done;
		}

		if (u_tmp->rx_buf) {
			k_tmp->rx_buf = rx_buf;
			if (!access_ok(VERIFY_WRITE, (u8 __user *)
						(uintptr_t) u_tmp->rx_buf,
						u_tmp->len))
				goto done;
		}
		if (u_tmp->tx_buf) {
			k_tmp->tx_buf = tx_buf;
			if (copy_from_user(tx_buf, (const u8 __user *)
						(uintptr_t) u_tmp->tx_buf,
						u_tmp->len))
				goto done;
		}
		tx_buf += k_tmp->len;
		rx_buf += k_tmp->len;

		k_tmp->cs_change = !!u_tmp->cs_change;
		k_tmp->tx_nbits = u_tmp->tx_nbits;
		k_tmp->rx_nbits = u_tmp->rx_nbits;
		k_tmp->bits_per_word = u_tmp->bits_per_word;
		k_tmp->delay_usecs = u_tmp->delay_usecs;
		k_tmp->speed_hz = u_tmp->speed_hz;
#ifdef VERBOSE
		dev_dbg(&spidev->spi->dev,
			"  xfer len %zd %s%s%s%dbits %u usec %uHz\n",
			u_tmp->len,
			u_tmp->rx_buf ? "rx " : "",
			u_tmp->tx_buf ? "tx " : "",
			u_tmp->cs_change ? "cs " : "",
			u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
			u_tmp->delay_usecs,
			u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
		spi_message_add_tail(k_tmp, &msg);
	}

	status = spidev_sync(spidev, &msg);
	if (status < 0)
		goto done;

	/* copy any rx data out of bounce buffer */
	rx_buf = spidev->rx_buffer;
	for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
		if (u_tmp->rx_buf) {
			if (__copy_to_user((u8 __user *)
					(uintptr_t) u_tmp->rx_buf, rx_buf,
					u_tmp->len)) {
				status = -EFAULT;
				goto done;
			}
		}
		rx_buf += u_tmp->len;
	}
	status = total;

done:
	kfree(k_xfers);
	return status;
}

static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	int retval = 0;
	struct spidev_data *spidev;
	struct spi_device *spi;
	u32 tmp;
	unsigned n_ioc;
	struct spi_ioc_transfer *ioc;

	/* Check type and command number */
	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
		return -ENOTTY;

	/* Check access direction once here; don't repeat below.
	 * IOC_DIR is from the user perspective, while access_ok is
	 * from the kernel perspective; so they look reversed.
	 */
	if (_IOC_DIR(cmd) & _IOC_READ)
		err = !access_ok(VERIFY_WRITE,
				(void __user *)arg, _IOC_SIZE(cmd));
	if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
		err = !access_ok(VERIFY_READ,
				(void __user *)arg, _IOC_SIZE(cmd));
	if (err)
		return -EFAULT;

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	spin_lock_irq(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL)
		return -ESHUTDOWN;

	/* use the buffer lock here for triple duty:
	 *  - prevent I/O (from us) so calling spi_setup() is safe;
	 *  - prevent concurrent SPI_IOC_WR_* from morphing
	 *    data fields while SPI_IOC_RD_* reads them;
	 *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
	 */
	mutex_lock(&spidev->buf_lock);

	switch (cmd) {
	/* read requests */
	case SPI_IOC_RD_MODE:
		retval = __put_user(spi->mode & SPI_MODE_MASK,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MODE32:
		retval = __put_user(spi->mode & SPI_MODE_MASK,
					(__u32 __user *)arg);
		break;
	case SPI_IOC_RD_LSB_FIRST:
		retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_BITS_PER_WORD:
		retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MAX_SPEED_HZ:
		retval = __put_user(spi->max_speed_hz, (__u32 __user *)arg);
		break;

	/* write requests */
	case SPI_IOC_WR_MODE:
	case SPI_IOC_WR_MODE32:
		if (cmd == SPI_IOC_WR_MODE)
			retval = __get_user(tmp, (u8 __user *)arg);
		else
			retval = __get_user(tmp, (u32 __user *)arg);
		if (retval == 0) {
			u32 save = spi->mode;

			if (tmp & ~SPI_MODE_MASK) {
				retval = -EINVAL;
				break;
			}

			tmp |= spi->mode & ~SPI_MODE_MASK;
			spi->mode = (u16)tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;
			else
				dev_dbg(&spi->dev, "spi mode %x\n", tmp);
		}
		break;
	case SPI_IOC_WR_LSB_FIRST:
		retval = __get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u32 save = spi->mode;

			if (tmp)
				spi->mode |= SPI_LSB_FIRST;
			else
				spi->mode &= ~SPI_LSB_FIRST;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;
			else
				dev_dbg(&spi->dev, "%csb first\n",
						tmp ? 'l' : 'm');
		}
		break;
	case SPI_IOC_WR_BITS_PER_WORD:
		retval = __get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u8 save = spi->bits_per_word;

			spi->bits_per_word = tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->bits_per_word = save;
			else
				dev_dbg(&spi->dev, "%d bits per word\n", tmp);
		}
		break;
	case SPI_IOC_WR_MAX_SPEED_HZ:
		retval = __get_user(tmp, (__u32 __user *)arg);
		if (retval == 0) {
			u32 save = spi->max_speed_hz;

			spi->max_speed_hz = tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->max_speed_hz = save;
			else
				dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
		}
		break;

	default:
		/* segmented and/or full-duplex I/O request */
		if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
				|| _IOC_DIR(cmd) != _IOC_WRITE) {
			retval = -ENOTTY;
			break;
		}
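
		/* For illustration (an editorial note, not in the original
		 * sources): userspace typically issues something like
		 * ioctl(fd, SPI_IOC_MESSAGE(n), xfers) with an array of n
		 * struct spi_ioc_transfer; SPI_IOC_MESSAGE(n) encodes the
		 * array size in the ioctl size field, which is recovered
		 * below via _IOC_SIZE(cmd).
		 */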
		tmp = _IOC_SIZE(cmd);
		if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
			retval = -EINVAL;
			break;
		}
		n_ioc = tmp / sizeof(struct spi_ioc_transfer);
		if (n_ioc == 0)
			break;

		/* copy into scratch area */
		ioc = kmalloc(tmp, GFP_KERNEL);
		if (!ioc) {
			retval = -ENOMEM;
			break;
		}
		if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
			kfree(ioc);
			retval = -EFAULT;
			break;
		}

		/* translate to spi_message, execute */
		retval = spidev_message(spidev, ioc, n_ioc);
		kfree(ioc);
		break;
	}

	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	return retval;
}

#ifdef CONFIG_COMPAT
static long
spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define spidev_compat_ioctl NULL
#endif /* CONFIG_COMPAT */

static int spidev_open(struct inode *inode, struct file *filp)
{
	struct spidev_data *spidev;
	int status = -ENXIO;

	mutex_lock(&device_list_lock);

	list_for_each_entry(spidev, &device_list, device_entry) {
		if (spidev->devt == inode->i_rdev) {
			status = 0;
			break;
		}
	}

	if (status) {
		pr_debug("spidev: nothing for minor %d\n", iminor(inode));
		goto err_find_dev;
	}

	if (!spidev->tx_buffer) {
		spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
		if (!spidev->tx_buffer) {
			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
			status = -ENOMEM;
			goto err_find_dev;
		}
	}

	if (!spidev->rx_buffer) {
		spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
		if (!spidev->rx_buffer) {
			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
			status = -ENOMEM;
			goto err_alloc_rx_buf;
		}
	}

	spidev->users++;
	filp->private_data = spidev;
	nonseekable_open(inode, filp);

	mutex_unlock(&device_list_lock);
	return 0;

err_alloc_rx_buf:
	kfree(spidev->tx_buffer);
	spidev->tx_buffer = NULL;
err_find_dev:
	mutex_unlock(&device_list_lock);
	return status;
}

static int spidev_release(struct inode *inode, struct file *filp)
{
	struct spidev_data *spidev;
	int status = 0;

	mutex_lock(&device_list_lock);
	spidev = filp->private_data;
	filp->private_data = NULL;

	/* last close? */
	spidev->users--;
	if (!spidev->users) {
		int dofree;

		kfree(spidev->tx_buffer);
		spidev->tx_buffer = NULL;

		kfree(spidev->rx_buffer);
		spidev->rx_buffer = NULL;

		/* ... after we unbound from the underlying device? */
		spin_lock_irq(&spidev->spi_lock);
		dofree = (spidev->spi == NULL);
		spin_unlock_irq(&spidev->spi_lock);

		if (dofree)
			kfree(spidev);
	}
	mutex_unlock(&device_list_lock);

	return status;
}

static const struct file_operations spidev_fops = {
	.owner =	THIS_MODULE,
	/* REVISIT switch to aio primitives, so that userspace
	 * gets more complete API coverage.  It'll simplify things
	 * too, except for the locking.
	 */
	.write =	spidev_write,
	.read =		spidev_read,
	.unlocked_ioctl = spidev_ioctl,
	.compat_ioctl = spidev_compat_ioctl,
	.open =		spidev_open,
	.release =	spidev_release,
	.llseek =	no_llseek,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */
static struct class *spidev_class;

/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
	struct spidev_data *spidev;
	int status;
	unsigned long minor;

	/* Allocate driver data */
	spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
	if (!spidev)
		return -ENOMEM;

	/* Initialize the driver data */
	spidev->spi = spi;
	spin_lock_init(&spidev->spi_lock);
	mutex_init(&spidev->buf_lock);

	INIT_LIST_HEAD(&spidev->device_entry);

	/* If we can allocate a minor number, hook up this device.
	 * Reusing minors is fine so long as udev or mdev is working.
	 */
	mutex_lock(&device_list_lock);
	minor = find_first_zero_bit(minors, N_SPI_MINORS);
	if (minor < N_SPI_MINORS) {
		struct device *dev;

		spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
		dev = device_create(spidev_class, &spi->dev, spidev->devt,
				    spidev, "spidev%d.%d",
				    spi->master->bus_num, spi->chip_select);
		status = PTR_ERR_OR_ZERO(dev);
	} else {
		dev_dbg(&spi->dev, "no minor number available!\n");
		status = -ENODEV;
	}
	if (status == 0) {
		set_bit(minor, minors);
		list_add(&spidev->device_entry, &device_list);
	}
	mutex_unlock(&device_list_lock);

	if (status == 0)
		spi_set_drvdata(spi, spidev);
	else
		kfree(spidev);

	return status;
}

static int spidev_remove(struct spi_device *spi)
{
	struct spidev_data *spidev = spi_get_drvdata(spi);

	/* make sure ops on existing fds can abort cleanly */
	spin_lock_irq(&spidev->spi_lock);
	spidev->spi = NULL;
	spin_unlock_irq(&spidev->spi_lock);

	/* prevent new opens */
	mutex_lock(&device_list_lock);
	list_del(&spidev->device_entry);
	device_destroy(spidev_class, spidev->devt);
	clear_bit(MINOR(spidev->devt), minors);
	if (spidev->users == 0)
		kfree(spidev);
	mutex_unlock(&device_list_lock);

	return 0;
}

static const struct of_device_id spidev_dt_ids[] = {
	{ .compatible = "rohm,dh2228fv" },
	{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
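
/*
 * For illustration only (the node name, reg value, and clock rate are
 * assumptions for the example, not from the original sources): a device
 * tree node that binds against the table above might look like:
 *
 *	spidev@0 {
 *		compatible = "rohm,dh2228fv";
 *		reg = <0>;
 *		spi-max-frequency = <500000>;
 *	};
 */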

static struct spi_driver spidev_spi_driver = {
	.driver = {
		.name =		"spidev",
		.owner =	THIS_MODULE,
		.of_match_table = of_match_ptr(spidev_dt_ids),
	},
	.probe =	spidev_probe,
	.remove =	spidev_remove,

	/* NOTE:  suspend/resume methods are not necessary here.
	 * We don't do anything except pass the requests to/from
	 * the underlying controller.  The refrigerator handles
	 * most issues; the controller driver handles the rest.
	 */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
	int status;

	/* Claim our 256 reserved device numbers.  Then register a class
	 * that will key udev/mdev to add/remove /dev nodes.  Last, register
	 * the driver which manages those device numbers.
	 */
	BUILD_BUG_ON(N_SPI_MINORS > 256);
	status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
	if (status < 0)
		return status;

	spidev_class = class_create(THIS_MODULE, "spidev");
	if (IS_ERR(spidev_class)) {
		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
		return PTR_ERR(spidev_class);
	}

	status = spi_register_driver(&spidev_spi_driver);
	if (status < 0) {
		class_destroy(spidev_class);
		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
	}
	return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
	spi_unregister_driver(&spidev_spi_driver);
	class_destroy(spidev_class);
	unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:spidev");