/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/slab.h>
  16. #include <linux/err.h>
  17. #include <linux/clk.h>
  18. #include <linux/io.h>
  19. #include <linux/sched.h>
  20. #include <asm/irq.h>
  21. #include <mach/hardware.h>
  22. #include "../w1.h"
  23. #include "../w1_int.h"
#define	MOD_NAME	"OMAP_HDQ:"

/* HDQ controller register offsets and bit definitions */
#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	(1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	(1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO			(1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	(1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR		(1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE		(1<<0)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		(1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		(1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		(1<<0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		(1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		(1<<0)
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		(1<<0)

/* arguments for hdq_wait_for_flag(): wait for flag cleared vs. set */
#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
/* maximum time to wait for an HDQ event, in jiffies (200 ms) */
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

/* woken by hdq_isr() when a TX/RX/timeout interrupt arrives */
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
/* module parameter: id reported during 1-wire slave search */
static int w1_id;
/* per-device driver state, allocated in omap_hdq_probe() */
struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;	/* ioremapped register base */
	/* lock status update */
	struct mutex		hdq_mutex;
	int			hdq_usecount;	/* active users, max OMAP_HDQ_MAX_USER */
	struct clk		*hdq_ick;	/* interface clock */
	struct clk		*hdq_fck;	/* functional clock */
	u8			hdq_irqstatus;	/* last INT_STATUS read by the ISR */
	/* device lock */
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
};
static int __devinit omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

/* platform glue: binds this driver to the "omap_hdq" platform device */
static struct platform_driver omap_hdq_driver = {
	.probe =	omap_hdq_probe,
	.remove =	omap_hdq_remove,
	.driver = {
		.name =	"omap_hdq",
	},
};
static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found);

/* 1-wire framework callbacks implemented by this HDQ master */
static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
	.search		= omap_w1_search_bus,
};
  88. /* HDQ register I/O routines */
  89. static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
  90. {
  91. return __raw_readb(hdq_data->hdq_base + offset);
  92. }
  93. static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
  94. {
  95. __raw_writeb(val, hdq_data->hdq_base + offset);
  96. }
  97. static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
  98. u8 val, u8 mask)
  99. {
  100. u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
  101. | (val & mask);
  102. __raw_writeb(new_val, hdq_data->hdq_base + offset);
  103. return new_val;
  104. }
  105. /*
  106. * Wait for one or more bits in flag change.
  107. * HDQ_FLAG_SET: wait until any bit in the flag is set.
  108. * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
  109. * return 0 on success and -ETIMEDOUT in the case of timeout.
  110. */
  111. static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
  112. u8 flag, u8 flag_set, u8 *status)
  113. {
  114. int ret = 0;
  115. unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
  116. if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
  117. /* wait for the flag clear */
  118. while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
  119. && time_before(jiffies, timeout)) {
  120. schedule_timeout_uninterruptible(1);
  121. }
  122. if (*status & flag)
  123. ret = -ETIMEDOUT;
  124. } else if (flag_set == OMAP_HDQ_FLAG_SET) {
  125. /* wait for the flag set */
  126. while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
  127. && time_before(jiffies, timeout)) {
  128. schedule_timeout_uninterruptible(1);
  129. }
  130. if (!(*status & flag))
  131. ret = -ETIMEDOUT;
  132. } else
  133. return -EINVAL;
  134. return ret;
  135. }
/*
 * write out a byte and fill *status with HDQ_INT_STATUS
 *
 * Returns 0 on success, -ETIMEDOUT if the TXCOMPLETE interrupt never
 * arrives or the GO bit does not return to zero in time.
 */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit; DIR is cleared in the same write -> TX direction */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit; the ISR wakes us with irqstatus set */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus: a wakeup without TXCOMPLETE counts as a timeout */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x", tmp_status);
	}

out:
	return ret;
}
  180. /* HDQ Interrupt service routine */
  181. static irqreturn_t hdq_isr(int irq, void *_hdq)
  182. {
  183. struct hdq_data *hdq_data = _hdq;
  184. unsigned long irqflags;
  185. spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
  186. hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  187. spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
  188. dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
  189. if (hdq_data->hdq_irqstatus &
  190. (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
  191. | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
  192. /* wake up sleeping process */
  193. wake_up(&hdq_wait_queue);
  194. }
  195. return IRQ_HANDLED;
  196. }
  197. /* HDQ Mode: always return success */
  198. static u8 omap_w1_reset_bus(void *_hdq)
  199. {
  200. return 0;
  201. }
  202. /* W1 search callback function */
  203. static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
  204. u8 search_type, w1_slave_found_callback slave_found)
  205. {
  206. u64 module_id, rn_le, cs, id;
  207. if (w1_id)
  208. module_id = w1_id;
  209. else
  210. module_id = 0x1;
  211. rn_le = cpu_to_le64(module_id);
  212. /*
  213. * HDQ might not obey truly the 1-wire spec.
  214. * So calculate CRC based on module parameter.
  215. */
  216. cs = w1_calc_crc8((u8 *)&rn_le, 7);
  217. id = (cs << 56) | module_id;
  218. slave_found(master_dev, id);
  219. }
  220. static int _omap_hdq_reset(struct hdq_data *hdq_data)
  221. {
  222. int ret;
  223. u8 tmp_status;
  224. hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
  225. /*
  226. * Select HDQ mode & enable clocks.
  227. * It is observed that INT flags can't be cleared via a read and GO/INIT
  228. * won't return to zero if interrupt is disabled. So we always enable
  229. * interrupt.
  230. */
  231. hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
  232. OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
  233. OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
  234. /* wait for reset to complete */
  235. ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
  236. OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
  237. if (ret)
  238. dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
  239. tmp_status);
  240. else {
  241. hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
  242. OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
  243. OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
  244. hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
  245. OMAP_HDQ_SYSCONFIG_AUTOIDLE);
  246. }
  247. return ret;
  248. }
  249. /* Issue break pulse to the device */
  250. static int omap_hdq_break(struct hdq_data *hdq_data)
  251. {
  252. int ret = 0;
  253. u8 tmp_status;
  254. unsigned long irqflags;
  255. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  256. if (ret < 0) {
  257. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  258. ret = -EINTR;
  259. goto rtn;
  260. }
  261. spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
  262. /* clear interrupt flags via a dummy read */
  263. hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  264. /* ISR loads it with new INT_STATUS */
  265. hdq_data->hdq_irqstatus = 0;
  266. spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
  267. /* set the INIT and GO bit */
  268. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
  269. OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
  270. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
  271. OMAP_HDQ_CTRL_STATUS_GO);
  272. /* wait for the TIMEOUT bit */
  273. ret = wait_event_timeout(hdq_wait_queue,
  274. hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
  275. if (ret == 0) {
  276. dev_dbg(hdq_data->dev, "break wait elapsed\n");
  277. ret = -EINTR;
  278. goto out;
  279. }
  280. tmp_status = hdq_data->hdq_irqstatus;
  281. /* check irqstatus */
  282. if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
  283. dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
  284. tmp_status);
  285. ret = -ETIMEDOUT;
  286. goto out;
  287. }
  288. /*
  289. * wait for both INIT and GO bits rerurn to zero.
  290. * zero wait time expected for interrupt mode.
  291. */
  292. ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
  293. OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
  294. OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
  295. &tmp_status);
  296. if (ret)
  297. dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
  298. " return to zero, %x", tmp_status);
  299. out:
  300. mutex_unlock(&hdq_data->hdq_mutex);
  301. rtn:
  302. return ret;
  303. }
/*
 * Read one byte from the HDQ RX register into *val.
 * If the RX-complete interrupt has not already fired (the RX normally
 * follows the preceding TX immediately), trigger a read cycle and wait
 * for it.  Returns 0 on success, -EINTR if the mutex cannot be taken,
 * -EINVAL if the module is not in use, -ETIMEDOUT on RX timeout.
 */
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	/* clocks are only running while the use count is non-zero */
	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		/* start a read cycle: DIR = read, GO = start */
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 * The timeout itself is deliberately not checked here; the
		 * RXCOMPLETE test on irqstatus below catches a timed-out wait.
		 */
		wait_event_timeout(hdq_wait_queue,
			(hdq_data->hdq_irqstatus
			& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
			OMAP_HDQ_TIMEOUT);
		/* restore write direction for subsequent transfers */
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
  346. /* Enable clocks and set the controller to HDQ mode */
  347. static int omap_hdq_get(struct hdq_data *hdq_data)
  348. {
  349. int ret = 0;
  350. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  351. if (ret < 0) {
  352. ret = -EINTR;
  353. goto rtn;
  354. }
  355. if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
  356. dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
  357. ret = -EINVAL;
  358. goto out;
  359. } else {
  360. hdq_data->hdq_usecount++;
  361. try_module_get(THIS_MODULE);
  362. if (1 == hdq_data->hdq_usecount) {
  363. if (clk_enable(hdq_data->hdq_ick)) {
  364. dev_dbg(hdq_data->dev, "Can not enable ick\n");
  365. ret = -ENODEV;
  366. goto clk_err;
  367. }
  368. if (clk_enable(hdq_data->hdq_fck)) {
  369. dev_dbg(hdq_data->dev, "Can not enable fck\n");
  370. clk_disable(hdq_data->hdq_ick);
  371. ret = -ENODEV;
  372. goto clk_err;
  373. }
  374. /* make sure HDQ is out of reset */
  375. if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
  376. OMAP_HDQ_SYSSTATUS_RESETDONE)) {
  377. ret = _omap_hdq_reset(hdq_data);
  378. if (ret)
  379. /* back up the count */
  380. hdq_data->hdq_usecount--;
  381. } else {
  382. /* select HDQ mode & enable clocks */
  383. hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
  384. OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
  385. OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
  386. hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
  387. OMAP_HDQ_SYSCONFIG_AUTOIDLE);
  388. hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  389. }
  390. }
  391. }
  392. clk_err:
  393. clk_put(hdq_data->hdq_ick);
  394. clk_put(hdq_data->hdq_fck);
  395. out:
  396. mutex_unlock(&hdq_data->hdq_mutex);
  397. rtn:
  398. return ret;
  399. }
  400. /* Disable clocks to the module */
  401. static int omap_hdq_put(struct hdq_data *hdq_data)
  402. {
  403. int ret = 0;
  404. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  405. if (ret < 0)
  406. return -EINTR;
  407. if (0 == hdq_data->hdq_usecount) {
  408. dev_dbg(hdq_data->dev, "attempt to decrement use count"
  409. " when it is zero");
  410. ret = -EINVAL;
  411. } else {
  412. hdq_data->hdq_usecount--;
  413. module_put(THIS_MODULE);
  414. if (0 == hdq_data->hdq_usecount) {
  415. clk_disable(hdq_data->hdq_ick);
  416. clk_disable(hdq_data->hdq_fck);
  417. }
  418. }
  419. mutex_unlock(&hdq_data->hdq_mutex);
  420. return ret;
  421. }
  422. /* Read a byte of data from the device */
  423. static u8 omap_w1_read_byte(void *_hdq)
  424. {
  425. struct hdq_data *hdq_data = _hdq;
  426. u8 val = 0;
  427. int ret;
  428. ret = hdq_read_byte(hdq_data, &val);
  429. if (ret) {
  430. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  431. if (ret < 0) {
  432. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  433. return -EINTR;
  434. }
  435. hdq_data->init_trans = 0;
  436. mutex_unlock(&hdq_data->hdq_mutex);
  437. omap_hdq_put(hdq_data);
  438. return -1;
  439. }
  440. /* Write followed by a read, release the module */
  441. if (hdq_data->init_trans) {
  442. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  443. if (ret < 0) {
  444. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  445. return -EINTR;
  446. }
  447. hdq_data->init_trans = 0;
  448. mutex_unlock(&hdq_data->hdq_mutex);
  449. omap_hdq_put(hdq_data);
  450. }
  451. return val;
  452. }
  453. /* Write a byte of data to the device */
  454. static void omap_w1_write_byte(void *_hdq, u8 byte)
  455. {
  456. struct hdq_data *hdq_data = _hdq;
  457. int ret;
  458. u8 status;
  459. /* First write to initialize the transfer */
  460. if (hdq_data->init_trans == 0)
  461. omap_hdq_get(hdq_data);
  462. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  463. if (ret < 0) {
  464. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  465. return;
  466. }
  467. hdq_data->init_trans++;
  468. mutex_unlock(&hdq_data->hdq_mutex);
  469. ret = hdq_write_byte(hdq_data, byte, &status);
  470. if (ret < 0) {
  471. dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
  472. return;
  473. }
  474. /* Second write, data transferred. Release the module */
  475. if (hdq_data->init_trans > 1) {
  476. omap_hdq_put(hdq_data);
  477. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  478. if (ret < 0) {
  479. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  480. return;
  481. }
  482. hdq_data->init_trans = 0;
  483. mutex_unlock(&hdq_data->hdq_mutex);
  484. }
  485. return;
  486. }
  487. static int __devinit omap_hdq_probe(struct platform_device *pdev)
  488. {
  489. struct hdq_data *hdq_data;
  490. struct resource *res;
  491. int ret, irq;
  492. u8 rev;
  493. hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
  494. if (!hdq_data) {
  495. dev_dbg(&pdev->dev, "unable to allocate memory\n");
  496. ret = -ENOMEM;
  497. goto err_kmalloc;
  498. }
  499. hdq_data->dev = &pdev->dev;
  500. platform_set_drvdata(pdev, hdq_data);
  501. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  502. if (!res) {
  503. dev_dbg(&pdev->dev, "unable to get resource\n");
  504. ret = -ENXIO;
  505. goto err_resource;
  506. }
  507. hdq_data->hdq_base = ioremap(res->start, SZ_4K);
  508. if (!hdq_data->hdq_base) {
  509. dev_dbg(&pdev->dev, "ioremap failed\n");
  510. ret = -EINVAL;
  511. goto err_ioremap;
  512. }
  513. /* get interface & functional clock objects */
  514. hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
  515. if (IS_ERR(hdq_data->hdq_ick)) {
  516. dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
  517. ret = PTR_ERR(hdq_data->hdq_ick);
  518. goto err_ick;
  519. }
  520. hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
  521. if (IS_ERR(hdq_data->hdq_fck)) {
  522. dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
  523. ret = PTR_ERR(hdq_data->hdq_fck);
  524. goto err_fck;
  525. }
  526. hdq_data->hdq_usecount = 0;
  527. mutex_init(&hdq_data->hdq_mutex);
  528. if (clk_enable(hdq_data->hdq_ick)) {
  529. dev_dbg(&pdev->dev, "Can not enable ick\n");
  530. ret = -ENODEV;
  531. goto err_intfclk;
  532. }
  533. if (clk_enable(hdq_data->hdq_fck)) {
  534. dev_dbg(&pdev->dev, "Can not enable fck\n");
  535. ret = -ENODEV;
  536. goto err_fnclk;
  537. }
  538. rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
  539. dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
  540. (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
  541. spin_lock_init(&hdq_data->hdq_spinlock);
  542. irq = platform_get_irq(pdev, 0);
  543. if (irq < 0) {
  544. ret = -ENXIO;
  545. goto err_irq;
  546. }
  547. ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
  548. if (ret < 0) {
  549. dev_dbg(&pdev->dev, "could not request irq\n");
  550. goto err_irq;
  551. }
  552. omap_hdq_break(hdq_data);
  553. /* don't clock the HDQ until it is needed */
  554. clk_disable(hdq_data->hdq_ick);
  555. clk_disable(hdq_data->hdq_fck);
  556. omap_w1_master.data = hdq_data;
  557. ret = w1_add_master_device(&omap_w1_master);
  558. if (ret) {
  559. dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
  560. goto err_w1;
  561. }
  562. return 0;
  563. err_w1:
  564. err_irq:
  565. clk_disable(hdq_data->hdq_fck);
  566. err_fnclk:
  567. clk_disable(hdq_data->hdq_ick);
  568. err_intfclk:
  569. clk_put(hdq_data->hdq_fck);
  570. err_fck:
  571. clk_put(hdq_data->hdq_ick);
  572. err_ick:
  573. iounmap(hdq_data->hdq_base);
  574. err_ioremap:
  575. err_resource:
  576. platform_set_drvdata(pdev, NULL);
  577. kfree(hdq_data);
  578. err_kmalloc:
  579. return ret;
  580. }
  581. static int omap_hdq_remove(struct platform_device *pdev)
  582. {
  583. struct hdq_data *hdq_data = platform_get_drvdata(pdev);
  584. mutex_lock(&hdq_data->hdq_mutex);
  585. if (hdq_data->hdq_usecount) {
  586. dev_dbg(&pdev->dev, "removed when use count is not zero\n");
  587. mutex_unlock(&hdq_data->hdq_mutex);
  588. return -EBUSY;
  589. }
  590. mutex_unlock(&hdq_data->hdq_mutex);
  591. /* remove module dependency */
  592. clk_put(hdq_data->hdq_ick);
  593. clk_put(hdq_data->hdq_fck);
  594. free_irq(INT_24XX_HDQ_IRQ, hdq_data);
  595. platform_set_drvdata(pdev, NULL);
  596. iounmap(hdq_data->hdq_base);
  597. kfree(hdq_data);
  598. return 0;
  599. }
/* register the platform driver on module load */
static int __init
omap_hdq_init(void)
{
	return platform_driver_register(&omap_hdq_driver);
}
module_init(omap_hdq_init);

/* unregister the platform driver on module unload */
static void __exit
omap_hdq_exit(void)
{
	platform_driver_unregister(&omap_hdq_driver);
}
module_exit(omap_hdq_exit);

/* w1_id is read-only from userspace; consumed by omap_w1_search_bus() */
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");