nuvoton-cir.c

/*
 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
 *
 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
 * Copyright (C) 2009 Nuvoton PS Team
 *
 * Special thanks to Nuvoton for providing hardware, spec sheets and
 * sample code upon which portions of this driver are based. Indirect
 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
 * modeled after.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include <linux/pci_ids.h>

#include "nuvoton-cir.h"

static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);

static const struct nvt_chip nvt_chips[] = {
	{ "w83667hg", NVT_W83667HG },
	{ "NCT6775F", NVT_6775F },
	{ "NCT6776F", NVT_6776F },
	{ "NCT6779D", NVT_6779D },
};

static inline struct device *nvt_get_dev(const struct nvt_dev *nvt)
{
	return nvt->rdev->dev.parent;
}

static inline bool is_w83667hg(struct nvt_dev *nvt)
{
	return nvt->chip_ver == NVT_W83667HG;
}

/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}

/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}

/* set a config register bit without changing other bits */
static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) | val;

	nvt_cr_write(nvt, tmp, reg);
}

/* clear a config register bit without changing other bits */
static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) & ~val;

	nvt_cr_write(nvt, tmp, reg);
}

/* enter extended function mode */
static inline int nvt_efm_enable(struct nvt_dev *nvt)
{
	if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
		return -EBUSY;

	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);

	return 0;
}

/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	outb(EFER_EFM_DISABLE, nvt->cr_efir);

	release_region(nvt->cr_efir, 2);
}

/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable it by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
 */
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
}

/* select and enable a logical device, entering and leaving EFM mode */
static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* select and disable a logical device, entering and leaving EFM mode */
static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}
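
/*
 * Typical usage of the helpers above (an illustrative sketch mirroring
 * cir_dump_regs() further down, not an additional code path): every Super
 * I/O config register access is bracketed by EFM entry/exit, with the
 * target logical device selected in between, e.g.:
 *
 *	nvt_efm_enable(nvt);
 *	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 *	active = nvt_cr_read(nvt, CR_LOGICAL_DEV_EN);
 *	nvt_efm_disable(nvt);
 */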

/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
	outb(val, nvt->cir_addr + offset);
}

/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_addr + offset);
}

/* write val to cir wake register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
					  u8 val, u8 offset)
{
	outb(val, nvt->cir_wake_addr + offset);
}

/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_wake_addr + offset);
}

/* don't override io address if one is set already */
static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
{
	unsigned long old_addr;

	old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
	old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);

	if (old_addr)
		*ioaddr = old_addr;
	else {
		nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
		nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
	}
}

static void nvt_write_wakeup_codes(struct rc_dev *dev,
				   const u8 *wbuf, int count)
{
	u8 tolerance, config;
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	int i;

	/* hardcode the tolerance to 10% */
	tolerance = DIV_ROUND_UP(count, 10);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt_clear_cir_wake_fifo(nvt);
	nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
	nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* enable writes to wake fifo */
	nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
			       CIR_WAKE_IRCON);

	if (count)
		pr_info("Wake samples (%d) =", count);
	else
		pr_info("Wake sample fifo cleared");

	for (i = 0; i < count; i++)
		nvt_cir_wake_reg_write(nvt, wbuf[i], CIR_WAKE_WR_FIFO_DATA);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

static ssize_t wakeup_data_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	struct nvt_dev *nvt = rc_dev->priv;
	int fifo_len, duration;
	unsigned long flags;
	ssize_t buf_len = 0;
	int i;

	spin_lock_irqsave(&nvt->lock, flags);

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);

	/* go to first element to be read */
	while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);

	for (i = 0; i < fifo_len; i++) {
		duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
		duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len,
				    "%d ", duration);
	}
	buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");

	spin_unlock_irqrestore(&nvt->lock, flags);

	return buf_len;
}

static ssize_t wakeup_data_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	u8 wake_buf[WAKEUP_MAX_SIZE];
	char **argv;
	int i, count;
	unsigned int val;
	ssize_t ret;

	argv = argv_split(GFP_KERNEL, buf, &count);
	if (!argv)
		return -ENOMEM;
	if (!count || count > WAKEUP_MAX_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < count; i++) {
		ret = kstrtouint(argv[i], 10, &val);
		if (ret)
			goto out;
		val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
		if (!val || val > 0x7f) {
			ret = -EINVAL;
			goto out;
		}
		wake_buf[i] = val;
		/* sequence must start with a pulse */
		if (i % 2 == 0)
			wake_buf[i] |= BUF_PULSE_BIT;
	}

	nvt_write_wakeup_codes(rc_dev, wake_buf, count);

	ret = len;
out:
	argv_free(argv);
	return ret;
}

static DEVICE_ATTR_RW(wakeup_data);
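
/*
 * Illustrative usage sketch for the wakeup_data attribute (not part of the
 * driver; the exact sysfs path depends on how the rc device registers and
 * is assumed here to be rc0). Values are space-separated durations in
 * microseconds; each one is rounded to SAMPLE_PERIOD, must fit in 7 bits
 * after conversion, and entries at even positions are stored as pulses:
 *
 *	# hypothetical example
 *	echo "500 500 1000 500" > /sys/class/rc/rc0/wakeup_data
 *	cat /sys/class/rc/rc0/wakeup_data
 */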

/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	pr_info(" * CR CIR ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	pr_info(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	pr_info(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	pr_info(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	pr_info(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	pr_info(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	pr_info(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	pr_info(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	pr_info(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	pr_info(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	pr_info(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	pr_info(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	pr_info(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	pr_info(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	pr_info(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}

/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	pr_info("%s: Dump CIR WAKE logical device registers:\n",
		NVT_DRIVER_NAME);
	pr_info(" * CR CIR WAKE ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR WAKE IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	pr_info(" * IRSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	pr_info(" * IREN: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	pr_info(" * FIFO CMP DEEP: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	pr_info(" * FIFO CMP TOL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	pr_info(" * FIFO COUNT: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	pr_info(" * SLCH: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	pr_info(" * SLCL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	pr_info(" * FIFOCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	pr_info(" * SRXFSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	pr_info(" * SAMPLE RX FIFO: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	pr_info(" * WR FIFO DATA: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	pr_info(" * RD FIFO ONLY: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	pr_info(" * FIFO IGNORE: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	pr_info(" * IRFSM: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	pr_info("* Contents =");
	for (i = 0; i < fifo_len; i++)
		pr_cont(" %02x",
			nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_cont("\n");
}

static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nvt_chips); i++)
		if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) {
			nvt->chip_ver = nvt_chips[i].chip_ver;
			return nvt_chips[i].name;
		}

	return NULL;
}

/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
	struct device *dev = nvt_get_dev(nvt);
	const char *chip_name;
	int chip_id;

	nvt_efm_enable(nvt);

	/* Check if we're wired for the alternate EFER setup */
	nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	if (nvt->chip_major == 0xff) {
		nvt_efm_disable(nvt);
		nvt->cr_efir = CR_EFIR2;
		nvt->cr_efdr = CR_EFDR2;
		nvt_efm_enable(nvt);
		nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	}
	nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);

	nvt_efm_disable(nvt);

	chip_id = nvt->chip_major << 8 | nvt->chip_minor;
	if (chip_id == NVT_INVALID) {
		dev_err(dev, "No device found on either EFM port\n");
		return -ENODEV;
	}

	chip_name = nvt_find_chip(nvt, chip_id);

	/* warn, but still let the driver load, if we don't know this chip */
	if (!chip_name)
		dev_warn(dev,
			 "unknown chip, id: 0x%02x 0x%02x, it may not work...",
			 nvt->chip_major, nvt->chip_minor);
	else
		dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x",
			 chip_name, nvt->chip_major, nvt->chip_minor);

	return 0;
}

static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val, psreg, psmask, psval;

	if (is_w83667hg(nvt)) {
		psreg = CR_MULTIFUNC_PIN_SEL;
		psmask = MULTIFUNC_PIN_SEL_MASK;
		psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
	} else {
		psreg = CR_OUTPUT_PIN_SEL;
		psmask = OUTPUT_PIN_SEL_MASK;
		psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
	}

	/* output pin selection: enable CIR, with WB sensor enabled */
	val = nvt_cr_read(nvt, psreg);
	val &= psmask;
	val |= psval;
	nvt_cr_write(nvt, val, psreg);

	/* Select CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	nvt_set_ioaddr(nvt, &nvt->cir_addr);

	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}

static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device and enable it */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx",
		nvt->cir_wake_addr);
}

/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
	u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);

	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}

/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
	u8 val, config;

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* clearing the wake fifo works in learning mode only */
	nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
			       CIR_WAKE_IRCON);

	val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
	nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
			       CIR_WAKE_FIFOCON);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
}

/* clear out the hardware's cir tx fifo */
static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
}

/* enable RX Trigger Level Reach and Packet End interrupts */
static void nvt_set_cir_iren(struct nvt_dev *nvt)
{
	u8 iren;

	iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO;
	nvt_cir_reg_write(nvt, iren, CIR_IREN);
}

static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt,
			  CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	/* clear any and all stray interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* and finally, enable interrupts */
	nvt_set_cir_iren(nvt);

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	/*
	 * Disable RX, set specific carrier on = low, off = high,
	 * and sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);

	/* enable the CIR WAKE logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
}

static void nvt_enable_wake(struct nvt_dev *nvt)
{
	unsigned long flags;

	nvt_efm_enable(nvt);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		dev_notice(nvt_get_dev(nvt),
			   "Unable to determine carrier! (c:%u, d:%u)",
			   count, duration);
		return 0;
	}

	carrier = MS_TO_NS(count) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
#endif

/*
 * set carrier frequency
 *
 * set carrier on 2 registers: CP & CC
 * always set CP as 0x81
 * set CC by SPEC, CC = 3MHz/carrier - 1
 */
static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
	struct nvt_dev *nvt = dev->priv;
	u16 val;

	if (carrier == 0)
		return -EINVAL;

	nvt_cir_reg_write(nvt, 1, CIR_CP);
	val = 3000000 / (carrier) - 1;
	nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);

	nvt_dbg("cp: 0x%x cc: 0x%x\n",
		nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));

	return 0;
}
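
/*
 * Worked example of the CC formula above (illustrative arithmetic only):
 * for a requested carrier of 38 kHz, CC = 3000000 / 38000 - 1 = 77 (0x4d);
 * for 36 kHz, CC = 3000000 / 36000 - 1 = 82 (0x52). Only the low byte of
 * val is written to CIR_CC, so carriers below roughly 3 MHz / 256 (about
 * 11.7 kHz) would be silently truncated by that mask.
 */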

static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
					struct rc_scancode_filter *sc_filter)
{
	u8 buf_val;
	int i, ret, count;
	unsigned int val;
	struct ir_raw_event *raw;
	u8 wake_buf[WAKEUP_MAX_SIZE];
	bool complete;

	/* Require mask to be set */
	if (!sc_filter->mask)
		return 0;

	raw = kmalloc_array(WAKEUP_MAX_SIZE, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc_filter->data,
				     raw, WAKEUP_MAX_SIZE);
	complete = (ret != -ENOBUFS);
	if (!complete)
		ret = WAKEUP_MAX_SIZE;
	else if (ret < 0)
		goto out_raw;

	/* Inspect the ir samples */
	for (i = 0, count = 0; i < ret && count < WAKEUP_MAX_SIZE; ++i) {
		/* NS to US */
		val = DIV_ROUND_UP(raw[i].duration, 1000L) / SAMPLE_PERIOD;

		/* Split too large values into several smaller ones */
		while (val > 0 && count < WAKEUP_MAX_SIZE) {
			/* Skip last value for better comparison tolerance */
			if (complete && i == ret - 1 && val < BUF_LEN_MASK)
				break;

			/* Clamp values to BUF_LEN_MASK at most */
			buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;

			wake_buf[count] = buf_val;
			val -= buf_val;
			if ((raw[i]).pulse)
				wake_buf[count] |= BUF_PULSE_BIT;
			count++;
		}
	}

	nvt_write_wakeup_codes(dev, wake_buf, count);

	ret = 0;
out_raw:
	kfree(raw);

	return ret;
}
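
/*
 * Illustrative numbers for the conversion above (not derived from real
 * measurements): a 9000 us encoded pulse becomes 9000 / SAMPLE_PERIOD =
 * 180 sample periods at the default 50 us period. Since only 7 bits of a
 * wake FIFO entry hold the length, that pulse is split into two entries,
 * 0x7f (127 samples) and 53, both with BUF_PULSE_BIT set.
 */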

/*
 * nvt_tx_ir
 *
 * 1) clean TX fifo first (handled by AP)
 * 2) copy data from user space
 * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
 * 4) send 9 packets to TX FIFO to open TTR
 * in interrupt_handler:
 * 5) send all data out
 * go back to write():
 * 6) disable TX interrupts, re-enable RX interrupts
 *
 * The key problem of this function is that the user space data may be
 * larger than the driver's data buffer length. So nvt_tx_ir() only copies
 * TX_BUF_LEN bytes into buf and keeps the current buffer position in
 * cur_buf_num. But the driver's buffer count may also be larger than
 * TXFCONT (0xff), so the interrupt handler has to set TXFCONT to 0xff
 * until buf_count is less than 0xff.
 */
static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	unsigned int i;
	u8 iren;
	int ret;

	spin_lock_irqsave(&nvt->lock, flags);

	ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
	nvt->tx.buf_count = (ret * sizeof(unsigned));

	memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);

	nvt->tx.cur_buf_num = 0;

	/* save currently enabled interrupts */
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* now disable all interrupts except TFU & TTR */
	nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);

	nvt->tx.tx_state = ST_TX_REPLY;

	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
			  CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);

	/* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
	for (i = 0; i < 9; i++)
		nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);

	spin_unlock_irqrestore(&nvt->lock, flags);

	wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);

	spin_lock_irqsave(&nvt->lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->lock, flags);

	/* restore enabled interrupts to prior state */
	nvt_cir_reg_write(nvt, iren, CIR_IREN);

	return ret;
}
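
/*
 * Sizing note for nvt_tx_ir() (an illustrative sketch, assuming a 4-byte
 * unsigned int and TX_BUF_LEN as defined in nuvoton-cir.h): a single call
 * copies at most TX_BUF_LEN / sizeof(unsigned) samples, e.g. 256 / 4 = 64
 * samples if TX_BUF_LEN is 256; the return value reports how many samples
 * were actually consumed from txbuf.
 */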

/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
	int i;

	printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
	for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
		printk(KERN_CONT "0x%02x ", nvt->buf[i]);
	printk(KERN_CONT "\n");
}

/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, it's a
 * pulse, otherwise it's a space. The lower 7 bits are the count of
 * SAMPLE_PERIOD (default 50us) intervals for that pulse/space. A discrete
 * signal is followed by a series of 0x7f packets, then either
 * 0x7<something> or 0x80 to signal more IR coming (repeats) or end of IR,
 * respectively. We store sample data in the raw event kfifo until we see
 * 0x7<something> (except f) or 0x80, at which time we trigger a decode
 * operation.
 */
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	DEFINE_IR_RAW_EVENT(rawir);
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);

	for (i = 0; i < nvt->pkts; i++) {
		sample = nvt->buf[i];

		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
					  * SAMPLE_PERIOD);

		nvt_dbg("Storing %s with duration %d",
			rawir.pulse ? "pulse" : "space", rawir.duration);

		ir_raw_event_store_with_filter(nvt->rdev, &rawir);
	}

	nvt->pkts = 0;

	nvt_dbg("Calling ir_raw_event_handle\n");
	ir_raw_event_handle(nvt->rdev);

	nvt_dbg_verbose("%s done", __func__);
}
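
/*
 * Example decode of a few sample bytes, following the format described
 * above (values are illustrative, not captured data): 0x8a has the msb set
 * and a length of 0x0a, i.e. a pulse of 10 * 50us = 500us; 0x0a with the
 * msb clear is a 500us space; 0x7f is a maximum-length (127 * 50us) space,
 * a run of which trails a complete signal; 0x80 marks the end of IR.
 */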

static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
	dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!");

	nvt->pkts = 0;
	nvt_clear_cir_fifo(nvt);
	ir_raw_event_reset(nvt->rdev);
}

/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
	u8 fifocount;
	int i;

	/* Get count of how many bytes to read from RX FIFO */
	fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);

	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

	/* Read fifocount bytes from CIR Sample RX FIFO register */
	for (i = 0; i < fifocount; i++)
		nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO);

	nvt->pkts = fifocount;
	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

	nvt_process_rx_ir_data(nvt);
}

static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR ? " RDR" : "",
		status & CIR_IRSTS_RTR ? " RTR" : "",
		status & CIR_IRSTS_PE ? " PE" : "",
		status & CIR_IRSTS_RFO ? " RFO" : "",
		status & CIR_IRSTS_TE ? " TE" : "",
		status & CIR_IRSTS_TTR ? " TTR" : "",
		status & CIR_IRSTS_TFU ? " TFU" : "",
		status & CIR_IRSTS_GH ? " GH" : "",
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}

static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
{
	return nvt->tx.tx_state == ST_TX_NONE;
}

/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren;

	nvt_dbg_verbose("%s firing", __func__);

	spin_lock(&nvt->lock);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name      - description
	 *   7: CIR_IRSTS_RDR - RX Data Ready
	 *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
	 *   5: CIR_IRSTS_PE  - Packet End
	 *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
	 *   3: CIR_IRSTS_TE  - TX FIFO Empty
	 *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
	 *   1: CIR_IRSTS_TFU - TX FIFO Underrun
	 *   0: CIR_IRSTS_GH  - Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* At least NCT6779D creates a spurious interrupt when the
	 * logical device is being disabled.
	 */
	if (status == 0xff && iren == 0xff) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("Spurious interrupt detected");
		return IRQ_HANDLED;
	}

	/* IRQ may be shared with CIR WAKE, therefore check for each
	 * status bit whether the related interrupt source is enabled
	 */
	if (!(status & iren)) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		return IRQ_NONE;
	}

	/* ack/clear all irq flags we've got */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	nvt_cir_log_irqs(status, iren);

	if (status & CIR_IRSTS_RFO)
		nvt_handle_rx_fifo_overrun(nvt);
	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE)) {
		/* We only do rx if not tx'ing */
		if (nvt_cir_tx_inactive(nvt))
			nvt_get_rx_ir_data(nvt);
	}

	if (status & CIR_IRSTS_TE)
		nvt_clear_tx_fifo(nvt);

	if (status & CIR_IRSTS_TTR) {
		unsigned int pos, count;
		u8 tmp;

		pos = nvt->tx.cur_buf_num;
		count = nvt->tx.buf_count;

		/* Write data into the hardware tx fifo while pos < count */
		if (pos < count) {
			nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
			nvt->tx.cur_buf_num++;
		/* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
		} else {
			tmp = nvt_cir_reg_read(nvt, CIR_IREN);
			nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
		}
	}

	if (status & CIR_IRSTS_TFU) {
		if (nvt->tx.tx_state == ST_TX_REPLY) {
			nvt->tx.tx_state = ST_TX_REQUEST;
			wake_up(&nvt->tx.queue);
		}
	}

	spin_unlock(&nvt->lock);

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_HANDLED;
}

static void nvt_disable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	spin_lock_irqsave(&nvt->lock, flags);

	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* disable the CIR logical device */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static int nvt_open(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvt->lock, flags);

	/* set function enable flags */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_set_cir_iren(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	return 0;
}

static void nvt_close(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_disable_cir(nvt);
}

/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
	struct nvt_dev *nvt;
	struct rc_dev *rdev;
	int ret;

	nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
	if (!nvt)
		return -ENOMEM;

	/* input device for IR remote (and tx) */
	nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
	if (!nvt->rdev)
		return -ENOMEM;
	rdev = nvt->rdev;

	/* activate pnp device */
	ret = pnp_activate_dev(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Could not activate PNP device!\n");
		return ret;
	}

	/* validate pnp resources */
	if (!pnp_port_valid(pdev, 0) ||
	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
		return -EINVAL;
	}

	if (!pnp_irq_valid(pdev, 0)) {
		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
		return -EINVAL;
	}

	if (!pnp_port_valid(pdev, 1) ||
	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
		return -EINVAL;
	}

	nvt->cir_addr = pnp_port_start(pdev, 0);
	nvt->cir_irq = pnp_irq(pdev, 0);

	nvt->cir_wake_addr = pnp_port_start(pdev, 1);

	nvt->cr_efir = CR_EFIR;
	nvt->cr_efdr = CR_EFDR;

	spin_lock_init(&nvt->lock);

	pnp_set_drvdata(pdev, nvt);

	init_waitqueue_head(&nvt->tx.queue);

	ret = nvt_hw_detect(nvt);
	if (ret)
		return ret;

	/* Initialize CIR & CIR Wake Logical Devices */
	nvt_efm_enable(nvt);
	nvt_cir_ldev_init(nvt);
	nvt_cir_wake_ldev_init(nvt);
	nvt_efm_disable(nvt);

	/*
	 * Initialize CIR & CIR Wake Config Registers
	 * and enable logical devices
	 */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	/* Set up the rc device */
	rdev->priv = nvt;
	rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
	rdev->allowed_wakeup_protocols = RC_BIT_ALL_IR_ENCODER;
	rdev->encode_wakeup = true;
	rdev->open = nvt_open;
	rdev->close = nvt_close;
	rdev->tx_ir = nvt_tx_ir;
	rdev->s_tx_carrier = nvt_set_tx_carrier;
	rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
	rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
	rdev->input_phys = "nuvoton/cir0";
	rdev->input_id.bustype = BUS_HOST;
	rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
	rdev->input_id.product = nvt->chip_major;
	rdev->input_id.version = nvt->chip_minor;
	rdev->driver_name = NVT_DRIVER_NAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->timeout = MS_TO_NS(100);
	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
	rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
#if 0
	rdev->min_timeout = XYZ;
	rdev->max_timeout = XYZ;
	/* tx bits */
	rdev->tx_resolution = XYZ;
#endif
	ret = devm_rc_register_device(&pdev->dev, rdev);
	if (ret)
		return ret;

	/* now claim resources */
	if (!devm_request_region(&pdev->dev, nvt->cir_addr,
				 CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		return -EBUSY;

	ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
			       IRQF_SHARED, NVT_DRIVER_NAME, nvt);
	if (ret)
		return ret;

	if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
				 CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
		return -EBUSY;

	ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
	if (ret)
		return ret;

	device_init_wakeup(&pdev->dev, true);

	dev_notice(&pdev->dev, "driver has been successfully loaded\n");
	if (debug) {
		cir_dump_regs(nvt);
		cir_wake_dump_regs(nvt);
	}

	return 0;
}

static void nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);

	nvt_disable_cir(nvt);

	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);
}

static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	unsigned long flags;

	nvt_dbg("%s called", __func__);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt->tx.tx_state = ST_TX_NONE;

	/* disable all CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* disable cir logical dev */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}

static int nvt_resume(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	return 0;
}

static void nvt_shutdown(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_enable_wake(nvt);
}

static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },	/* CIR */
	{ "NTN0530", 0 },	/* CIR for new chip's pnp id */
	{ "", 0 },
};

static struct pnp_driver nvt_driver = {
	.name		= NVT_DRIVER_NAME,
	.id_table	= nvt_ids,
	.flags		= PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe		= nvt_probe,
	.remove		= nvt_remove,
	.suspend	= nvt_suspend,
	.resume		= nvt_resume,
	.shutdown	= nvt_shutdown,
};

module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");

MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

module_pnp_driver(nvt_driver);