au1k_ir.c

/*
 * Alchemy Semi Au1000 IrDA driver
 *
 * Copyright 2001 MontaVista Software Inc.
 * Author: MontaVista Software, Inc.
 *         ppopov@mvista.com or source@mvista.com
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include <asm/mach-au1x00/au1000.h>

/* registers */
#define IR_RING_PTR_STATUS	0x00
#define IR_RING_BASE_ADDR_H	0x04
#define IR_RING_BASE_ADDR_L	0x08
#define IR_RING_SIZE		0x0C
#define IR_RING_PROMPT		0x10
#define IR_RING_ADDR_CMPR	0x14
#define IR_INT_CLEAR		0x18
#define IR_CONFIG_1		0x20
#define IR_SIR_FLAGS		0x24
#define IR_STATUS		0x28
#define IR_READ_PHY_CONFIG	0x2C
#define IR_WRITE_PHY_CONFIG	0x30
#define IR_MAX_PKT_LEN		0x34
#define IR_RX_BYTE_CNT		0x38
#define IR_CONFIG_2		0x3C
#define IR_ENABLE		0x40

/* Config1 */
#define IR_RX_INVERT_LED	(1 << 0)
#define IR_TX_INVERT_LED	(1 << 1)
#define IR_ST			(1 << 2)
#define IR_SF			(1 << 3)
#define IR_SIR			(1 << 4)
#define IR_MIR			(1 << 5)
#define IR_FIR			(1 << 6)
#define IR_16CRC		(1 << 7)
#define IR_TD			(1 << 8)
#define IR_RX_ALL		(1 << 9)
#define IR_DMA_ENABLE		(1 << 10)
#define IR_RX_ENABLE		(1 << 11)
#define IR_TX_ENABLE		(1 << 12)
#define IR_LOOPBACK		(1 << 14)
#define IR_SIR_MODE		(IR_SIR | IR_DMA_ENABLE | \
				 IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
				 IR_16CRC)

/* ir_status */
#define IR_RX_STATUS		(1 << 9)
#define IR_TX_STATUS		(1 << 10)
#define IR_PHYEN		(1 << 15)

/* ir_write_phy_config */
#define IR_BR(x)		(((x) & 0x3f) << 10)	/* baud rate */
#define IR_PW(x)		(((x) & 0x1f) << 5)	/* pulse width */
#define IR_P(x)			((x) & 0x1f)		/* preamble bits */

/* Config2 */
#define IR_MODE_INV		(1 << 0)
#define IR_ONE_PIN		(1 << 1)
#define IR_PHYCLK_40MHZ		(0 << 2)
#define IR_PHYCLK_48MHZ		(1 << 2)
#define IR_PHYCLK_56MHZ		(2 << 2)
#define IR_PHYCLK_64MHZ		(3 << 2)
#define IR_DP			(1 << 4)
#define IR_DA			(1 << 5)
#define IR_FLT_HIGH		(0 << 6)
#define IR_FLT_MEDHI		(1 << 6)
#define IR_FLT_MEDLO		(2 << 6)
#define IR_FLT_LO		(3 << 6)
#define IR_IEN			(1 << 8)

/* ir_enable */
#define IR_HC			(1 << 3)	/* divide SBUS clock by 2 */
#define IR_CE			(1 << 2)	/* clock enable */
#define IR_C			(1 << 1)	/* coherency bit */
#define IR_BE			(1 << 0)	/* set in big endian mode */

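/*
 * Ring geometry: the head/tail indices below are advanced with a
 * "& (NUM_IR_DESC - 1)" mask, so NUM_IR_DESC must stay a power of two
 * and should correspond to the RING_SIZE_* value programmed in au1k_init().
 */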
#define NUM_IR_DESC	64
#define RING_SIZE_4	0x0
#define RING_SIZE_16	0x3
#define RING_SIZE_64	0xF
#define MAX_NUM_IR_DESC	64
#define MAX_BUF_SIZE	2048

/* Ring descriptor flags */
#define AU_OWN		(1 << 7)	/* tx,rx */
#define IR_DIS_CRC	(1 << 6)	/* tx */
#define IR_BAD_CRC	(1 << 5)	/* tx */
#define IR_NEED_PULSE	(1 << 4)	/* tx */
#define IR_FORCE_UNDER	(1 << 3)	/* tx */
#define IR_DISABLE_TX	(1 << 2)	/* tx */
#define IR_HW_UNDER	(1 << 0)	/* tx */
#define IR_TX_ERROR	(IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)

#define IR_PHY_ERROR	(1 << 6)	/* rx */
#define IR_CRC_ERROR	(1 << 5)	/* rx */
#define IR_MAX_LEN	(1 << 4)	/* rx */
#define IR_FIFO_OVER	(1 << 3)	/* rx */
#define IR_SIR_ERROR	(1 << 2)	/* rx */
#define IR_RX_ERROR	(IR_PHY_ERROR | IR_CRC_ERROR | \
			 IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)

struct db_dest {
	struct db_dest *pnext;
	volatile u32 *vaddr;
	dma_addr_t dma_addr;
};

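/*
 * Hardware ring descriptor: a 13-bit byte count split across count_0/1,
 * an ownership/status flags byte, and a 32-bit buffer address split
 * across addr_0..addr_3, least significant byte first.
 */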
struct ring_dest {
	u8 count_0;	/* 7:0   */
	u8 count_1;	/* 12:8  */
	u8 reserved;
	u8 flags;
	u8 addr_0;	/* 7:0   */
	u8 addr_1;	/* 15:8  */
	u8 addr_2;	/* 23:16 */
	u8 addr_3;	/* 31:24 */
};

/* Private data for each instance */
struct au1k_private {
	void __iomem *iobase;
	int irq_rx, irq_tx;

	struct db_dest *pDBfree;
	struct db_dest db[2 * NUM_IR_DESC];
	volatile struct ring_dest *rx_ring[NUM_IR_DESC];
	volatile struct ring_dest *tx_ring[NUM_IR_DESC];
	struct db_dest *rx_db_inuse[NUM_IR_DESC];
	struct db_dest *tx_db_inuse[NUM_IR_DESC];
	u32 rx_head;
	u32 tx_head;
	u32 tx_tail;
	u32 tx_full;

	iobuff_t rx_buff;

	struct net_device *netdev;
	struct qos_info qos;
	struct irlap_cb *irlap;

	u8 open;
	u32 speed;
	u32 newspeed;

	struct resource *ioarea;
	struct au1k_irda_platform_data *platdata;
	struct clk *irda_clk;
};

static int qos_mtt_bits = 0x07;	/* 1 ms or more */

static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
{
	if (p->platdata && p->platdata->set_phy_mode)
		p->platdata->set_phy_mode(mode);
}

static inline unsigned long irda_read(struct au1k_private *p,
				      unsigned long ofs)
{
	/*
	 * IrDA peripheral bug. You have to read the register
	 * twice to get the right value.
	 */
	(void)__raw_readl(p->iobase + ofs);
	return __raw_readl(p->iobase + ofs);
}

static inline void irda_write(struct au1k_private *p, unsigned long ofs,
			      unsigned long val)
{
	__raw_writel(val, p->iobase + ofs);
	wmb();
}

/*
 * Buffer allocation/deallocation routines. The buffer descriptor returned
 * has the virtual and dma address of a buffer suitable for
 * both receive and transmit operations.
 */
static struct db_dest *GetFreeDB(struct au1k_private *aup)
{
	struct db_dest *db;

	db = aup->pDBfree;
	if (db)
		aup->pDBfree = db->pnext;
	return db;
}

/*
 * DMA memory allocation, derived from pci_alloc_consistent.
 * However, the Au1000 data cache is coherent (when programmed
 * so), therefore we return a KSEG0 address, not KSEG1.
 */
static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC | GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
		ret = (void *)KSEG0ADDR(ret);
	}
	return ret;
}

static void dma_free(void *vaddr, size_t size)
{
	vaddr = (void *)KSEG0ADDR(vaddr);
	free_pages((unsigned long)vaddr, get_order(size));
}

static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
{
	int i;

	for (i = 0; i < NUM_IR_DESC; i++) {
		aup->rx_ring[i] = (volatile struct ring_dest *)
			(rx_base + sizeof(struct ring_dest) * i);
	}
	for (i = 0; i < NUM_IR_DESC; i++) {
		aup->tx_ring[i] = (volatile struct ring_dest *)
			(tx_base + sizeof(struct ring_dest) * i);
	}
}

static int au1k_irda_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL);
	if (io->head != NULL) {
		io->truesize = size;
		io->in_frame = FALSE;
		io->state    = OUTSIDE_FRAME;
		io->data     = io->head;
	}
	return io->head ? 0 : -ENOMEM;
}

/*
 * Set the IrDA communications speed.
 */
static int au1k_irda_set_speed(struct net_device *dev, int speed)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile struct ring_dest *ptxd;
	unsigned long control;
	int ret = 0, timeout = 10, i;

	if (speed == aup->speed)
		return ret;

	/* disable PHY first */
	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);

	/* disable RX/TX */
	irda_write(aup, IR_CONFIG_1,
	    irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE));
	msleep(20);
	while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
		msleep(20);
		if (!timeout--) {
			printk(KERN_ERR "%s: rx/tx disable timeout\n",
					dev->name);
			break;
		}
	}

	/* disable DMA */
	irda_write(aup, IR_CONFIG_1,
		   irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE);
	msleep(20);

	/* After we disable tx/rx, the index pointers go back to zero. */
	aup->tx_head = aup->tx_tail = aup->rx_head = 0;

	for (i = 0; i < NUM_IR_DESC; i++) {
		ptxd = aup->tx_ring[i];
		ptxd->flags = 0;
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
	}

	for (i = 0; i < NUM_IR_DESC; i++) {
		ptxd = aup->rx_ring[i];
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
		ptxd->flags = AU_OWN;
	}

	if (speed == 4000000)
		au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR);
	else
		au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);

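	/*
	 * SIR PHY setup: the IR_BR() values below appear to follow
	 * 115200 / speed - 1 (11 for 9600, 5 for 19200, ..., 0 for 115200)
	 * with a fixed IR_PW(12) pulse width; FIR programs the preamble only.
	 */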
	switch (speed) {
	case 9600:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 19200:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 38400:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 57600:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(1) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 115200:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 4000000:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15));
		irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE |
				IR_RX_ENABLE);
		break;
	default:
		printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
		ret = -EINVAL;
		break;
	}

	aup->speed = speed;
	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN);

	control = irda_read(aup, IR_STATUS);
	irda_write(aup, IR_RING_PROMPT, 0);

	if (control & (1 << 14)) {
		printk(KERN_ERR "%s: configuration error\n", dev->name);
	} else {
		if (control & (1 << 11))
			printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
		if (control & (1 << 12))
			printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
		if (control & (1 << 13))
			printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
		if (control & (1 << 10))
			printk(KERN_DEBUG "%s TX enabled\n", dev->name);
		if (control & (1 << 9))
			printk(KERN_DEBUG "%s RX enabled\n", dev->name);
	}

	return ret;
}

static void update_rx_stats(struct net_device *dev, u32 status, u32 count)
{
	struct net_device_stats *ps = &dev->stats;

	ps->rx_packets++;

	if (status & IR_RX_ERROR) {
		ps->rx_errors++;
		if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
			ps->rx_missed_errors++;
		if (status & IR_MAX_LEN)
			ps->rx_length_errors++;
		if (status & IR_CRC_ERROR)
			ps->rx_crc_errors++;
	} else
		ps->rx_bytes += count;
}

static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
{
	struct net_device_stats *ps = &dev->stats;

	ps->tx_packets++;
	ps->tx_bytes += pkt_len;

	if (status & IR_TX_ERROR) {
		ps->tx_errors++;
		ps->tx_aborted_errors++;
	}
}

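/*
 * Reclaim completed TX descriptors: walk from tx_tail until a descriptor
 * still owned by the DMA engine is found, update the stats and restart
 * the queue if it had been stopped. When the ring drains completely,
 * apply any pending speed change or drop back to RX-only mode.
 */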
static void au1k_tx_ack(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile struct ring_dest *ptxd;

	ptxd = aup->tx_ring[aup->tx_tail];
	while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
		update_tx_stats(dev, ptxd->flags,
				(ptxd->count_1 << 8) | ptxd->count_0);
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
		wmb();
		aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
		ptxd = aup->tx_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	if (aup->tx_tail == aup->tx_head) {
		if (aup->newspeed) {
			au1k_irda_set_speed(dev, aup->newspeed);
			aup->newspeed = 0;
		} else {
			irda_write(aup, IR_CONFIG_1,
			    irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
			irda_write(aup, IR_CONFIG_1,
			    irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
			irda_write(aup, IR_RING_PROMPT, 0);
		}
	}
}

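/*
 * Process received frames: for every descriptor handed back by the DMA
 * engine, copy the data into a fresh skb, pass it up to the IrDA stack
 * and return the descriptor (with AU_OWN set) to the hardware.
 */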
static int au1k_irda_rx(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile struct ring_dest *prxd;
	struct sk_buff *skb;
	struct db_dest *pDB;
	u32 flags, count;

	prxd = aup->rx_ring[aup->rx_head];
	flags = prxd->flags;

	while (!(flags & AU_OWN)) {
		pDB = aup->rx_db_inuse[aup->rx_head];
		count = (prxd->count_1 << 8) | prxd->count_0;
		if (!(flags & IR_RX_ERROR)) {
			/* good frame */
			update_rx_stats(dev, flags, count);
			skb = alloc_skb(count + 1, GFP_ATOMIC);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 1);
			if (aup->speed == 4000000)
				skb_put(skb, count);
			else
				skb_put(skb, count - 2);
			skb_copy_to_linear_data(skb, (void *)pDB->vaddr,
						count - 2);
			skb->dev = dev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
			prxd->count_0 = 0;
			prxd->count_1 = 0;
		}
		prxd->flags |= AU_OWN;
		aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
		irda_write(aup, IR_RING_PROMPT, 0);

		/* next descriptor */
		prxd = aup->rx_ring[aup->rx_head];
		flags = prxd->flags;
	}
	return 0;
}

static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct au1k_private *aup = netdev_priv(dev);

	irda_write(aup, IR_INT_CLEAR, 0); /* ack irda interrupts */

	au1k_irda_rx(dev);
	au1k_tx_ack(dev);

	return IRQ_HANDLED;
}

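/*
 * Bring the controller up: enable and verify the IrDA clock, select the
 * PHY clock rate, reset the descriptor rings and set the initial speed
 * to 9600 baud.
 */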
static int au1k_init(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	u32 enable, ring_address, phyck;
	struct clk *c;
	int i;

	c = clk_get(NULL, "irda_clk");
	if (IS_ERR(c))
		return PTR_ERR(c);

	i = clk_prepare_enable(c);
	if (i) {
		clk_put(c);
		return i;
	}

	switch (clk_get_rate(c)) {
	case 40000000:
		phyck = IR_PHYCLK_40MHZ;
		break;
	case 48000000:
		phyck = IR_PHYCLK_48MHZ;
		break;
	case 56000000:
		phyck = IR_PHYCLK_56MHZ;
		break;
	case 64000000:
		phyck = IR_PHYCLK_64MHZ;
		break;
	default:
		clk_disable_unprepare(c);
		clk_put(c);
		return -EINVAL;
	}
	aup->irda_clk = c;

	enable = IR_HC | IR_CE | IR_C;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	enable |= IR_BE;
#endif
	aup->tx_head = 0;
	aup->tx_tail = 0;
	aup->rx_head = 0;

	for (i = 0; i < NUM_IR_DESC; i++)
		aup->rx_ring[i]->flags = AU_OWN;

	irda_write(aup, IR_ENABLE, enable);
	msleep(20);

	/* disable PHY */
	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
	msleep(20);

	irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE);

	/* ring base address is written in 1 KB units, split across the H/L registers */
	ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
	irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26);
	irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff);

	irda_write(aup, IR_RING_SIZE,
				(RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));

	irda_write(aup, IR_CONFIG_2, phyck | IR_ONE_PIN);
	irda_write(aup, IR_RING_ADDR_CMPR, 0);

	au1k_irda_set_speed(dev, 9600);
	return 0;
}

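/*
 * ndo_open: initialize the hardware, hook up the TX and RX interrupts
 * and open an IrLAP instance for this device.
 */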
static int au1k_irda_start(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	char hwname[32];
	int retval;

	retval = au1k_init(dev);
	if (retval) {
		printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
		return retval;
	}

	retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
			     dev->name, dev);
	if (retval) {
		printk(KERN_ERR "%s: unable to get IRQ %d\n",
				dev->name, aup->irq_tx);
		return retval;
	}
	retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
			     dev->name, dev);
	if (retval) {
		free_irq(aup->irq_tx, dev);
		printk(KERN_ERR "%s: unable to get IRQ %d\n",
				dev->name, aup->irq_rx);
		return retval;
	}

	/* Give self a hardware name */
	sprintf(hwname, "Au1000 SIR/FIR");
	aup->irlap = irlap_open(dev, &aup->qos, hwname);

	netif_start_queue(dev);

	/* int enable */
	irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN);

	/* power up */
	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);

	return 0;
}

static int au1k_irda_stop(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);

	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);

	/* disable interrupts */
	irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN);
	irda_write(aup, IR_CONFIG_1, 0);
	irda_write(aup, IR_ENABLE, 0);	/* disable clock */

	if (aup->irlap) {
		irlap_close(aup->irlap);
		aup->irlap = NULL;
	}

	netif_stop_queue(dev);

	/* disable the interrupt */
	free_irq(aup->irq_tx, dev);
	free_irq(aup->irq_rx, dev);

	clk_disable_unprepare(aup->irda_clk);
	clk_put(aup->irda_clk);

	return 0;
}

/*
 * Au1000 transmit routine.
 */
static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);
	volatile struct ring_dest *ptxd;
	struct db_dest *pDB;
	u32 len, flags;

	if (speed != aup->speed && speed != -1)
		aup->newspeed = speed;

	if ((skb->len == 0) && (aup->newspeed)) {
		if (aup->tx_tail == aup->tx_head) {
			au1k_irda_set_speed(dev, speed);
			aup->newspeed = 0;
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	ptxd = aup->tx_ring[aup->tx_head];
	flags = ptxd->flags;

	if (flags & AU_OWN) {
		printk(KERN_DEBUG "%s: tx_full\n", dev->name);
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	} else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
		printk(KERN_DEBUG "%s: tx_full\n", dev->name);
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	}

	pDB = aup->tx_db_inuse[aup->tx_head];

#if 0
	if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
		printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
				irda_read(aup, IR_RX_BYTE_CNT));
	}
#endif

	if (aup->speed == 4000000) {
		/* FIR */
		skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
		ptxd->count_0 = skb->len & 0xff;
		ptxd->count_1 = (skb->len >> 8) & 0xff;
	} else {
		/* SIR */
		len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
		ptxd->count_0 = len & 0xff;
		ptxd->count_1 = (len >> 8) & 0xff;
		ptxd->flags |= IR_DIS_CRC;
	}
	ptxd->flags |= AU_OWN;
	wmb();

	irda_write(aup, IR_CONFIG_1,
		   irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
	irda_write(aup, IR_RING_PROMPT, 0);

	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
	return NETDEV_TX_OK;
}

/*
 * The Tx ring has been full longer than the watchdog timeout
 * value; the transmitter is probably hung, so reset it.
 */
static void au1k_tx_timeout(struct net_device *dev)
{
	u32 speed;
	struct au1k_private *aup = netdev_priv(dev);

	printk(KERN_ERR "%s: tx timeout\n", dev->name);
	speed = aup->speed;
	aup->speed = 0;
	au1k_irda_set_speed(dev, speed);
	aup->tx_full = 0;
	netif_wake_queue(dev);
}

static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct au1k_private *aup = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * We are unable to set the speed if the
			 * device is not running.
			 */
			if (aup->open)
				ret = au1k_irda_set_speed(dev,
						rq->ifr_baudrate);
			else {
				printk(KERN_ERR "%s ioctl: !netif_running\n",
						dev->name);
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		rq->ifr_receiving = 0;
		break;

	default:
		break;
	}
	return ret;
}

static const struct net_device_ops au1k_irda_netdev_ops = {
	.ndo_open		= au1k_irda_start,
	.ndo_stop		= au1k_irda_stop,
	.ndo_start_xmit		= au1k_irda_hard_xmit,
	.ndo_tx_timeout		= au1k_tx_timeout,
	.ndo_do_ioctl		= au1k_irda_ioctl,
};

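/*
 * Allocate the DMA descriptor rings and data buffers, set up the QoS
 * parameters advertised to the IrDA stack, and attach a data buffer to
 * every RX and TX descriptor.
 */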
static int au1k_irda_net_init(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	struct db_dest *pDB, *pDBfree;
	int i, err, retval = 0;
	dma_addr_t temp;

	err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
	if (err)
		goto out1;

	dev->netdev_ops = &au1k_irda_netdev_ops;

	irda_init_max_qos_capabilies(&aup->qos);

	/* The only value we must override is the baudrate */
	aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 |
		IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 8);

	aup->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&aup->qos);

	retval = -ENOMEM;

	/* Tx ring follows rx ring + 512 bytes */
	/* we need a 1k aligned buffer */
	aup->rx_ring[0] = (struct ring_dest *)
		dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)),
			  &temp);
	if (!aup->rx_ring[0])
		goto out2;

	/* allocate the data buffers */
	aup->db[0].vaddr =
		dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
	if (!aup->db[0].vaddr)
		goto out3;

	setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);

	pDBfree = NULL;
	pDB = aup->db;
	for (i = 0; i < (2 * NUM_IR_DESC); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr =
			(u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i));
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	/* attach a data buffer to each descriptor */
	for (i = 0; i < NUM_IR_DESC; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB)
			goto out3;
		aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
		aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >>  8) & 0xff);
		aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
		aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
		aup->rx_db_inuse[i] = pDB;
	}
	for (i = 0; i < NUM_IR_DESC; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB)
			goto out3;
		aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
		aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >>  8) & 0xff);
		aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
		aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
		aup->tx_ring[i]->count_0 = 0;
		aup->tx_ring[i]->count_1 = 0;
		aup->tx_ring[i]->flags = 0;
		aup->tx_db_inuse[i] = pDB;
	}
	return 0;

out3:
	dma_free((void *)aup->rx_ring[0],
		 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
out2:
	kfree(aup->rx_buff.head);
out1:
	printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
	return retval;
}

static int au1k_irda_probe(struct platform_device *pdev)
{
	struct au1k_private *aup;
	struct net_device *dev;
	struct resource *r;
	struct clk *c;
	int err;

	dev = alloc_irdadev(sizeof(struct au1k_private));
	if (!dev)
		return -ENOMEM;

	aup = netdev_priv(dev);

	aup->platdata = pdev->dev.platform_data;

	err = -EINVAL;
	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r)
		goto out;
	aup->irq_tx = r->start;

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (!r)
		goto out;
	aup->irq_rx = r->start;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		goto out;

	err = -EBUSY;
	aup->ioarea = request_mem_region(r->start, resource_size(r),
					 pdev->name);
	if (!aup->ioarea)
		goto out;

	/* bail out early if clock doesn't exist */
	c = clk_get(NULL, "irda_clk");
	if (IS_ERR(c)) {
		err = PTR_ERR(c);
		goto out2;
	}
	clk_put(c);

	aup->iobase = ioremap_nocache(r->start, resource_size(r));
	if (!aup->iobase)
		goto out2;

	dev->irq = aup->irq_rx;

	err = au1k_irda_net_init(dev);
	if (err)
		goto out3;
	err = register_netdev(dev);
	if (err)
		goto out4;

	platform_set_drvdata(pdev, dev);

	printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
	return 0;

out4:
	dma_free((void *)aup->db[0].vaddr,
		MAX_BUF_SIZE * 2 * NUM_IR_DESC);
	dma_free((void *)aup->rx_ring[0],
		2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
	kfree(aup->rx_buff.head);
out3:
	iounmap(aup->iobase);
out2:
	release_resource(aup->ioarea);
	kfree(aup->ioarea);
out:
	free_netdev(dev);
	return err;
}

static int au1k_irda_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct au1k_private *aup = netdev_priv(dev);

	unregister_netdev(dev);

	dma_free((void *)aup->db[0].vaddr,
		MAX_BUF_SIZE * 2 * NUM_IR_DESC);
	dma_free((void *)aup->rx_ring[0],
		2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
	kfree(aup->rx_buff.head);

	iounmap(aup->iobase);
	release_resource(aup->ioarea);
	kfree(aup->ioarea);

	free_netdev(dev);
	return 0;
}

static struct platform_driver au1k_irda_driver = {
	.driver	= {
		.name	= "au1000-irda",
	},
	.probe		= au1k_irda_probe,
	.remove		= au1k_irda_remove,
};

module_platform_driver(au1k_irda_driver);

MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
MODULE_DESCRIPTION("Au1000 IrDA Device Driver");