/*
 * Blackfin Infra-red Driver
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 *
 */
#include "bfin_sir.h"

#ifdef CONFIG_SIR_BFIN_DMA
#define DMA_SIR_RX_XCNT       10
#define DMA_SIR_RX_YCNT       (PAGE_SIZE / DMA_SIR_RX_XCNT)
#define DMA_SIR_RX_FLUSH_JIFS (HZ * 4 / 250)
#endif
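
/*
 * RX in DMA mode runs as a 2D autobuffer: rows of DMA_SIR_RX_XCNT bytes,
 * DMA_SIR_RX_YCNT rows filling one PAGE_SIZE buffer.  An interrupt fires
 * per completed row, and a timer flushes any partially filled row every
 * DMA_SIR_RX_FLUSH_JIFS jiffies (see bfin_sir_rx_dma_timeout()).
 */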

#if ANOMALY_05000447
static int max_rate = 57600;
#else
static int max_rate = 115200;
#endif
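
/*
 * Sleep out the IrDA minimum turnaround time before the link direction
 * is reversed: mtt is in microseconds, clamped to at least 10 ms and
 * rounded up to whole jiffies.
 */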

static void turnaround_delay(int mtt)
{
	long ticks;

	mtt = mtt < 10000 ? 10000 : mtt;
	ticks = 1 + mtt / (USEC_PER_SEC / HZ);
	schedule_timeout_uninterruptible(ticks);
}
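
/*
 * Pick up the MMR base, IRQ and DMA channel numbers from the platform
 * resources.  For the DMA resource, res->start carries the RX channel
 * and res->end the TX channel.
 */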

static void bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
{
	int i;
	struct resource *res;

	for (i = 0; i < pdev->num_resources; i++) {
		res = &pdev->resource[i];
		switch (res->flags) {
		case IORESOURCE_MEM:
			sp->membase = (void __iomem *)res->start;
			break;
		case IORESOURCE_IRQ:
			sp->irq = res->start;
			break;
		case IORESOURCE_DMA:
			sp->rx_dma_channel = res->start;
			sp->tx_dma_channel = res->end;
			break;
		default:
			break;
		}
	}

	sp->clk = get_sclk();
#ifdef CONFIG_SIR_BFIN_DMA
	sp->tx_done = 1;
	init_timer(&(sp->rx_dma_timer));
#endif
}

static void bfin_sir_stop_tx(struct bfin_sir_port *port)
{
#ifdef CONFIG_SIR_BFIN_DMA
	disable_dma(port->tx_dma_channel);
#endif

	while (!(UART_GET_LSR(port) & THRE))
		cpu_relax();

	UART_CLEAR_IER(port, ETBEI);
}

static void bfin_sir_enable_tx(struct bfin_sir_port *port)
{
	UART_SET_IER(port, ETBEI);
}

static void bfin_sir_stop_rx(struct bfin_sir_port *port)
{
	UART_CLEAR_IER(port, ERBFI);
}

static void bfin_sir_enable_rx(struct bfin_sir_port *port)
{
	UART_SET_IER(port, ERBFI);
}
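
/*
 * Program the divisor for the requested baud rate: quot is
 * clk / (16 * speed), rounded to nearest.  The do/while loop first waits
 * (bounded by 'count') for the transmitter to drain so the divisor latch
 * is not rewritten mid-character.
 */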

static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
{
	int ret = -EINVAL;
	unsigned int quot;
	unsigned short val, lsr, lcr;
	static int utime;
	int count = 10;

	lcr = WLS(8);

	switch (speed) {
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		/*
		 * IRDA is not affected by anomaly 05000230, so there is no
		 * need to tweak the divisor like the UART driver (which will
		 * slightly speed up the baud rate on us).
		 */
		quot = (port->clk + (8 * speed)) / (16 * speed);

		do {
			udelay(utime);
			lsr = UART_GET_LSR(port);
		} while (!(lsr & TEMT) && count--);

		/* Microseconds needed to transmit one bit */
		utime = 1000000 / speed + 1;

		/* Clear UCEN bit to reset the UART state machine
		 * and control registers
		 */
		val = UART_GET_GCTL(port);
		val &= ~UCEN;
		UART_PUT_GCTL(port, val);

		/* Set DLAB in LCR to access the divisor latches DLL/DLH */
		UART_SET_DLAB(port);
		SSYNC();

		UART_PUT_DLL(port, quot & 0xFF);
		UART_PUT_DLH(port, (quot >> 8) & 0xFF);
		SSYNC();

		/* Clear DLAB in LCR */
		UART_CLEAR_DLAB(port);
		SSYNC();

		UART_PUT_LCR(port, lcr);

		val = UART_GET_GCTL(port);
		val |= UCEN;
		UART_PUT_GCTL(port, val);

		ret = 0;
		break;
	default:
		printk(KERN_WARNING "bfin_sir: Invalid speed %d\n", speed);
		break;
	}

	val = UART_GET_GCTL(port);
	/* Without RPOLC set we cannot catch the receive interrupt.
	 * This depends on the HW layout and the IR transceiver.
	 */
	val |= UMOD_IRDA | RPOLC;
	UART_PUT_GCTL(port, val);
	return ret;
}

static int bfin_sir_is_receiving(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (!(UART_GET_IER(port) & ERBFI))
		return 0;
	return self->rx_buff.state != OUTSIDE_FRAME;
}
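
/*
 * PIO mode: the RX interrupt drains the receiver one character at a time
 * into the IrDA async unwrapper, and the TX interrupt feeds the THR one
 * character at a time from tx_buff.
 */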

#ifdef CONFIG_SIR_BFIN_PIO
static void bfin_sir_tx_chars(struct net_device *dev)
{
	unsigned int chr;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (self->tx_buff.len != 0) {
		chr = *(self->tx_buff.data);
		UART_PUT_CHAR(port, chr);
		self->tx_buff.data++;
		self->tx_buff.len--;
	} else {
		self->stats.tx_packets++;
		self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_stop_tx(port);
		bfin_sir_enable_rx(port);
		/* Frame done: ready for the next packet */
		netif_wake_queue(dev);
	}
}

static void bfin_sir_rx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	unsigned char ch;

	UART_CLEAR_LSR(port);
	ch = UART_GET_CHAR(port);
	async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
}

static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	while ((UART_GET_LSR(port) & DR))
		bfin_sir_rx_chars(dev);
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}

static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	if (UART_GET_LSR(port) & THRE)
		bfin_sir_tx_chars(dev);
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}
#endif /* CONFIG_SIR_BFIN_PIO */
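
/*
 * DMA mode TX: flush tx_buff out of the data cache, then hand the whole
 * wrapped frame to a linear, stop-mode DMA transfer.  Completion is
 * detected in bfin_sir_dma_tx_int() once DMA_RUN clears.
 */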

#ifdef CONFIG_SIR_BFIN_DMA
static void bfin_sir_dma_tx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (!port->tx_done)
		return;
	port->tx_done = 0;

	if (self->tx_buff.len == 0) {
		self->stats.tx_packets++;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		port->tx_done = 1;
		netif_wake_queue(dev);
		return;
	}

	blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
		(unsigned long)(self->tx_buff.data+self->tx_buff.len));
	set_dma_config(port->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(port->tx_dma_channel,
		(unsigned long)(self->tx_buff.data));
	set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
	set_dma_x_modify(port->tx_dma_channel, 1);
	enable_dma(port->tx_dma_channel);
}

static irqreturn_t bfin_sir_dma_tx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {
		clear_dma_irqstat(port->tx_dma_channel);
		bfin_sir_stop_tx(port);

		self->stats.tx_packets++;
		self->stats.tx_bytes += self->tx_buff.len;
		self->tx_buff.len = 0;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		/* Frame done: ready for the next packet */
		netif_wake_queue(dev);
		port->tx_done = 1;
	}
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}
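
/*
 * DMA mode RX bookkeeping: head and tail index into the 2D autobuffer.
 * Each row-completion interrupt advances tail one row and unwraps the
 * new bytes; the flush timer picks up the bytes of a row that is still
 * in flight when traffic pauses.
 */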

static void bfin_sir_dma_rx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int i;

	UART_CLEAR_LSR(port);

	for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
		async_unwrap_char(dev, &self->stats, &self->rx_buff,
				  port->rx_dma_buf.buf[i]);
}

void bfin_sir_rx_dma_timeout(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int x_pos, pos;
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	x_pos = DMA_SIR_RX_XCNT - get_dma_curr_xcount(port->rx_dma_channel);
	if (x_pos == DMA_SIR_RX_XCNT)
		x_pos = 0;

	pos = port->rx_dma_nrows * DMA_SIR_RX_XCNT + x_pos;

	if (pos > port->rx_dma_buf.tail) {
		port->rx_dma_buf.tail = pos;
		bfin_sir_dma_rx_chars(dev);
		port->rx_dma_buf.head = port->rx_dma_buf.tail;
	}
	spin_unlock_irqrestore(&self->lock, flags);
}

static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	unsigned short irqstat;

	spin_lock(&self->lock);

	port->rx_dma_nrows++;
	port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows;
	bfin_sir_dma_rx_chars(dev);
	if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) {
		port->rx_dma_nrows = 0;
		port->rx_dma_buf.tail = 0;
	}
	port->rx_dma_buf.head = port->rx_dma_buf.tail;

	irqstat = get_dma_curr_irqstat(port->rx_dma_channel);
	clear_dma_irqstat(port->rx_dma_channel);
	spin_unlock(&self->lock);

	mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS);
	return IRQ_HANDLED;
}
#endif /* CONFIG_SIR_BFIN_DMA */
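
/*
 * Bring the port up.  The UART DMA channels are claimed in both modes
 * (presumably so nothing else can grab them); DMA mode then attaches the
 * DMA callbacks and starts the RX autobuffer, while PIO mode requests
 * the RX interrupt and the TX interrupt (RX IRQ + 1).
 */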

static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
{
#ifdef CONFIG_SIR_BFIN_DMA
	dma_addr_t dma_handle;
#endif /* CONFIG_SIR_BFIN_DMA */

	if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
		dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
		return -EBUSY;
	}

	if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
		dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
		free_dma(port->rx_dma_channel);
		return -EBUSY;
	}

#ifdef CONFIG_SIR_BFIN_DMA
	set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
	set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);

	port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
						  &dma_handle, GFP_DMA);
	port->rx_dma_buf.head = 0;
	port->rx_dma_buf.tail = 0;
	port->rx_dma_nrows = 0;

	set_dma_config(port->rx_dma_channel,
		set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
			INTR_ON_ROW, DIMENSION_2D,
			DATA_SIZE_8, DMA_SYNC_RESTART));
	set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
	set_dma_x_modify(port->rx_dma_channel, 1);
	set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
	set_dma_y_modify(port->rx_dma_channel, 1);
	set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
	enable_dma(port->rx_dma_channel);

	port->rx_dma_timer.data = (unsigned long)(dev);
	port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;
#else
	if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
		dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
		return -EBUSY;
	}

	if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
		dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
		free_irq(port->irq, dev);
		return -EBUSY;
	}
#endif

	return 0;
}

static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev)
{
	unsigned short val;

	bfin_sir_stop_rx(port);

	val = UART_GET_GCTL(port);
	val &= ~(UCEN | UMOD_MASK | RPOLC);
	UART_PUT_GCTL(port, val);

#ifdef CONFIG_SIR_BFIN_DMA
	disable_dma(port->tx_dma_channel);
	disable_dma(port->rx_dma_channel);
	del_timer(&(port->rx_dma_timer));
	dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0);
#else
	free_irq(port->irq+1, dev);
	free_irq(port->irq, dev);
#endif
	free_dma(port->tx_dma_channel);
	free_dma(port->rx_dma_channel);
}

#ifdef CONFIG_PM
static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev;
	struct bfin_sir_self *self;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;

	dev = sir_port->dev;
	self = netdev_priv(dev);
	if (self->open) {
		flush_work(&self->work);
		bfin_sir_shutdown(self->sir_port, dev);
		netif_device_detach(dev);
	}

	return 0;
}

static int bfin_sir_resume(struct platform_device *pdev)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev;
	struct bfin_sir_self *self;
	struct bfin_sir_port *port;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;

	dev = sir_port->dev;
	self = netdev_priv(dev);
	port = self->sir_port;
	if (self->open) {
		if (self->newspeed) {
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_startup(port, dev);
		bfin_sir_set_speed(port, 9600);
		bfin_sir_enable_rx(port);
		netif_device_attach(dev);
	}

	return 0;
}
#else
#define bfin_sir_suspend NULL
#define bfin_sir_resume  NULL
#endif
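
/*
 * Deferred transmit path: wait (a bounded number of turnaround delays)
 * for any reception to finish, then reset the IR mode bits before
 * starting the transfer.
 */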

static void bfin_sir_send_work(struct work_struct *work)
{
	struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work);
	struct net_device *dev = self->sir_port->dev;
	struct bfin_sir_port *port = self->sir_port;
	unsigned short val;
	int tx_cnt = 10;

	while (bfin_sir_is_receiving(dev) && --tx_cnt)
		turnaround_delay(self->mtt);

	bfin_sir_stop_rx(port);

	/* To avoid losing the RX interrupt, we reset the IR function
	 * before sending data.  Setting the speed would work too, as
	 * that resets the whole UART.
	 */
	val = UART_GET_GCTL(port);
	val &= ~(UMOD_MASK | RPOLC);
	UART_PUT_GCTL(port, val);
	SSYNC();
	val |= UMOD_IRDA | RPOLC;
	UART_PUT_GCTL(port, val);
	SSYNC();
	/* bfin_sir_set_speed(port, self->speed); */

#ifdef CONFIG_SIR_BFIN_DMA
	bfin_sir_dma_tx_chars(dev);
#endif
	bfin_sir_enable_tx(port);
	netif_trans_update(dev);
}
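
/*
 * ndo_start_xmit: wrap the skb into tx_buff, note any pending speed
 * change, and defer the actual transmission to bfin_sir_send_work().
 * The skb is consumed here.
 */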

static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	netif_stop_queue(dev);

	self->mtt = irda_get_mtt(skb);

	if (speed != self->speed && speed != -1)
		self->newspeed = speed;

	self->tx_buff.data = self->tx_buff.head;
	if (skb->len == 0)
		self->tx_buff.len = 0;
	else
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

	schedule_work(&self->work);
	dev_kfree_skb(skb);

	return 0;
}

static int bfin_sir_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int ret = 0;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		if (capable(CAP_NET_ADMIN)) {
			if (self->open) {
				ret = bfin_sir_set_speed(port, rq->ifr_baudrate);
				bfin_sir_enable_rx(port);
			} else {
				dev_warn(&dev->dev, "SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		rq->ifr_receiving = bfin_sir_is_receiving(dev);
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static struct net_device_stats *bfin_sir_stats(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);

	return &self->stats;
}

static int bfin_sir_open(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int err;

	self->newspeed = 0;
	self->speed = 9600;

	spin_lock_init(&self->lock);

	err = bfin_sir_startup(port, dev);
	if (err)
		goto err_startup;

	bfin_sir_set_speed(port, 9600);

	self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENOMEM;
		goto err_irlap;
	}

	INIT_WORK(&self->work, bfin_sir_send_work);

	/*
	 * Now enable the interrupt then start the queue
	 */
	self->open = 1;
	bfin_sir_enable_rx(port);

	netif_start_queue(dev);

	return 0;

err_irlap:
	self->open = 0;
	bfin_sir_shutdown(port, dev);
err_startup:
	return err;
}

static int bfin_sir_stop(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);

	flush_work(&self->work);
	bfin_sir_shutdown(self->sir_port, dev);

	if (self->rxskb) {
		dev_kfree_skb(self->rxskb);
		self->rxskb = NULL;
	}

	/* Stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(dev);
	self->open = 0;

	return 0;
}

static int bfin_sir_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL);
	if (!io->head)
		return -ENOMEM;
	io->truesize = size;
	io->in_frame = FALSE;
	io->state = OUTSIDE_FRAME;
	io->data = io->head;
	return 0;
}

static const struct net_device_ops bfin_sir_ndo = {
	.ndo_open	= bfin_sir_open,
	.ndo_stop	= bfin_sir_stop,
	.ndo_start_xmit	= bfin_sir_hard_xmit,
	.ndo_do_ioctl	= bfin_sir_ioctl,
	.ndo_get_stats	= bfin_sir_stats,
};
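
/*
 * Probe: claim the peripheral pins for this UART (the last element of a
 * per[] row holds the port id, checked against pdev->id as a sanity
 * test), then allocate the SIR port, the IrDA net_device and its I/O
 * buffers.
 */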

static int bfin_sir_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct bfin_sir_self *self;
	unsigned int baudrate_mask;
	struct bfin_sir_port *sir_port;
	int err;

	if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) &&
	    per[pdev->id][3] == pdev->id) {
		err = peripheral_request_list(per[pdev->id], DRIVER_NAME);
		if (err)
			return err;
	} else {
		dev_err(&pdev->dev, "Invalid pdev id, please check board file\n");
		return -ENODEV;
	}

	err = -ENOMEM;
	sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL);
	if (!sir_port)
		goto err_mem_0;

	bfin_sir_init_ports(sir_port, pdev);

	dev = alloc_irdadev(sizeof(*self));
	if (!dev)
		goto err_mem_1;

	self = netdev_priv(dev);
	self->dev = &pdev->dev;
	self->sir_port = sir_port;
	sir_port->dev = dev;

	err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
	if (err)
		goto err_mem_2;
	err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_3;

	dev->netdev_ops = &bfin_sir_ndo;
	dev->irq = sir_port->irq;

	irda_init_max_qos_capabilies(&self->qos);

	baudrate_mask = IR_9600;

	/* The cases deliberately fall through: each maximum rate also
	 * enables every lower rate.
	 */
	switch (max_rate) {
	case 115200:
		baudrate_mask |= IR_115200;
		/* fall through */
	case 57600:
		baudrate_mask |= IR_57600;
		/* fall through */
	case 38400:
		baudrate_mask |= IR_38400;
		/* fall through */
	case 19200:
		baudrate_mask |= IR_19200;
		/* fall through */
	case 9600:
		break;
	default:
		dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n");
	}

	self->qos.baud_rate.bits &= baudrate_mask;

	self->qos.min_turn_time.bits = 1; /* 10 ms or more */

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(dev);

	if (err) {
		kfree(self->tx_buff.head);
err_mem_3:
		kfree(self->rx_buff.head);
err_mem_2:
		free_netdev(dev);
err_mem_1:
		kfree(sir_port);
err_mem_0:
		peripheral_free_list(per[pdev->id]);
	} else
		platform_set_drvdata(pdev, sir_port);

	return err;
}

static int bfin_sir_remove(struct platform_device *pdev)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev = NULL;
	struct bfin_sir_self *self;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;

	dev = sir_port->dev;
	self = netdev_priv(dev);
	unregister_netdev(dev);
	kfree(self->tx_buff.head);
	kfree(self->rx_buff.head);
	free_netdev(dev);
	kfree(sir_port);

	return 0;
}

static struct platform_driver bfin_ir_driver = {
	.probe   = bfin_sir_probe,
	.remove  = bfin_sir_remove,
	.suspend = bfin_sir_suspend,
	.resume  = bfin_sir_resume,
	.driver  = {
		.name = DRIVER_NAME,
	},
};

module_platform_driver(bfin_ir_driver);

module_param(max_rate, int, 0);
MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");

MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
MODULE_DESCRIPTION("Blackfin IrDA driver");
MODULE_LICENSE("GPL");