/* Xilinx CAN device driver
 *
 * Copyright (C) 2012 - 2014 Xilinx, Inc.
 * Copyright (C) 2009 PetaLogix. All rights reserved.
 *
 * Description:
 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME "xilinx_can"

/* CAN registers set */
enum xcan_reg {
	XCAN_SRR_OFFSET = 0x00, /* Software reset */
	XCAN_MSR_OFFSET = 0x04, /* Mode select */
	XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET = 0x10, /* Error counter */
	XCAN_ESR_OFFSET = 0x14, /* Error status */
	XCAN_SR_OFFSET = 0x18, /* Status */
	XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
	XCAN_TXFIFO_ID_OFFSET = 0x30, /* TX FIFO ID */
	XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */
	XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */
	XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */
	XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */
	XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */
	XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */
	XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */
};

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */

#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK | \
			XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
			XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
			XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT 21 /* Standard Message Identifier */
#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
#define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN 8
#define XCAN_TIMEOUT (1 * HZ)

/**
 * struct xcan_priv - This definition defines the CAN driver instance
 * @can: CAN private data structure.
 * @tx_head: Tx CAN packets ready to send on the queue
 * @tx_tail: Tx CAN packets successfully sent on the queue
 * @tx_max: Maximum number of packets the driver can send
 * @napi: NAPI structure
 * @read_reg: For reading data from CAN registers
 * @write_reg: For writing data to CAN registers
 * @dev: Network device data structure
 * @reg_base: Ioremapped address to registers
 * @irq_flags: For request_irq()
 * @bus_clk: Pointer to struct clk
 * @can_clk: Pointer to struct clk
 */
struct xcan_priv {
	struct can_priv can;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
};

/* CAN Bittiming constants as per Xilinx CAN specs */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}

/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}
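
/*
 * Note: the register endianness is probed at runtime. xcan_probe() first
 * installs the little-endian accessor pair above and reads XCAN_SR_OFFSET;
 * if the freshly reset core does not read back XCAN_SR_CONFIG_MASK, the
 * big-endian accessors are installed instead. All register access in the
 * rest of the driver therefore goes through priv->read_reg()/priv->write_reg()
 * rather than calling ioread32()/iowrite32() directly.
 */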

/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev: Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	return 0;
}

/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver set bittiming routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
			 XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}
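
/*
 * Illustrative example (assumed numbers, not taken from this file): with a
 * can_clk of 24 MHz and a 500 kbit/s bitrate, the SocketCAN bittiming code
 * might choose brp = 3, prop_seg + phase_seg1 = 11, phase_seg2 = 4, sjw = 1
 * (16 time quanta per bit). xcan_set_bittiming() would then program
 * BRPR = brp - 1 = 2 and
 * BTR = ((sjw - 1) << 7) | ((phase_seg2 - 1) << 4) |
 *       (prop_seg + phase_seg1 - 1) = 0x3A.
 * The actual values are computed by the core CAN layer from
 * xcan_bittiming_const and priv->can.clock.freq.
 */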

/**
 * xcan_chip_start - This is the driver start routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver start routine.
 * Based on the State of the CAN device it puts
 * the CAN device into a proper mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_chip_start(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 reg_msr, reg_sr_mask;
	int err;
	unsigned long timeout;

	/* Check if it is in reset mode */
	err = set_reset_mode(ndev);
	if (err < 0)
		return err;

	err = xcan_set_bittiming(ndev);
	if (err < 0)
		return err;

	/* Enable interrupts */
	priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);

	/* Check whether it is loopback mode or normal mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		reg_msr = XCAN_MSR_LBACK_MASK;
		reg_sr_mask = XCAN_SR_LBACK_MASK;
	} else {
		reg_msr = 0x0;
		reg_sr_mask = XCAN_SR_NORMAL_MASK;
	}

	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev,
				    "timed out for correct mode\n");
			return -ETIMEDOUT;
		}
		/* Sleep between polls, as in set_reset_mode() */
		usleep_range(500, 10000);
	}
	netdev_dbg(ndev, "status:0x%08x\n",
		   priv->read_reg(priv, XCAN_SR_OFFSET));

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;
}

/**
 * xcan_do_set_mode - This sets the mode of the driver
 * @ndev: Pointer to net_device structure
 * @mode: Tells the mode of the driver
 *
 * This checks the driver state and calls the
 * corresponding mode-setting routine.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret;

	switch (mode) {
	case CAN_MODE_START:
		ret = xcan_chip_start(ndev);
		if (ret < 0) {
			netdev_err(ndev, "xcan_chip_start failed!\n");
			return ret;
		}
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xcan_start_xmit - Starts the transmission
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * This function is invoked from upper layers to initiate transmission. This
 * function uses the next available free txbuff and populates its fields to
 * start the transmission.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 id, dlc, data[2] = {0, 0};

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK)) {
		netif_stop_queue(ndev);
		netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
		     XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
		       (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
		       XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
		     XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;

	if (cf->can_dlc > 0)
		data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
	if (cf->can_dlc > 4)
		data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
	priv->tx_head++;

	/* Write the Frame to Xilinx CAN TX FIFO */
	priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
	/* If the CAN frame is RTR frame this write triggers transmission */
	priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
	if (!(cf->can_id & CAN_RTR_FLAG)) {
		priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
		/* If the CAN frame is Standard/Extended frame this
		 * write triggers transmission
		 */
		priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
		stats->tx_bytes += cf->can_dlc;
	}

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}
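
/*
 * Illustrative example of the IDR packing above, using assumed identifiers:
 * a standard frame with can_id 0x123 is written as
 * id = 0x123 << XCAN_IDR_ID1_SHIFT = 0x24600000, while an extended frame
 * splits its 29-bit identifier into ID1 (upper 11 bits) and ID2 (lower
 * 18 bits) and additionally sets XCAN_IDR_IDE_MASK and XCAN_IDR_SRR_MASK.
 * xcan_rx() below performs the inverse mapping back to the SocketCAN can_id
 * format.
 */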

/**
 * xcan_rx - Is called from CAN isr to complete the received
 *		frame processing
 * @ndev: Pointer to net_device structure
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
	dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
	      XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->can_dlc = get_can_dlc(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
			      XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
			     XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
	data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->can_dlc > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->can_dlc > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}
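
/*
 * Note on data word handling: the TX and RX FIFO data registers carry the CAN
 * payload in big-endian byte order, so xcan_start_xmit() converts with
 * be32_to_cpup() before writing DW1/DW2 and xcan_rx() converts back with
 * cpu_to_be32() when filling cf->data. Both data words are always read on
 * receive, even for short or RTR frames, because reading them is what removes
 * the message from the RX FIFO.
 */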

/**
 * xcan_err_interrupt - error frame Isr
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This is the CAN error interrupt and it will
 * check the type of error and forward the error
 * frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 err_status, status, txerr = 0, rxerr = 0;

	skb = alloc_can_err_skb(ndev, &cf);

	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
	txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
		  XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
	status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		if (skb)
			cf->can_id |= CAN_ERR_BUSOFF;
	} else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		priv->can.can_stats.error_passive++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (rxerr > 127) ?
					CAN_ERR_CRTL_RX_PASSIVE :
					CAN_ERR_CRTL_TX_PASSIVE;
			cf->data[6] = txerr;
			cf->data[7] = rxerr;
		}
	} else if (status & XCAN_SR_ERRWRN_MASK) {
		priv->can.state = CAN_STATE_ERROR_WARNING;
		priv->can.can_stats.error_warning++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= (txerr > rxerr) ?
					CAN_ERR_CRTL_TX_WARNING :
					CAN_ERR_CRTL_RX_WARNING;
			cf->data[6] = txerr;
			cf->data[7] = rxerr;
		}
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
		}
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		if (skb)
			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_ACK;
				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	if (skb) {
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_rx(skb);
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}

/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This checks the state of the CAN device
 * and puts the device into appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi: napi structure pointer
 * @quota: Max number of rx packets to be processed.
 *
 * This is the poll routine for rx part.
 * It processes at most quota number of packets.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	int work_done = 0;

	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
		if (isr & XCAN_IXR_RXOK_MASK) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXOK_MASK);
			work_done += xcan_rx(ndev);
		} else {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
			break;
		}
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	}

	if (work_done)
		can_led_event(ndev, CAN_LED_EVENT_RX);

	if (work_done < quota) {
		napi_complete(napi);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
}
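
/*
 * NAPI flow: when xcan_interrupt() sees RXOK/RXNEMP it masks those sources in
 * the IER and schedules this poll routine. The poll loop drains the RX FIFO
 * up to the NAPI quota, acknowledging RXOK/RXNEMP in the ICR as it goes, and
 * re-enables the RX interrupt sources only once it completes under quota.
 */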

/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev: net_device pointer
 * @isr: Interrupt status register value
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;

	while ((priv->tx_head - priv->tx_tail > 0) &&
	       (isr & XCAN_IXR_TXOK_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		can_get_echo_skb(ndev, priv->tx_tail % priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	}
	can_led_event(ndev, CAN_LED_EVENT_TX);
	netif_wake_queue(ndev);
}
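
/*
 * TX bookkeeping: xcan_start_xmit() stores each outgoing skb with
 * can_put_echo_skb() at slot tx_head % tx_max and advances tx_head; this
 * handler releases slots with can_get_echo_skb() at tx_tail % tx_max as TXOK
 * interrupts arrive and advances tx_tail. The queue is stopped in
 * xcan_start_xmit() when tx_head - tx_tail reaches tx_max (the TX FIFO depth
 * read from the device tree) and woken again here.
 */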

/**
 * xcan_interrupt - CAN Isr
 * @irq: irq number
 * @dev_id: device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and Processing it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and Processing it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and Processing it */
	if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
		   XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
				XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
				XCAN_IXR_ARBLST_MASK));
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and Processing it */
	if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

/**
 * xcan_chip_stop - Driver stop routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver stop routine. It will disable the
 * interrupts and put the device into configuration mode.
 */
static void xcan_chip_stop(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;

	/* Disable interrupts and leave the can in configuration mode */
	ier = priv->read_reg(priv, XCAN_IER_OFFSET);
	ier &= ~XCAN_INTR_ALL;
	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
	priv->can.state = CAN_STATE_STOPPED;
}

/**
 * xcan_open - Driver open routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver open routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		return ret;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	can_led_event(ndev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}

/**
 * xcan_close - Driver close routine
 * @ndev: Pointer to net_device structure
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);
	pm_runtime_put(priv->dev);

	return 0;
}

/**
 * xcan_get_berr_counter - error counter routine
 * @ndev: Pointer to net_device structure
 * @bec: Pointer to can_berr_counter structure
 *
 * This is the driver error counter routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_get_berr_counter(const struct net_device *ndev,
				 struct can_berr_counter *bec)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		return ret;
	}

	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
		       XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);

	pm_runtime_put(priv->dev);

	return 0;
}

static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open = xcan_open,
	.ndo_stop = xcan_close,
	.ndo_start_xmit = xcan_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

/**
 * xcan_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return pm_runtime_force_suspend(dev);

	return 0;
}

/**
 * xcan_resume - Resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return pm_runtime_force_resume(dev);

	return 0;
}

/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
	}

	priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
	priv->can.state = CAN_STATE_SLEEPING;

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}

/**
 * xcan_runtime_resume - Runtime resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;
	u32 isr, status;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}
	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if (netif_running(ndev)) {
		if (isr & XCAN_IXR_BSOFF_MASK) {
			priv->can.state = CAN_STATE_BUS_OFF;
			priv->write_reg(priv, XCAN_SRR_OFFSET,
					XCAN_SRR_RESET_MASK);
		} else if ((status & XCAN_SR_ESTAT_MASK) ==
			   XCAN_SR_ESTAT_MASK) {
			priv->can.state = CAN_STATE_ERROR_PASSIVE;
		} else if (status & XCAN_SR_ERRWRN_MASK) {
			priv->can.state = CAN_STATE_ERROR_WARNING;
		} else {
			priv->can.state = CAN_STATE_ERROR_ACTIVE;
		}
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}

static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
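
/*
 * Runtime PM usage: the device is powered up with pm_runtime_get_sync() in
 * xcan_probe(), xcan_open() and xcan_get_berr_counter(), and released with
 * pm_runtime_put() once the register access or the open network interface is
 * done with it. System sleep simply forwards to the runtime PM callbacks when
 * the device is not configured as a wakeup source.
 */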

/**
 * xcan_probe - Platform registration call
 * @pdev: Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_probe(struct platform_device *pdev)
{
	struct resource *res; /* IO mem resources */
	struct net_device *ndev;
	struct xcan_priv *priv;
	void __iomem *addr;
	int ret, rx_max, tx_max;

	/* Get the virtual base address for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
	if (ret < 0)
		goto err;

	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
	if (ret < 0)
		goto err;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->can.bittiming_const = &xcan_bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_BERR_REPORTING;
	priv->reg_base = addr;
	priv->tx_max = tx_max;

	/* Get IRQ for the device */
	ndev->irq = platform_get_irq(pdev, 0);
	ndev->flags |= IFF_ECHO; /* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;

	/* Getting the CAN can_clk info */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		dev_err(&pdev->dev, "Device clock not found.\n");
		ret = PTR_ERR(priv->can_clk);
		goto err_free;
	}
	/* Check for type of CAN device */
	if (of_device_is_compatible(pdev->dev.of_node,
				    "xlnx,zynq-can-1.0")) {
		priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
		if (IS_ERR(priv->bus_clk)) {
			dev_err(&pdev->dev, "bus clock not found\n");
			ret = PTR_ERR(priv->bus_clk);
			goto err_free;
		}
	} else {
		priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
		if (IS_ERR(priv->bus_clk)) {
			dev_err(&pdev->dev, "bus clock not found\n");
			ret = PTR_ERR(priv->bus_clk);
			goto err_free;
		}
	}

	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_pmdisable;
	}

	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
		goto err_disableclks;
	}

	devm_can_led_init(ndev);

	pm_runtime_put(&pdev->dev);

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   priv->tx_max);

	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
err_pmdisable:
	pm_runtime_disable(&pdev->dev);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev: Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", },
	{ .compatible = "xlnx,axi-can-1.00.a", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
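
/*
 * Device tree expectations (as consumed by xcan_probe() above): a node with
 * one of the compatible strings in xcan_of_match, a register region and an
 * interrupt for the controller, "tx-fifo-depth" and "rx-fifo-depth"
 * properties, plus a "can_clk" clock and a bus clock named "pclk" for
 * "xlnx,zynq-can-1.0" or "s_axi_aclk" otherwise.
 */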

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove = xcan_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table = xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");