/* Xilinx CAN device driver
 *
 * Copyright (C) 2012 - 2014 Xilinx, Inc.
 * Copyright (C) 2009 PetaLogix. All rights reserved.
 *
 * Description:
 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME	"xilinx_can"

/* CAN registers set */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
	XCAN_ESR_OFFSET		= 0x14, /* Error status */
	XCAN_SR_OFFSET		= 0x18, /* Status */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */
	XCAN_TXFIFO_ID_OFFSET	= 0x30, /* TX FIFO ID */
	XCAN_TXFIFO_DLC_OFFSET	= 0x34, /* TX FIFO DLC */
	XCAN_TXFIFO_DW1_OFFSET	= 0x38, /* TX FIFO Data Word 1 */
	XCAN_TXFIFO_DW2_OFFSET	= 0x3C, /* TX FIFO Data Word 2 */
	XCAN_RXFIFO_ID_OFFSET	= 0x50, /* RX FIFO ID */
	XCAN_RXFIFO_DLC_OFFSET	= 0x54, /* RX FIFO DLC */
	XCAN_RXFIFO_DW1_OFFSET	= 0x58, /* RX FIFO Data Word 1 */
	XCAN_RXFIFO_DW2_OFFSET	= 0x5C, /* RX FIFO Data Word 2 */
};

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK	0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK	0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK	0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK	0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK	0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK	0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK	0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK	0x0000000F /* Time segment 1 */
#define XCAN_ECR_REC_MASK	0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK	0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK	0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK	0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK	0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK	0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK	0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK	0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK	0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK	0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK	0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK	0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK	0x00000001 /* Configuration mode */
#define XCAN_IXR_TXFEMP_MASK	0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK	0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK	0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK	0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK	0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK	0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK	0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK	0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK	0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK	0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK	0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK	0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK	0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK	0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK	0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK	0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK	0xF0000000 /* Data length code */

#define XCAN_INTR_ALL		(XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
				 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
				 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
				 XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT	7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT	4  /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT	21 /* Standard Message Identifier */
#define XCAN_IDR_ID2_SHIFT	1  /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT	28 /* Data length code */
#define XCAN_ESR_REC_SHIFT	8  /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN	8
#define XCAN_TIMEOUT		(1 * HZ)

/**
 * struct xcan_priv - This definition defines the CAN driver instance
 * @can: CAN private data structure.
 * @tx_head: Tx CAN packets ready to send on the queue
 * @tx_tail: Tx CAN packets successfully sent on the queue
 * @tx_max: Maximum number of packets the driver can send
 * @napi: NAPI structure
 * @read_reg: For reading data from CAN registers
 * @write_reg: For writing data to CAN registers
 * @dev: Network device data structure
 * @reg_base: Ioremapped address to registers
 * @irq_flags: For request_irq()
 * @bus_clk: Pointer to struct clk
 * @can_clk: Pointer to struct clk
 */
struct xcan_priv {
	struct can_priv can;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
};

/* CAN Bittiming constants as per Xilinx CAN specs */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}

/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}

/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev: Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	return 0;
}

/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver set bittiming routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
				XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}

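/*
 * Worked register example for xcan_set_bittiming() (illustrative values
 * only, not taken from the hardware spec): assuming can_clk = 24 MHz and
 * a resolved bit timing of brp = 3, prop_seg + phase_seg1 = 5,
 * phase_seg2 = 2, sjw = 1, the code above would program:
 *
 *   BRPR = brp - 1                                        = 0x02
 *   BTR  = (tseg1 - 1) | ((tseg2 - 1) << 4) | ((sjw - 1) << 7) = 0x14
 *
 * That is 1 + 5 + 2 = 8 time quanta per bit, i.e. a 1 Mbit/s bus with the
 * assumed clock.
 */
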
/**
 * xcan_chip_start - This is the driver's start routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver's start routine.
 * Based on the state of the CAN device it puts
 * the CAN device into a proper mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_chip_start(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 reg_msr, reg_sr_mask;
	int err;
	unsigned long timeout;

	/* Check if it is in reset mode */
	err = set_reset_mode(ndev);
	if (err < 0)
		return err;

	err = xcan_set_bittiming(ndev);
	if (err < 0)
		return err;

	/* Enable interrupts */
	priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);

	/* Check whether it is loopback mode or normal mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		reg_msr = XCAN_MSR_LBACK_MASK;
		reg_sr_mask = XCAN_SR_LBACK_MASK;
	} else {
		reg_msr = 0x0;
		reg_sr_mask = XCAN_SR_NORMAL_MASK;
	}

	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev,
				    "timed out for correct mode\n");
			return -ETIMEDOUT;
		}
	}
	netdev_dbg(ndev, "status:0x%08x\n",
		   priv->read_reg(priv, XCAN_SR_OFFSET));

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;
}

/**
 * xcan_do_set_mode - This sets the mode of the driver
 * @ndev: Pointer to net_device structure
 * @mode: Tells the mode of the driver
 *
 * This checks the driver state and calls the
 * corresponding mode-set routine.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret;

	switch (mode) {
	case CAN_MODE_START:
		ret = xcan_chip_start(ndev);
		if (ret < 0) {
			netdev_err(ndev, "xcan_chip_start failed!\n");
			return ret;
		}
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xcan_start_xmit - Starts the transmission
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * This function is invoked from upper layers to initiate transmission. This
 * function uses the next available free txbuff and populates its fields to
 * start the transmission.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 id, dlc, data[2] = {0, 0};

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
			XCAN_SR_TXFLL_MASK)) {
		netif_stop_queue(ndev);
		netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
			XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
			XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;

	if (cf->can_dlc > 0)
		data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
	if (cf->can_dlc > 4)
		data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
	priv->tx_head++;

	/* Write the Frame to Xilinx CAN TX FIFO */
	priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
	/* If the CAN frame is RTR frame this write triggers transmission */
	priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
	if (!(cf->can_id & CAN_RTR_FLAG)) {
		priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
		/* If the CAN frame is Standard/Extended frame this
		 * write triggers transmission
		 */
		priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
		stats->tx_bytes += cf->can_dlc;
	}

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}

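/*
 * Illustrative TXFIFO_ID encoding built by xcan_start_xmit() above; the
 * identifiers are example values chosen only for demonstration:
 *
 *   standard frame, can_id = 0x123:
 *     id = 0x123 << XCAN_IDR_ID1_SHIFT            = 0x24600000
 *     (plus XCAN_IDR_SRR_MASK if CAN_RTR_FLAG is set)
 *
 *   extended frame, eff_id = can_id & CAN_EFF_MASK:
 *     id = ((eff_id & 0x3FFFF) << XCAN_IDR_ID2_SHIFT)   bits [18:1]
 *        | ((eff_id >> 18) << XCAN_IDR_ID1_SHIFT)       bits [31:21]
 *        | XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK
 *     (plus XCAN_IDR_RTR_MASK if CAN_RTR_FLAG is set)
 */
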
/**
 * xcan_rx - Is called from CAN isr to complete the received
 *	     frame processing
 * @ndev: Pointer to net_device structure
 *
 * This function is invoked from the CAN isr (poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
	dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
				XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->can_dlc = get_can_dlc(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
	data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->can_dlc > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->can_dlc > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}

/**
 * xcan_err_interrupt - error frame Isr
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This is the CAN error interrupt and it will
 * check the type of error and forward the error
 * frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 err_status, status, txerr = 0, rxerr = 0;

	skb = alloc_can_err_skb(ndev, &cf);

	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
	txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
	status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		if (skb)
			cf->can_id |= CAN_ERR_BUSOFF;
	} else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		priv->can.can_stats.error_passive++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (rxerr > 127) ?
					CAN_ERR_CRTL_RX_PASSIVE :
					CAN_ERR_CRTL_TX_PASSIVE;
			cf->data[6] = txerr;
			cf->data[7] = rxerr;
		}
	} else if (status & XCAN_SR_ERRWRN_MASK) {
		priv->can.state = CAN_STATE_ERROR_WARNING;
		priv->can.can_stats.error_warning++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= (txerr > rxerr) ?
					CAN_ERR_CRTL_TX_WARNING :
					CAN_ERR_CRTL_RX_WARNING;
			cf->data[6] = txerr;
			cf->data[7] = rxerr;
		}
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
		}
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		if (skb)
			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_ACK;
				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	if (skb) {
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_rx(skb);
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}

/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This checks the state of the CAN device
 * and puts the device into an appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi: napi structure pointer
 * @quota: Max number of rx packets to be processed.
 *
 * This is the poll routine for rx part.
 * It will process at most the quota number of packets.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	int work_done = 0;

	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
		if (isr & XCAN_IXR_RXOK_MASK) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
				XCAN_IXR_RXOK_MASK);
			work_done += xcan_rx(ndev);
		} else {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
				XCAN_IXR_RXNEMP_MASK);
			break;
		}
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	}

	if (work_done)
		can_led_event(ndev, CAN_LED_EVENT_RX);

	if (work_done < quota) {
		napi_complete_done(napi, work_done);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
}

/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev: net_device pointer
 * @isr: Interrupt status register value
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;

	while ((priv->tx_head - priv->tx_tail > 0) &&
			(isr & XCAN_IXR_TXOK_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		can_get_echo_skb(ndev, priv->tx_tail %
					priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	}
	can_led_event(ndev, CAN_LED_EVENT_TX);
	netif_wake_queue(ndev);
}

/**
 * xcan_interrupt - CAN Isr
 * @irq: irq number
 * @dev_id: device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and Processing it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and Processing it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and Processing it */
	if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
				XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
				XCAN_IXR_ARBLST_MASK));
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and Processing it */
	if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

/**
 * xcan_chip_stop - Driver stop routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver's stop routine. It will disable the
 * interrupts and put the device into configuration mode.
 */
static void xcan_chip_stop(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;

	/* Disable interrupts and leave the can in configuration mode */
	ier = priv->read_reg(priv, XCAN_IER_OFFSET);
	ier &= ~XCAN_INTR_ALL;
	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
	priv->can.state = CAN_STATE_STOPPED;
}

/**
 * xcan_open - Driver open routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver open routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		return ret;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	can_led_event(ndev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}

/**
 * xcan_close - Driver close routine
 * @ndev: Pointer to net_device structure
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);
	pm_runtime_put(priv->dev);

	return 0;
}

/**
 * xcan_get_berr_counter - error counter routine
 * @ndev: Pointer to net_device structure
 * @bec: Pointer to can_berr_counter structure
 *
 * This is the driver error counter routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_get_berr_counter(const struct net_device *ndev,
				 struct can_berr_counter *bec)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		return ret;
	}

	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);

	pm_runtime_put(priv->dev);

	return 0;
}

static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

/**
 * xcan_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return pm_runtime_force_suspend(dev);

	return 0;
}

/**
 * xcan_resume - Resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return pm_runtime_force_resume(dev);

	return 0;
}

/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
	}

	priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
	priv->can.state = CAN_STATE_SLEEPING;

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}

/**
 * xcan_runtime_resume - Runtime resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;
	u32 isr, status;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}
	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if (netif_running(ndev)) {
		if (isr & XCAN_IXR_BSOFF_MASK) {
			priv->can.state = CAN_STATE_BUS_OFF;
			priv->write_reg(priv, XCAN_SRR_OFFSET,
					XCAN_SRR_RESET_MASK);
		} else if ((status & XCAN_SR_ESTAT_MASK) ==
					XCAN_SR_ESTAT_MASK) {
			priv->can.state = CAN_STATE_ERROR_PASSIVE;
		} else if (status & XCAN_SR_ERRWRN_MASK) {
			priv->can.state = CAN_STATE_ERROR_WARNING;
		} else {
			priv->can.state = CAN_STATE_ERROR_ACTIVE;
		}
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}

static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};

/**
 * xcan_probe - Platform registration call
 * @pdev: Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_probe(struct platform_device *pdev)
{
	struct resource *res; /* IO mem resources */
	struct net_device *ndev;
	struct xcan_priv *priv;
	void __iomem *addr;
	int ret, rx_max, tx_max;

	/* Get the virtual base address for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
	if (ret < 0)
		goto err;

	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
	if (ret < 0)
		goto err;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->can.bittiming_const = &xcan_bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_BERR_REPORTING;
	priv->reg_base = addr;
	priv->tx_max = tx_max;

	/* Get IRQ for the device */
	ndev->irq = platform_get_irq(pdev, 0);
	ndev->flags |= IFF_ECHO;	/* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;

	/* Getting the CAN can_clk info */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		dev_err(&pdev->dev, "Device clock not found.\n");
		ret = PTR_ERR(priv->can_clk);
		goto err_free;
	}

	/* Check for type of CAN device */
	if (of_device_is_compatible(pdev->dev.of_node,
				    "xlnx,zynq-can-1.0")) {
		priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
		if (IS_ERR(priv->bus_clk)) {
			dev_err(&pdev->dev, "bus clock not found\n");
			ret = PTR_ERR(priv->bus_clk);
			goto err_free;
		}
	} else {
		priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
		if (IS_ERR(priv->bus_clk)) {
			dev_err(&pdev->dev, "bus clock not found\n");
			ret = PTR_ERR(priv->bus_clk);
			goto err_free;
		}
	}

	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_pmdisable;
	}

	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register CAN device (err=%d)\n",
			ret);
		goto err_disableclks;
	}

	devm_can_led_init(ndev);

	pm_runtime_put(&pdev->dev);

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   priv->tx_max);

	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
err_pmdisable:
	pm_runtime_disable(&pdev->dev);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev: Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", },
	{ .compatible = "xlnx,axi-can-1.00.a", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);

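/*
 * Illustrative device tree node for the Zynq CANPS variant. The register
 * address, interrupt specifier, clock phandles and FIFO depths below are
 * example values only; the authoritative properties are described in the
 * Xilinx CAN device tree bindings. The clock-names, compatible string and
 * the tx-fifo-depth/rx-fifo-depth properties match what xcan_probe() reads.
 *
 *	can0: can@e0008000 {
 *		compatible = "xlnx,zynq-can-1.0";
 *		reg = <0xe0008000 0x1000>;
 *		interrupts = <0 28 4>;
 *		clocks = <&clkc 19>, <&clkc 36>;
 *		clock-names = "can_clk", "pclk";
 *		tx-fifo-depth = <0x40>;
 *		rx-fifo-depth = <0x40>;
 *	};
 */
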
static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");