smd.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327
  1. /*
  2. * Copyright (c) 2015, Sony Mobile Communications AB.
  3. * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 and
  7. * only version 2 as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <linux/interrupt.h>
  15. #include <linux/io.h>
  16. #include <linux/mfd/syscon.h>
  17. #include <linux/module.h>
  18. #include <linux/of_irq.h>
  19. #include <linux/of_platform.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/regmap.h>
  22. #include <linux/sched.h>
  23. #include <linux/slab.h>
  24. #include <linux/soc/qcom/smd.h>
  25. #include <linux/soc/qcom/smem.h>
  26. #include <linux/wait.h>
  27. /*
  28. * The Qualcomm Shared Memory communication solution provides point-to-point
  29. * channels for clients to send and receive streaming or packet based data.
  30. *
  31. * Each channel consists of a control item (channel info) and a ring buffer
  32. * pair. The channel info carry information related to channel state, flow
  33. * control and the offsets within the ring buffer.
  34. *
  35. * All allocated channels are listed in an allocation table, identifying the
  36. * pair of items by name, type and remote processor.
  37. *
  38. * Upon creating a new channel the remote processor allocates channel info and
  39. * ring buffer items from the smem heap and populate the allocation table. An
  40. * interrupt is sent to the other end of the channel and a scan for new
  41. * channels should be done. A channel never goes away, it will only change
  42. * state.
  43. *
  44. * The remote processor signals it intent for bring up the communication
  45. * channel by setting the state of its end of the channel to "opening" and
  46. * sends out an interrupt. We detect this change and register a smd device to
  47. * consume the channel. Upon finding a consumer we finish the handshake and the
  48. * channel is up.
  49. *
  50. * Upon closing a channel, the remote processor will update the state of its
  51. * end of the channel and signal us, we will then unregister any attached
  52. * device and close our end of the channel.
  53. *
  54. * Devices attached to a channel can use the qcom_smd_send function to push
  55. * data to the channel, this is done by copying the data into the tx ring
  56. * buffer, updating the pointers in the channel info and signaling the remote
  57. * processor.
  58. *
  59. * The remote processor does the equivalent when it transfer data and upon
  60. * receiving the interrupt we check the channel info for new data and delivers
  61. * this to the attached device. If the device is not ready to receive the data
  62. * we leave it in the ring buffer for now.
  63. */
  64. struct smd_channel_info;
  65. struct smd_channel_info_word;
  66. #define SMD_ALLOC_TBL_COUNT 2
  67. #define SMD_ALLOC_TBL_SIZE 64
/*
 * This lists the various smem heap items relevant for the allocation table and
 * smd channel entries.
 */
static const struct {
	unsigned alloc_tbl_id;	/* smem item id of the allocation table */
	unsigned info_base_id;	/* first smem item id of the channel info entries */
	unsigned fifo_base_id;	/* first smem item id of the fifo ring buffers */
} smem_items[SMD_ALLOC_TBL_COUNT] = {
	{
		.alloc_tbl_id = 13,
		.info_base_id = 14,
		.fifo_base_id = 338
	},
	{
		.alloc_tbl_id = 14,
		.info_base_id = 266,
		.fifo_base_id = 202,
	},
};
/**
 * struct qcom_smd_edge - representing a remote processor
 * @smd: handle to qcom_smd
 * @of_node: of_node handle for information related to this edge
 * @edge_id: identifier of this edge
 * @remote_pid: identifier of remote processor
 * @irq: interrupt for signals on this edge
 * @ipc_regmap: regmap handle holding the outgoing ipc register
 * @ipc_offset: offset within @ipc_regmap of the register for ipc
 * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
 * @channels: list of all channels detected on this edge
 * @channels_lock: guard for modifications of @channels
 * @allocated: array of bitmaps representing already allocated channels
 * @need_rescan: flag that the @work needs to scan smem for new channels
 * @smem_available: last available amount of smem triggering a channel scan
 * @work: work item for edge house keeping
 */
struct qcom_smd_edge {
	struct qcom_smd *smd;
	struct device_node *of_node;
	unsigned edge_id;
	unsigned remote_pid;

	int irq;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct list_head channels;
	spinlock_t channels_lock;

	/* One bitmap per allocation table (see smem_items) */
	DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);

	bool need_rescan;
	unsigned smem_available;

	struct work_struct work;
};
/*
 * SMD channel states.
 *
 * These values travel through the shared channel info "state" word (see
 * GET_RX_CHANNEL_INFO/SET_TX_CHANNEL_INFO usage), so the numeric values are
 * part of the protocol and must not be reordered.
 */
enum smd_channel_state {
	SMD_CHANNEL_CLOSED,
	SMD_CHANNEL_OPENING,
	SMD_CHANNEL_OPENED,
	SMD_CHANNEL_FLUSHING,
	SMD_CHANNEL_CLOSING,
	SMD_CHANNEL_RESET,
	SMD_CHANNEL_RESET_OPENING
};
/**
 * struct qcom_smd_channel - smd channel struct
 * @edge: qcom_smd_edge this channel is living on
 * @qsdev: reference to an associated smd client device
 * @name: name of the channel
 * @state: local state of the channel
 * @remote_state: remote state of the channel
 * @tx_info: byte aligned outgoing channel info
 * @rx_info: byte aligned incoming channel info
 * @tx_info_word: word aligned outgoing channel info
 * @rx_info_word: word aligned incoming channel info
 * @tx_lock: lock to make writes to the channel mutually exclusive
 * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
 * @tx_fifo: pointer to the outgoing ring buffer
 * @rx_fifo: pointer to the incoming ring buffer
 * @fifo_size: size of each ring buffer
 * @bounce_buffer: bounce buffer for reading wrapped packets
 * @cb: callback function registered for this channel
 * @recv_lock: guard for rx info modifications and cb pointer
 * @pkt_size: size of the currently handled packet
 * @list: list entry for @channels in qcom_smd_edge
 *
 * Exactly one of @tx_info/@tx_info_word (and likewise for rx) is populated,
 * depending on the alignment of the underlying smem item.
 */
struct qcom_smd_channel {
	struct qcom_smd_edge *edge;

	struct qcom_smd_device *qsdev;

	char *name;
	enum smd_channel_state state;
	enum smd_channel_state remote_state;

	struct smd_channel_info *tx_info;
	struct smd_channel_info *rx_info;

	struct smd_channel_info_word *tx_info_word;
	struct smd_channel_info_word *rx_info_word;

	struct mutex tx_lock;
	wait_queue_head_t fblockread_event;

	void *tx_fifo;
	void *rx_fifo;
	int fifo_size;

	void *bounce_buffer;
	int (*cb)(struct qcom_smd_device *, const void *, size_t);

	spinlock_t recv_lock;

	int pkt_size;

	struct list_head list;
};
  176. /**
  177. * struct qcom_smd - smd struct
  178. * @dev: device struct
  179. * @num_edges: number of entries in @edges
  180. * @edges: array of edges to be handled
  181. */
  182. struct qcom_smd {
  183. struct device *dev;
  184. unsigned num_edges;
  185. struct qcom_smd_edge edges[0];
  186. };
/*
 * Format of the smd_info smem items, for byte aligned channels.
 *
 * The f* members are handshake/flow-control flags shared with the remote
 * processor; @head and @tail index into the associated fifo ring buffer.
 */
struct smd_channel_info {
	u32 state;
	u8 fDSR;
	u8 fCTS;
	u8 fCD;
	u8 fRI;
	u8 fHEAD;
	u8 fTAIL;
	u8 fSTATE;
	u8 fBLOCKREADINTR;
	u32 tail;
	u32 head;
};
/*
 * Format of the smd_info smem items, for word aligned channels.
 *
 * Same layout as struct smd_channel_info, but every member is widened to a
 * full 32-bit word so the item can be accessed with word accesses only.
 */
struct smd_channel_info_word {
	u32 state;
	u32 fDSR;
	u32 fCTS;
	u32 fCD;
	u32 fRI;
	u32 fHEAD;
	u32 fTAIL;
	u32 fSTATE;
	u32 fBLOCKREADINTR;
	u32 tail;
	u32 head;
};
/*
 * Channel info accessors. A channel has either the byte aligned or the word
 * aligned info struct populated (the other pointer is NULL), so these pick
 * whichever variant is in use for the rx respectively tx side.
 */
#define GET_RX_CHANNEL_INFO(channel, param) \
	(channel->rx_info_word ? \
		channel->rx_info_word->param : \
		channel->rx_info->param)

#define SET_RX_CHANNEL_INFO(channel, param, value) \
	(channel->rx_info_word ? \
		(channel->rx_info_word->param = value) : \
		(channel->rx_info->param = value))

#define GET_TX_CHANNEL_INFO(channel, param) \
	(channel->tx_info_word ? \
		channel->tx_info_word->param : \
		channel->tx_info->param)

#define SET_TX_CHANNEL_INFO(channel, param, value) \
	(channel->tx_info_word ? \
		(channel->tx_info_word->param = value) : \
		(channel->tx_info->param = value))
/**
 * struct qcom_smd_alloc_entry - channel allocation entry
 * @name: channel name
 * @cid: channel index
 * @flags: channel flags and edge id
 * @ref_count: reference count of the channel
 *
 * One entry of the shared allocation table; __packed because the layout is
 * shared with the remote processor.
 */
struct qcom_smd_alloc_entry {
	u8 name[20];
	u32 cid;
	u32 flags;
	u32 ref_count;
} __packed;

/* Low 8 bits of @flags hold the edge id, bits 8/9 the channel type */
#define SMD_CHANNEL_FLAGS_EDGE_MASK	0xff
#define SMD_CHANNEL_FLAGS_STREAM	BIT(8)
#define SMD_CHANNEL_FLAGS_PACKET	BIT(9)

/*
 * Each smd packet contains a 20 byte header, with the first 4 being the length
 * of the packet.
 */
#define SMD_PACKET_HEADER_LEN	20
  256. /*
  257. * Signal the remote processor associated with 'channel'.
  258. */
  259. static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
  260. {
  261. struct qcom_smd_edge *edge = channel->edge;
  262. regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
  263. }
/*
 * Initialize the tx channel info
 *
 * Clears all flow-control flags and ring buffer indices on our side of the
 * channel, signals the remote processor and marks the local state CLOSED.
 */
static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
{
	SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
	SET_TX_CHANNEL_INFO(channel, fDSR, 0);
	SET_TX_CHANNEL_INFO(channel, fCTS, 0);
	SET_TX_CHANNEL_INFO(channel, fCD, 0);
	SET_TX_CHANNEL_INFO(channel, fRI, 0);
	SET_TX_CHANNEL_INFO(channel, fHEAD, 0);
	SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
	/* Announce state changes */
	SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
	/* 1 == no per-read interrupt needed; qcom_smd_send() clears it to wait */
	SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
	SET_TX_CHANNEL_INFO(channel, head, 0);
	SET_TX_CHANNEL_INFO(channel, tail, 0);

	qcom_smd_signal_channel(channel);

	channel->state = SMD_CHANNEL_CLOSED;
	channel->pkt_size = 0;
}
  284. /*
  285. * Calculate the amount of data available in the rx fifo
  286. */
  287. static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
  288. {
  289. unsigned head;
  290. unsigned tail;
  291. head = GET_RX_CHANNEL_INFO(channel, head);
  292. tail = GET_RX_CHANNEL_INFO(channel, tail);
  293. return (head - tail) & (channel->fifo_size - 1);
  294. }
/*
 * Set tx channel state and inform the remote processor
 */
static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
				       int state)
{
	struct qcom_smd_edge *edge = channel->edge;
	bool is_open = state == SMD_CHANNEL_OPENED;

	/* Nothing to do if we're already in the requested state */
	if (channel->state == state)
		return;

	dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);

	/* The modem-style status lines mirror whether the channel is open */
	SET_TX_CHANNEL_INFO(channel, fDSR, is_open);
	SET_TX_CHANNEL_INFO(channel, fCTS, is_open);
	SET_TX_CHANNEL_INFO(channel, fCD, is_open);

	SET_TX_CHANNEL_INFO(channel, state, state);
	/* Tell the remote a state change happened */
	SET_TX_CHANNEL_INFO(channel, fSTATE, 1);

	channel->state = state;
	qcom_smd_signal_channel(channel);
}
  314. /*
  315. * Copy count bytes of data using 32bit accesses, if that's required.
  316. */
  317. static void smd_copy_to_fifo(void __iomem *_dst,
  318. const void *_src,
  319. size_t count,
  320. bool word_aligned)
  321. {
  322. u32 *dst = (u32 *)_dst;
  323. u32 *src = (u32 *)_src;
  324. if (word_aligned) {
  325. count /= sizeof(u32);
  326. while (count--)
  327. writel_relaxed(*src++, dst++);
  328. } else {
  329. memcpy_toio(_dst, _src, count);
  330. }
  331. }
  332. /*
  333. * Copy count bytes of data using 32bit accesses, if that is required.
  334. */
  335. static void smd_copy_from_fifo(void *_dst,
  336. const void __iomem *_src,
  337. size_t count,
  338. bool word_aligned)
  339. {
  340. u32 *dst = (u32 *)_dst;
  341. u32 *src = (u32 *)_src;
  342. if (word_aligned) {
  343. count /= sizeof(u32);
  344. while (count--)
  345. *dst++ = readl_relaxed(src++);
  346. } else {
  347. memcpy_fromio(_dst, _src, count);
  348. }
  349. }
/*
 * Read count bytes of data from the rx fifo into buf, but don't advance the
 * tail.
 *
 * The caller must ensure at least count bytes are available (see
 * qcom_smd_channel_get_rx_avail()); the copy is split in two when it wraps
 * past the end of the ring buffer. Returns count.
 */
static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
				    void *buf, size_t count)
{
	bool word_aligned;
	unsigned tail;
	size_t len;

	word_aligned = channel->rx_info_word != NULL;
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	/* First chunk: from tail up to the end of the fifo */
	len = min_t(size_t, count, channel->fifo_size - tail);
	if (len) {
		smd_copy_from_fifo(buf,
				   channel->rx_fifo + tail,
				   len,
				   word_aligned);
	}

	/* Second chunk: wrap around to the start of the fifo */
	if (len != count) {
		smd_copy_from_fifo(buf + len,
				   channel->rx_fifo,
				   count - len,
				   word_aligned);
	}

	return count;
}
  377. /*
  378. * Advance the rx tail by count bytes.
  379. */
  380. static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
  381. size_t count)
  382. {
  383. unsigned tail;
  384. tail = GET_RX_CHANNEL_INFO(channel, tail);
  385. tail += count;
  386. tail &= (channel->fifo_size - 1);
  387. SET_RX_CHANNEL_INFO(channel, tail, tail);
  388. }
/*
 * Read out a single packet from the rx fifo and deliver it to the device.
 *
 * Returns 0 on success (or when no callback is registered, leaving the data
 * in the fifo), otherwise the negative value returned by the client callback.
 */
static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev = channel->qsdev;
	unsigned tail;
	size_t len;
	void *ptr;
	int ret;

	/* Nobody to deliver to yet; keep the packet in the fifo */
	if (!channel->cb)
		return 0;

	tail = GET_RX_CHANNEL_INFO(channel, tail);

	/* Use bounce buffer if the data wraps around the end of the fifo */
	if (tail + channel->pkt_size >= channel->fifo_size) {
		ptr = channel->bounce_buffer;
		len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
	} else {
		/* Contiguous packet; hand the fifo memory to the client directly */
		ptr = channel->rx_fifo + tail;
		len = channel->pkt_size;
	}

	ret = channel->cb(qsdev, ptr, len);
	if (ret < 0)
		return ret;

	/* Only forward the tail if the client consumed the data */
	qcom_smd_channel_advance(channel, len);

	channel->pkt_size = 0;

	return 0;
}
/*
 * Per channel interrupt handling
 *
 * Called with channel->recv_lock held. Returns true if a remote state change
 * was observed, telling the caller to kick the edge worker for a state scan.
 */
static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
{
	bool need_state_scan = false;
	int remote_state;
	u32 pktlen;
	int avail;
	int ret;

	/* Handle state changes */
	remote_state = GET_RX_CHANNEL_INFO(channel, state);
	if (remote_state != channel->remote_state) {
		channel->remote_state = remote_state;
		need_state_scan = true;
	}
	/* Indicate that we have seen any state change */
	SET_RX_CHANNEL_INFO(channel, fSTATE, 0);

	/* Signal waiting qcom_smd_send() about the interrupt */
	if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR))
		wake_up_interruptible(&channel->fblockread_event);

	/* Don't consume any data until we've opened the channel */
	if (channel->state != SMD_CHANNEL_OPENED)
		goto out;

	/* Indicate that we've seen the new data */
	SET_RX_CHANNEL_INFO(channel, fHEAD, 0);

	/* Consume data */
	for (;;) {
		avail = qcom_smd_channel_get_rx_avail(channel);

		if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
			/* New packet: first header word carries the payload length */
			qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
			qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
			channel->pkt_size = pktlen;
		} else if (channel->pkt_size && avail >= channel->pkt_size) {
			ret = qcom_smd_channel_recv_single(channel);
			if (ret)
				break;
		} else {
			/* Not enough data for a header or a full packet yet */
			break;
		}
	}

	/* Indicate that we have seen and updated tail */
	SET_RX_CHANNEL_INFO(channel, fTAIL, 1);

	/* Signal the remote that we've consumed the data (if requested) */
	if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) {
		/* Ensure ordering of channel info updates */
		wmb();

		qcom_smd_signal_channel(channel);
	}

out:
	return need_state_scan;
}
/*
 * The edge interrupts are triggered by the remote processor on state changes,
 * channel info updates or when new channels are created.
 */
static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
{
	struct qcom_smd_edge *edge = data;
	struct qcom_smd_channel *channel;
	unsigned available;
	bool kick_worker = false;

	/*
	 * Handle state changes or data on each of the channels on this edge
	 */
	spin_lock(&edge->channels_lock);
	list_for_each_entry(channel, &edge->channels, list) {
		spin_lock(&channel->recv_lock);
		kick_worker |= qcom_smd_channel_intr(channel);
		spin_unlock(&channel->recv_lock);
	}
	spin_unlock(&edge->channels_lock);

	/*
	 * Creating a new channel requires allocating an smem entry, so we only
	 * have to scan if the amount of available space in smem have changed
	 * since last scan.
	 */
	available = qcom_smem_get_free_space(edge->remote_pid);
	if (available != edge->smem_available) {
		edge->smem_available = available;
		edge->need_rescan = true;
		kick_worker = true;
	}

	/* Channel discovery and state scans are deferred to the edge worker */
	if (kick_worker)
		schedule_work(&edge->work);

	return IRQ_HANDLED;
}
/*
 * Delivers any outstanding packets in the rx fifo, can be used after probe of
 * the clients to deliver any packets that wasn't delivered before the client
 * was setup.
 */
static void qcom_smd_channel_resume(struct qcom_smd_channel *channel)
{
	unsigned long flags;

	/* qcom_smd_channel_intr() expects recv_lock held, irqs disabled */
	spin_lock_irqsave(&channel->recv_lock, flags);
	qcom_smd_channel_intr(channel);
	spin_unlock_irqrestore(&channel->recv_lock, flags);
}
  517. /*
  518. * Calculate how much space is available in the tx fifo.
  519. */
  520. static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
  521. {
  522. unsigned head;
  523. unsigned tail;
  524. unsigned mask = channel->fifo_size - 1;
  525. head = GET_TX_CHANNEL_INFO(channel, head);
  526. tail = GET_TX_CHANNEL_INFO(channel, tail);
  527. return mask - ((head - tail) & mask);
  528. }
/*
 * Write count bytes of data into channel, possibly wrapping in the ring buffer
 *
 * The caller must ensure enough room is available (see
 * qcom_smd_get_tx_avail()). Returns count.
 */
static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
			       const void *data,
			       size_t count)
{
	bool word_aligned;
	unsigned head;
	size_t len;

	word_aligned = channel->tx_info_word != NULL;
	head = GET_TX_CHANNEL_INFO(channel, head);

	/* First chunk: from head up to the end of the fifo */
	len = min_t(size_t, count, channel->fifo_size - head);
	if (len) {
		smd_copy_to_fifo(channel->tx_fifo + head,
				 data,
				 len,
				 word_aligned);
	}

	/* Second chunk: wrap around to the start of the fifo */
	if (len != count) {
		smd_copy_to_fifo(channel->tx_fifo,
				 data + len,
				 count - len,
				 word_aligned);
	}

	/* Publish the new head; masking assumes power-of-two fifo_size */
	head += count;
	head &= (channel->fifo_size - 1);
	SET_TX_CHANNEL_INFO(channel, head, head);

	return count;
}
/**
 * qcom_smd_send - write data to smd channel
 * @channel: channel handle
 * @data: buffer of data to write
 * @len: number of bytes to write
 *
 * This is a blocking write of len bytes into the channel's tx ring buffer and
 * signal the remote end. It will sleep until there is enough space available
 * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid
 * polling.
 *
 * Return: 0 on success, -EINVAL for misaligned data on a word aligned
 * channel, -EPIPE if the channel closes while waiting, or the error from an
 * interrupted lock/wait.
 */
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
{
	/* 20 byte packet header; first word carries the payload length */
	u32 hdr[5] = {len,};
	int tlen = sizeof(hdr) + len;
	int ret;

	/* Word aligned channels only accept word size aligned data */
	if (channel->rx_info_word != NULL && len % 4)
		return -EINVAL;

	ret = mutex_lock_interruptible(&channel->tx_lock);
	if (ret)
		return ret;

	while (qcom_smd_get_tx_avail(channel) < tlen) {
		if (channel->state != SMD_CHANNEL_OPENED) {
			ret = -EPIPE;
			goto out;
		}

		/*
		 * Ask the remote to interrupt us when it consumes data, then
		 * sleep until there is room for the whole message.
		 * NOTE(review): on an interrupted wait we bail out with
		 * fBLOCKREADINTR still 0, so the remote keeps interrupting —
		 * presumably harmless but worth confirming.
		 */
		SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);

		ret = wait_event_interruptible(channel->fblockread_event,
				       qcom_smd_get_tx_avail(channel) >= tlen ||
				       channel->state != SMD_CHANNEL_OPENED);
		if (ret)
			goto out;

		SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
	}

	SET_TX_CHANNEL_INFO(channel, fTAIL, 0);

	qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
	qcom_smd_write_fifo(channel, data, len);

	SET_TX_CHANNEL_INFO(channel, fHEAD, 1);

	/* Ensure ordering of channel info updates */
	wmb();

	qcom_smd_signal_channel(channel);

out:
	mutex_unlock(&channel->tx_lock);

	return ret;
}
EXPORT_SYMBOL(qcom_smd_send);
/* Convert a generic device back to its containing qcom_smd_device */
static struct qcom_smd_device *to_smd_device(struct device *dev)
{
	return container_of(dev, struct qcom_smd_device, dev);
}
  610. static struct qcom_smd_driver *to_smd_driver(struct device *dev)
  611. {
  612. struct qcom_smd_device *qsdev = to_smd_device(dev);
  613. return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver);
  614. }
/* Bus match callback: defer to the standard OF compatible matching */
static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
{
	return of_driver_match_device(dev, drv);
}
/*
 * Probe the smd client.
 *
 * The remote side have indicated that it want the channel to be opened, so
 * complete the state handshake and probe our client driver.
 */
static int qcom_smd_dev_probe(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);
	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
	struct qcom_smd_channel *channel = qsdev->channel;
	size_t bb_size;
	int ret;

	/*
	 * Packets are maximum 4k, but reduce if the fifo is smaller
	 */
	bb_size = min(channel->fifo_size, SZ_4K);
	channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
	if (!channel->bounce_buffer)
		return -ENOMEM;

	/* Register the callback before opening, so no packets are missed */
	channel->cb = qsdrv->callback;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);

	ret = qsdrv->probe(qsdev);
	if (ret)
		goto err;

	/* Deliver any packets that arrived before the client was ready */
	qcom_smd_channel_resume(channel);

	return 0;

err:
	dev_err(&qsdev->dev, "probe failed\n");

	/* Undo everything: drop the callback, free the buffer, close */
	channel->cb = NULL;
	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	return ret;
}
/*
 * Remove the smd client.
 *
 * The channel is going away, for some reason, so remove the smd client and
 * reset the channel state. Always returns 0.
 */
static int qcom_smd_dev_remove(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);
	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
	struct qcom_smd_channel *channel = qsdev->channel;
	unsigned long flags;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);

	/*
	 * Make sure we don't race with the code receiving data.
	 */
	spin_lock_irqsave(&channel->recv_lock, flags);
	channel->cb = NULL;
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	/* Wake up any sleepers in qcom_smd_send() */
	wake_up_interruptible(&channel->fblockread_event);

	/*
	 * We expect that the client might block in remove() waiting for any
	 * outstanding calls to qcom_smd_send() to wake up and finish.
	 */
	if (qsdrv->remove)
		qsdrv->remove(qsdev);

	/*
	 * The client is now gone, cleanup and reset the channel state.
	 */
	channel->qsdev = NULL;
	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	qcom_smd_channel_reset(channel);

	return 0;
}
/* Bus definition tying smd client devices to smd client drivers */
static struct bus_type qcom_smd_bus = {
	.name = "qcom_smd",
	.match = qcom_smd_dev_match,
	.probe = qcom_smd_dev_probe,
	.remove = qcom_smd_dev_remove,
};
  698. /*
  699. * Release function for the qcom_smd_device object.
  700. */
  701. static void qcom_smd_release_device(struct device *dev)
  702. {
  703. struct qcom_smd_device *qsdev = to_smd_device(dev);
  704. kfree(qsdev);
  705. }
  706. /*
  707. * Finds the device_node for the smd child interested in this channel.
  708. */
  709. static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
  710. const char *channel)
  711. {
  712. struct device_node *child;
  713. const char *name;
  714. const char *key;
  715. int ret;
  716. for_each_available_child_of_node(edge_node, child) {
  717. key = "qcom,smd-channels";
  718. ret = of_property_read_string(child, key, &name);
  719. if (ret) {
  720. of_node_put(child);
  721. continue;
  722. }
  723. if (strcmp(name, channel) == 0)
  724. return child;
  725. }
  726. return NULL;
  727. }
/*
 * Create a smd client device for channel that is being opened.
 *
 * Returns 0 on success, -EEXIST if a device already exists for the channel,
 * -ENXIO if no of_node matches the channel name, -ENOMEM or the error from
 * device_register() otherwise.
 */
static int qcom_smd_create_device(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev;
	struct qcom_smd_edge *edge = channel->edge;
	struct device_node *node;
	struct qcom_smd *smd = edge->smd;
	int ret;

	if (channel->qsdev)
		return -EEXIST;

	node = qcom_smd_match_channel(edge->of_node, channel->name);
	if (!node) {
		dev_dbg(smd->dev, "no match for '%s'\n", channel->name);
		return -ENXIO;
	}

	dev_dbg(smd->dev, "registering '%s'\n", channel->name);

	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
	if (!qsdev)
		return -ENOMEM;

	dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name);
	qsdev->dev.parent = smd->dev;
	qsdev->dev.bus = &qcom_smd_bus;
	qsdev->dev.release = qcom_smd_release_device;	/* frees qsdev */
	qsdev->dev.of_node = node;

	qsdev->channel = channel;
	channel->qsdev = qsdev;

	ret = device_register(&qsdev->dev);
	if (ret) {
		dev_err(smd->dev, "device_register failed: %d\n", ret);
		/*
		 * put_device() triggers the release callback which frees
		 * qsdev. NOTE(review): channel->qsdev is left pointing at the
		 * released device on this path — verify callers handle it.
		 */
		put_device(&qsdev->dev);
	}

	return ret;
}
/*
 * Destroy a smd client device for a channel that's going away.
 */
static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
{
	struct device *dev;

	BUG_ON(!channel->qsdev);

	dev = &channel->qsdev->dev;
	device_unregister(dev);
	/*
	 * NOTE(review): device_unregister() drops the reference taken by
	 * device_register(), which can invoke the release callback; the
	 * following of_node_put()/put_device() look like they may act on an
	 * already released device — verify the refcounting against
	 * qcom_smd_create_device().
	 */
	of_node_put(dev->of_node);
	put_device(dev);
}
/**
 * qcom_smd_driver_register - register a smd driver
 * @qsdrv: qcom_smd_driver struct
 *
 * Hooks the driver up to the smd bus and registers it with the driver core.
 *
 * Return: 0 on success, negative errno from driver_register() on failure.
 */
int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
{
	qsdrv->driver.bus = &qcom_smd_bus;
	return driver_register(&qsdrv->driver);
}
EXPORT_SYMBOL(qcom_smd_driver_register);
  785. /**
  786. * qcom_smd_driver_unregister - unregister a smd driver
  787. * @qsdrv: qcom_smd_driver struct
  788. */
  789. void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
  790. {
  791. driver_unregister(&qsdrv->driver);
  792. }
  793. EXPORT_SYMBOL(qcom_smd_driver_unregister);
/*
 * Allocate the qcom_smd_channel object for a newly found smd channel,
 * retrieving and validating the smem items involved.
 *
 * @edge:           edge the channel was discovered on
 * @smem_info_item: smem item id holding the tx/rx channel info pair
 * @smem_fifo_item: smem item id holding the combined tx/rx fifo
 * @name:           channel name (copied; caller's buffer not retained)
 *
 * Returns the new channel or an ERR_PTR() on failure.  All allocations are
 * devm-managed against smd->dev.
 */
static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
							unsigned smem_info_item,
							unsigned smem_fifo_item,
							char *name)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd *smd = edge->smd;
	size_t fifo_size;
	size_t info_size;
	void *fifo_base;
	void *info;
	int ret;

	channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);
	channel->edge = edge;
	channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
	if (!channel->name)
		return ERR_PTR(-ENOMEM);

	mutex_init(&channel->tx_lock);
	spin_lock_init(&channel->recv_lock);
	init_waitqueue_head(&channel->fblockread_event);

	/* Map the shared channel-info item; layout is tx info then rx info */
	ret = qcom_smem_get(edge->remote_pid, smem_info_item, (void **)&info,
			    &info_size);
	if (ret)
		goto free_name_and_channel;

	/*
	 * Use the size of the item to figure out which channel info struct to
	 * use.
	 */
	if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
		channel->tx_info_word = info;
		channel->rx_info_word = info + sizeof(struct smd_channel_info_word);
	} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
		channel->tx_info = info;
		channel->rx_info = info + sizeof(struct smd_channel_info);
	} else {
		dev_err(smd->dev,
			"channel info of size %zu not supported\n", info_size);
		ret = -EINVAL;
		goto free_name_and_channel;
	}

	ret = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_base,
			    &fifo_size);
	if (ret)
		goto free_name_and_channel;

	/* The channel consist of a rx and tx fifo of equal size */
	fifo_size /= 2;

	dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
		name, info_size, fifo_size);

	/* tx fifo occupies the first half of the item, rx fifo the second */
	channel->tx_fifo = fifo_base;
	channel->rx_fifo = fifo_base + fifo_size;
	channel->fifo_size = fifo_size;

	qcom_smd_channel_reset(channel);

	return channel;

free_name_and_channel:
	devm_kfree(smd->dev, channel->name);
	devm_kfree(smd->dev, channel);

	return ERR_PTR(ret);
}
/*
 * Scans the allocation table for any newly allocated channels, calls
 * qcom_smd_create_channel() to create representations of these and add
 * them to the edge's list of channels.
 *
 * Finally kicks the edge's state worker so newly found channels get
 * their client devices created.
 */
static void qcom_discover_channels(struct qcom_smd_edge *edge)
{
	struct qcom_smd_alloc_entry *alloc_tbl;
	struct qcom_smd_alloc_entry *entry;
	struct qcom_smd_channel *channel;
	struct qcom_smd *smd = edge->smd;
	unsigned long flags;
	unsigned fifo_id;
	unsigned info_id;
	int ret;
	int tbl;
	int i;

	for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
		/* A missing allocation table is not an error; just skip it */
		ret = qcom_smem_get(edge->remote_pid,
				    smem_items[tbl].alloc_tbl_id,
				    (void **)&alloc_tbl,
				    NULL);
		if (ret < 0)
			continue;

		for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
			entry = &alloc_tbl[i];
			/* Already picked up in an earlier scan */
			if (test_bit(i, edge->allocated[tbl]))
				continue;
			/* Skip unused or unnamed entries */
			if (entry->ref_count == 0)
				continue;
			if (!entry->name[0])
				continue;
			/* Only packet-mode channels are supported */
			if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET))
				continue;
			/* Entry must belong to this edge */
			if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
				continue;

			/* Channel id indexes into the per-table smem item ranges */
			info_id = smem_items[tbl].info_base_id + entry->cid;
			fifo_id = smem_items[tbl].fifo_base_id + entry->cid;

			channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
			if (IS_ERR(channel))
				continue;

			spin_lock_irqsave(&edge->channels_lock, flags);
			list_add(&channel->list, &edge->channels);
			spin_unlock_irqrestore(&edge->channels_lock, flags);

			dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
			/* Mark the entry consumed so we don't re-create it */
			set_bit(i, edge->allocated[tbl]);
		}
	}

	schedule_work(&edge->work);
}
/*
 * This per edge worker scans smem for any new channels and register these. It
 * then scans all registered channels for state changes that should be handled
 * by creating or destroying smd client devices for the registered channels.
 *
 * LOCKING: edge->channels_lock is not needed to be held during the traversal
 * of the channels list as it's done synchronously with the only writer.
 */
static void qcom_channel_state_worker(struct work_struct *work)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge = container_of(work,
						  struct qcom_smd_edge,
						  work);
	unsigned remote_state;

	/*
	 * Rescan smem if we have reason to belive that there are new channels.
	 */
	if (edge->need_rescan) {
		edge->need_rescan = false;
		qcom_discover_channels(edge);
	}

	/*
	 * Register a device for any closed channel where the remote processor
	 * is showing interest in opening the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_CLOSED)
			continue;

		/* Remote side's view of the channel state, read from smem */
		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state != SMD_CHANNEL_OPENING &&
		    remote_state != SMD_CHANNEL_OPENED)
			continue;

		qcom_smd_create_device(channel);
	}

	/*
	 * Unregister the device for any channel that is opened where the
	 * remote processor is closing the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_OPENING &&
		    channel->state != SMD_CHANNEL_OPENED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state == SMD_CHANNEL_OPENING ||
		    remote_state == SMD_CHANNEL_OPENED)
			continue;

		qcom_smd_destroy_device(channel);
	}
}
  958. /*
  959. * Parses an of_node describing an edge.
  960. */
  961. static int qcom_smd_parse_edge(struct device *dev,
  962. struct device_node *node,
  963. struct qcom_smd_edge *edge)
  964. {
  965. struct device_node *syscon_np;
  966. const char *key;
  967. int irq;
  968. int ret;
  969. INIT_LIST_HEAD(&edge->channels);
  970. spin_lock_init(&edge->channels_lock);
  971. INIT_WORK(&edge->work, qcom_channel_state_worker);
  972. edge->of_node = of_node_get(node);
  973. irq = irq_of_parse_and_map(node, 0);
  974. if (irq < 0) {
  975. dev_err(dev, "required smd interrupt missing\n");
  976. return -EINVAL;
  977. }
  978. ret = devm_request_irq(dev, irq,
  979. qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
  980. node->name, edge);
  981. if (ret) {
  982. dev_err(dev, "failed to request smd irq\n");
  983. return ret;
  984. }
  985. edge->irq = irq;
  986. key = "qcom,smd-edge";
  987. ret = of_property_read_u32(node, key, &edge->edge_id);
  988. if (ret) {
  989. dev_err(dev, "edge missing %s property\n", key);
  990. return -EINVAL;
  991. }
  992. edge->remote_pid = QCOM_SMEM_HOST_ANY;
  993. key = "qcom,remote-pid";
  994. of_property_read_u32(node, key, &edge->remote_pid);
  995. syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
  996. if (!syscon_np) {
  997. dev_err(dev, "no qcom,ipc node\n");
  998. return -ENODEV;
  999. }
  1000. edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
  1001. if (IS_ERR(edge->ipc_regmap))
  1002. return PTR_ERR(edge->ipc_regmap);
  1003. key = "qcom,ipc";
  1004. ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
  1005. if (ret < 0) {
  1006. dev_err(dev, "no offset in %s\n", key);
  1007. return -EINVAL;
  1008. }
  1009. ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
  1010. if (ret < 0) {
  1011. dev_err(dev, "no bit in %s\n", key);
  1012. return -EINVAL;
  1013. }
  1014. return 0;
  1015. }
/*
 * Platform probe: defers until smem is available, then allocates one
 * qcom_smd_edge per available child node and starts its state worker.
 * A failed edge parse is logged by the parser and skipped, not fatal.
 */
static int qcom_smd_probe(struct platform_device *pdev)
{
	struct qcom_smd_edge *edge;
	struct device_node *node;
	struct qcom_smd *smd;
	size_t array_size;
	int num_edges;
	int ret;
	int i = 0;

	/* Wait for smem */
	ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL);
	if (ret == -EPROBE_DEFER)
		return ret;

	/* smd and its trailing array of edges are a single allocation */
	num_edges = of_get_available_child_count(pdev->dev.of_node);
	array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
	smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
	if (!smd)
		return -ENOMEM;
	smd->dev = &pdev->dev;

	smd->num_edges = num_edges;
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		edge = &smd->edges[i++];
		edge->smd = smd;

		ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
		if (ret)
			continue;

		/* Trigger an initial channel scan on the worker */
		edge->need_rescan = true;
		schedule_work(&edge->work);
	}

	platform_set_drvdata(pdev, smd);

	return 0;
}
/*
 * Shut down all smd clients by making sure that each edge stops processing
 * events and scanning for new channels, then call destroy on the devices.
 */
static int qcom_smd_remove(struct platform_device *pdev)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge;
	struct qcom_smd *smd = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < smd->num_edges; i++) {
		edge = &smd->edges[i];

		/*
		 * Quiesce the edge before touching its channel list: no new
		 * interrupts, and the state worker has finished; after this
		 * the list can be walked without channels_lock.
		 */
		disable_irq(edge->irq);
		cancel_work_sync(&edge->work);

		list_for_each_entry(channel, &edge->channels, list) {
			if (!channel->qsdev)
				continue;

			qcom_smd_destroy_device(channel);
		}
	}

	return 0;
}
/* Device tree match: top-level "qcom,smd" node containing the edges */
static const struct of_device_id qcom_smd_of_match[] = {
	{ .compatible = "qcom,smd" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_of_match);

static struct platform_driver qcom_smd_driver = {
	.probe = qcom_smd_probe,
	.remove = qcom_smd_remove,
	.driver = {
		.name = "qcom-smd",
		.of_match_table = qcom_smd_of_match,
	},
};
  1083. static int __init qcom_smd_init(void)
  1084. {
  1085. int ret;
  1086. ret = bus_register(&qcom_smd_bus);
  1087. if (ret) {
  1088. pr_err("failed to register smd bus: %d\n", ret);
  1089. return ret;
  1090. }
  1091. return platform_driver_register(&qcom_smd_driver);
  1092. }
  1093. postcore_initcall(qcom_smd_init);
/*
 * Module exit: tear down in reverse order of qcom_smd_init() — the driver
 * must be gone before the bus it registers on is removed.
 */
static void __exit qcom_smd_exit(void)
{
	platform_driver_unregister(&qcom_smd_driver);
	bus_unregister(&qcom_smd_bus);
}
module_exit(qcom_smd_exit);
  1100. MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
  1101. MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
  1102. MODULE_LICENSE("GPL v2");