ti_sci.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message
 *		Since we work with request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting Semaphore for managing max simultaneous
 *			Messages.
 * @xfer_block:		Preallocated Message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @nb:		Reboot Notifier block
 * @d:		Debugfs file entry
 * @debug_region: Memory region where the debug messages are available
 * @debug_region_size: Debug region size
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
};

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:		sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to leave the last byte NULL terminated
	 * (hence we have allocated 1 extra 0 byte). Since we cannot guarantee
	 * any specific data format for debug messages, we just present the
	 * data in the buffer as is - we expect the messages to be
	 * self explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/**
 * ti_sci_debug_open() - debug file open
 * @inode:	inode pointer
 * @file:	file pointer
 *
 * Return: result of single_open
 */
static int ti_sci_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, ti_sci_debug_show, inode->i_private);
}

/* log file operations */
static const struct file_operations ti_sci_debug_fops = {
	.open = ti_sci_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Setup NULL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer.. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to the allocated transfer if all went fine,
 *	   else a corresponding error pointer.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only a controlled number of pending messages.
	 * Ideally, we might just have to wait for a single message; be
	 * conservative and wait 5 times that..
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies
	 * to predictable O(1) access, BUT, it opens us to risk if
	 * remote misbehaves with corrupted message sequence responses.
	 * If that happens, we are going to be messed up anyways..
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	/* And we wait for the response. */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	}
	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}

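/*
 * Illustrative sketch (not part of the driver): every ti_sci_cmd_*()
 * helper below follows the same allocate/fill/transfer/check/release
 * pattern built from the three helpers above. TI_SCI_MSG_EXAMPLE and
 * struct ti_sci_msg_req_example are placeholder names; the rest are the
 * real local symbols.
 *
 *	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_EXAMPLE,
 *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				   sizeof(*req), sizeof(*resp));
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);			// alloc failed
 *	req = (struct ti_sci_msg_req_example *)xfer->xfer_buf;
 *	req->field = value;				// fill payload
 *	ret = ti_sci_do_xfer(info, xfer);		// send and wait
 *	if (!ret) {
 *		resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
 *		ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
 *	}
 *	ti_sci_put_one_xfer(&info->minfo, xfer);	// release seq id
 *	return ret;
 */
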
/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	/* No need to setup flags since it is expected to respond */
	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   0x0, sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	/* Response is expected, so no need of any flags */
	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   0, sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * NOTE: The request is for exclusive access for the processor.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}

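/*
 * Usage sketch (illustrative only): the driver keeps no reference count,
 * so a client must balance these calls itself, typically through the ops
 * exported on the handle. Device ID 42 is a made-up example value.
 *
 *	const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;
 *
 *	ret = dops->get_device(handle, 42);	// power up, exclusive
 *	if (ret)
 *		return ret;
 *	// ...use the device...
 *	dops->put_device(handle, 42);		// balanced release
 */
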
/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
				      &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
				      &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state:	State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u8 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u8 clk_id, bool needs_ssc, bool can_change_freq,
				bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

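/*
 * Usage sketch (illustrative only): requesting control of a clock via the
 * ops exported on the handle; device 42 / clock 3 are made-up numbers.
 * The three bools map to the SSC, frequency-change and input-termination
 * flags composed above.
 *
 *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
 *
 *	ret = cops->get_clock(handle, 42, 3,
 *			      false,	// no spread spectrum needed
 *			      true,	// we may change the frequency
 *			      false);	// no input termination
 */
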
/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u8 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u8 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_AUTO);
}

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);

	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u8 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);

	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u8 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);

	return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 clk_id, u8 parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_parent *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;
	req->parent_id = parent_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 clk_id, u8 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*parent_id = resp->parent_id;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents of the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u8 clk_id,
					  u8 *num_parents)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_num_parents *req;
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*num_parents = resp->num_parents;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

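/*
 * Usage sketch (illustrative only): probing for an achievable rate within
 * a +/-5% window around a 100 MHz target; device 42 / clock 3 and all
 * frequencies are made-up example values.
 *
 *	u64 match;
 *
 *	ret = ti_sci_cmd_clk_get_match_freq(handle, 42, 3,
 *					    95000000,	// min: target - 5%
 *					    100000000,	// target
 *					    105000000,	// max: target + 5%
 *					    &match);
 *	if (!ret)
 *		pr_debug("best achievable rate: %llu Hz\n", match);
 */
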
  1261. /**
  1262. * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
  1263. * @handle: pointer to TI SCI handle
  1264. * @dev_id: Device identifier this request is for
  1265. * @clk_id: Clock identifier for the device for this request.
  1266. * Each device has it's own set of clock inputs. This indexes
  1267. * which clock input to modify.
  1268. * @min_freq: The minimum allowable frequency in Hz. This is the minimum
  1269. * allowable programmed frequency and does not account for clock
  1270. * tolerances and jitter.
  1271. * @target_freq: The target clock frequency in Hz. A frequency will be
  1272. * processed as close to this target frequency as possible.
  1273. * @max_freq: The maximum allowable frequency in Hz. This is the maximum
  1274. * allowable programmed frequency and does not account for clock
  1275. * tolerances and jitter.
  1276. *
  1277. * Return: 0 if all went well, else returns appropriate error value.
  1278. */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u8 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
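
/*
 * Illustrative sketch (not part of this driver): the min/target/max triple
 * lets the firmware pick any achievable rate within [min, max], as close to
 * target as it can get. For example, requesting roughly 100 MHz with 5%
 * slack through the ops table (dev_id and clk_id are hypothetical,
 * SoC-specific values):
 *
 *	ret = handle->ops.clk_ops.set_freq(handle, dev_id, clk_id,
 *					   95000000, 100000000, 105000000);
 */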

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *	    Each device has its own set of clock inputs. This indexes
 *	    which clock input to query.
 * @freq: Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u8 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
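
/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle: pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */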
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_reboot *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		ret = 0;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/*
 * ti_sci_setup_ops() - Setup the operations structures
 * @info: pointer to TISCI instance
 */
static void ti_sci_setup_ops(struct ti_sci_info *info)
{
	struct ti_sci_ops *ops = &info->handle.ops;
	struct ti_sci_core_ops *core_ops = &ops->core_ops;
	struct ti_sci_dev_ops *dops = &ops->dev_ops;
	struct ti_sci_clk_ops *cops = &ops->clk_ops;

	core_ops->reboot_device = ti_sci_cmd_core_reboot;

	dops->get_device = ti_sci_cmd_get_device;
	dops->idle_device = ti_sci_cmd_idle_device;
	dops->put_device = ti_sci_cmd_put_device;

	dops->is_valid = ti_sci_cmd_dev_is_valid;
	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
	dops->is_idle = ti_sci_cmd_dev_is_idle;
	dops->is_stop = ti_sci_cmd_dev_is_stop;
	dops->is_on = ti_sci_cmd_dev_is_on;
	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
	dops->set_device_resets = ti_sci_cmd_set_device_resets;
	dops->get_device_resets = ti_sci_cmd_get_device_resets;

	cops->get_clock = ti_sci_cmd_get_clock;
	cops->idle_clock = ti_sci_cmd_idle_clock;
	cops->put_clock = ti_sci_cmd_put_clock;

	cops->is_auto = ti_sci_cmd_clk_is_auto;
	cops->is_on = ti_sci_cmd_clk_is_on;
	cops->is_off = ti_sci_cmd_clk_is_off;

	cops->set_parent = ti_sci_cmd_clk_set_parent;
	cops->get_parent = ti_sci_cmd_clk_get_parent;
	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;

	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
	cops->set_freq = ti_sci_cmd_clk_set_freq;
	cops->get_freq = ti_sci_cmd_clk_get_freq;
}
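
/*
 * Illustrative sketch (not part of this driver): clients never call the
 * static ti_sci_cmd_*() functions directly; they reach them through the
 * ops table populated above, via a handle obtained from
 * ti_sci_get_handle(). For example, reading a clock rate (dev_id and
 * clk_id are hypothetical, SoC-specific values):
 *
 *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
 *	u64 freq;
 *	int ret = cops->get_freq(handle, dev_id, clk_id, &freq);
 */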

/**
 * ti_sci_get_handle() - Get the TI SCI handle for a device
 * @dev: Pointer to device for which we want SCI handle
 *
 * NOTE: The function does not track individual clients of the framework;
 * usage is expected to be maintained by the caller of the TI SCI protocol
 * library. Each successful ti_sci_get_handle() must be balanced with a
 * matching ti_sci_put_handle().
 *
 * Return: pointer to handle if successful, else:
 * -EPROBE_DEFER if the instance is not ready
 * -ENODEV if the required node handler is missing
 * -EINVAL if invalid conditions are encountered.
 */
const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
{
	struct device_node *ti_sci_np;
	struct list_head *p;
	struct ti_sci_handle *handle = NULL;
	struct ti_sci_info *info;

	if (!dev) {
		pr_err("I need a device pointer\n");
		return ERR_PTR(-EINVAL);
	}
	ti_sci_np = of_get_parent(dev->of_node);
	if (!ti_sci_np) {
		dev_err(dev, "No OF information\n");
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&ti_sci_list_mutex);
	list_for_each(p, &ti_sci_list) {
		info = list_entry(p, struct ti_sci_info, node);
		if (ti_sci_np == info->dev->of_node) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&ti_sci_list_mutex);
	of_node_put(ti_sci_np);

	if (!handle)
		return ERR_PTR(-EPROBE_DEFER);

	return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_handle);

/**
 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
 * @handle: Handle acquired by ti_sci_get_handle
 *
 * NOTE: The function does not track individual clients of the framework;
 * usage is expected to be maintained by the caller of the TI SCI protocol
 * library. Each ti_sci_put_handle() must balance a successful
 * ti_sci_get_handle().
 *
 * Return: 0 if successfully released;
 * if an error pointer was passed, it returns the error value back;
 * if NULL was passed, it returns -EINVAL.
 */
int ti_sci_put_handle(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	mutex_lock(&ti_sci_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&ti_sci_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(ti_sci_put_handle);
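
/*
 * Illustrative sketch (not part of this driver): a client pairs the two
 * calls across its probe and remove paths; my_probe(), my_remove() and
 * my_handle are hypothetical client-side names:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		my_handle = ti_sci_get_handle(&pdev->dev);
 *		if (IS_ERR(my_handle))
 *			return PTR_ERR(my_handle);
 *		return 0;
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		return ti_sci_put_handle(my_handle);
 *	}
 */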

static void devm_ti_sci_release(struct device *dev, void *res)
{
	const struct ti_sci_handle **ptr = res;
	const struct ti_sci_handle *handle = *ptr;
	int ret;

	ret = ti_sci_put_handle(handle);
	if (ret)
		dev_err(dev, "failed to put handle %d\n", ret);
}

/**
 * devm_ti_sci_get_handle() - Managed get handle
 * @dev: device for which we want SCI handle
 *
 * NOTE: This releases the handle once the device resources are
 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
 * The function does not track individual clients of the framework
 * and is expected to be maintained by caller of TI SCI protocol library.
 *
 * Return: pointer to handle if successful, else corresponding error
 * pointer (see ti_sci_get_handle() for details).
 */
const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
{
	const struct ti_sci_handle **ptr;
	const struct ti_sci_handle *handle;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_handle(dev);

	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
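
/*
 * Illustrative sketch (not part of this driver): with the managed variant
 * the release happens automatically when the consuming device detaches,
 * so probe-time code alone suffices; note that a not-yet-ready provider
 * surfaces as -EPROBE_DEFER:
 *
 *	handle = devm_ti_sci_get_handle(&pdev->dev);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */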

static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
				void *cmd)
{
	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
	const struct ti_sci_handle *handle = &info->handle;

	ti_sci_cmd_core_reboot(handle);

	/* Whether the call failed or passed, we should not be here anyway */
	return NOTIFY_BAD;
}

/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 1000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
};

static const struct of_device_id ti_sci_of_match[] = {
	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);

static int ti_sci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	const struct ti_sci_desc *desc;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info = NULL;
	struct ti_sci_xfers_info *minfo;
	struct mbox_client *cl;
	int ret = -EINVAL;
	int i;
	int reboot = 0;
	u32 h_id;

	of_id = of_match_device(ti_sci_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
	/* if the property is not present in DT, use a default from desc */
	if (ret < 0) {
		info->host_id = info->desc->default_host_id;
	} else {
		if (!h_id) {
			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
			info->host_id = info->desc->default_host_id;
		} else {
			info->host_id = h_id;
		}
	}

	reboot = of_property_read_bool(dev->of_node,
				       "ti,system-reboot-controller");
	INIT_LIST_HEAD(&info->node);
	minfo = &info->minfo;

	/*
	 * Pre-allocate messages.
	 * NEVER allocate more than what we can indicate in hdr.seq:
	 * the field is a u8, so only 1 << 8 = 256 in-flight messages
	 * can be distinguished. If we have a data description bug,
	 * force a fix.
	 */
	if (WARN_ON(desc->max_msgs >=
		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
		return -EINVAL;

	minfo->xfer_block = devm_kcalloc(dev,
					 desc->max_msgs,
					 sizeof(*minfo->xfer_block),
					 GFP_KERNEL);
	if (!minfo->xfer_block)
		return -ENOMEM;

	minfo->xfer_alloc_table = devm_kcalloc(dev,
					       BITS_TO_LONGS(desc->max_msgs),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!minfo->xfer_alloc_table)
		return -ENOMEM;
	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
					      GFP_KERNEL);
		if (!xfer->xfer_buf)
			return -ENOMEM;

		xfer->tx_message.buf = xfer->xfer_buf;
		init_completion(&xfer->done);
	}

	ret = ti_sci_debugfs_create(pdev, info);
	if (ret)
		dev_warn(dev, "Failed to create debug file\n");

	platform_set_drvdata(pdev, info);

	cl = &info->cl;
	cl->dev = dev;
	cl->tx_block = false;
	cl->rx_callback = ti_sci_rx_callback;
	cl->knows_txdone = true;

	spin_lock_init(&minfo->xfer_lock);
	sema_init(&minfo->sem_xfer_count, desc->max_msgs);

	info->chan_rx = mbox_request_channel_byname(cl, "rx");
	if (IS_ERR(info->chan_rx)) {
		ret = PTR_ERR(info->chan_rx);
		goto out;
	}

	info->chan_tx = mbox_request_channel_byname(cl, "tx");
	if (IS_ERR(info->chan_tx)) {
		ret = PTR_ERR(info->chan_tx);
		goto out;
	}

	ret = ti_sci_cmd_get_revision(info);
	if (ret) {
		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
		goto out;
	}

	ti_sci_setup_ops(info);

	if (reboot) {
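		/*
		 * Priority 128 is the kernel's documented default for a
		 * restart handler that is sufficient to restart the
		 * entire system.
		 */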
		info->nb.notifier_call = tisci_reboot_handler;
		info->nb.priority = 128;

		ret = register_restart_handler(&info->nb);
		if (ret) {
			dev_err(dev, "reboot registration fail(%d)\n", ret);
			return ret;
		}
	}

	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
		 info->handle.version.abi_major, info->handle.version.abi_minor,
		 info->handle.version.firmware_revision,
		 info->handle.version.firmware_description);

	mutex_lock(&ti_sci_list_mutex);
	list_add_tail(&info->node, &ti_sci_list);
	mutex_unlock(&ti_sci_list_mutex);

	return of_platform_populate(dev->of_node, NULL, NULL, dev);
out:
	if (!IS_ERR(info->chan_tx))
		mbox_free_channel(info->chan_tx);
	if (!IS_ERR(info->chan_rx))
		mbox_free_channel(info->chan_rx);
	debugfs_remove(info->d);
	return ret;
}

static int ti_sci_remove(struct platform_device *pdev)
{
	struct ti_sci_info *info;
	struct device *dev = &pdev->dev;
	int ret = 0;

	of_platform_depopulate(dev);

	info = platform_get_drvdata(pdev);

	if (info->nb.notifier_call)
		unregister_restart_handler(&info->nb);

	mutex_lock(&ti_sci_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&ti_sci_list_mutex);

	if (!ret) {
		ti_sci_debugfs_destroy(pdev, info);

		/* Safe to free channels since no more users */
		mbox_free_channel(info->chan_tx);
		mbox_free_channel(info->chan_rx);
	}

	return ret;
}

static struct platform_driver ti_sci_driver = {
	.probe = ti_sci_probe,
	.remove = ti_sci_remove,
	.driver = {
		   .name = "ti-sci",
		   .of_match_table = of_match_ptr(ti_sci_of_match),
	},
};
module_platform_driver(ti_sci_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-sci");