ti_sci.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message.
 *		Since we work with request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting Semaphore for managing max simultaneous
 *			Messages.
 * @xfer_block:		Preallocated Message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};
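
/*
 * Editorial note (not from the original source): these two structures
 * implement the driver's flow control. sem_xfer_count is initialized from
 * desc->max_msgs and bounds the number of in-flight commands, while the
 * bit index claimed in xfer_alloc_table doubles as the message sequence
 * number (hdr->seq). The receive path can therefore map a response back
 * to its waiting transfer with a single O(1) lookup, roughly:
 *
 *	xfer = &minfo->xfer_block[hdr->seq];
 */
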
/**
 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
 *				management representation of dev_ids.
 * @dev_id:	TISCI device ID
 * @type:	Corresponding id as identified by TISCI RM.
 *
 * Note: This is used only as a workaround for using RM range APIs
 * for AM654 SoC. For future SoCs dev_id will be used as type
 * for RM range APIs. In order to maintain ABI backward compatibility
 * type is not being changed for AM654 SoC.
 */
struct ti_sci_rm_type_map {
	u32 dev_id;
	u16 type;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 * @rm_type_map:	RM resource type mapping structure.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
	struct ti_sci_rm_type_map *rm_type_map;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @nb:		Reboot Notifier block
 * @d:		Debugfs file entry
 * @debug_region: Memory region where the debug messages are available
 * @debug_region_size: Debug region size
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
};

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:		sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to NULL terminate the last byte (hence
	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
	 * specific data format for debug messages, we just present the data
	 * in the buffer as is - we expect the messages to be self explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/**
 * ti_sci_debug_open() - debug file open
 * @inode:	inode pointer
 * @file:	file pointer
 *
 * Return: result of single_open
 */
static int ti_sci_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, ti_sci_debug_show, inode->i_private);
}

/* log file operations */
static const struct file_operations ti_sci_debug_fops = {
	.open = ti_sci_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Setup NULL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer.. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: Valid ti_sci_xfer on success, else corresponding ERR_PTR on failure.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only controlled number of pending messages.
	 * Ideally, we might just have to wait a single message, be
	 * conservative and wait 5 times that..
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies
	 * to predictable O(1) access, BUT, it opens us to risk if
	 * remote misbehaves with corrupted message sequence responses.
	 * If that happens, we are going to be messed up anyways..
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	/* And we wait for the response. */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	}
	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}
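
/*
 * Editorial sketch (not from the original source): every command helper
 * below follows the same allocate/fill/send/release pattern around the
 * three routines above, roughly:
 *
 *	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_...,
 *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				   sizeof(*req), sizeof(*resp));
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);
 *	req = (struct ti_sci_msg_req_... *)xfer->xfer_buf;
 *	...fill request fields...
 *	ret = ti_sci_do_xfer(info, xfer);	(sleeps for the response)
 *	if (!ret)
 *		ret = ti_sci_is_response_ack(xfer->xfer_buf) ? 0 : -ENODEV;
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 */
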
/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
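
/*
 * Editorial note (not from the original source): the wrapper commands that
 * follow differ only in the flags/state pair they hand to
 * ti_sci_set_device_state():
 *
 *	get_device:		flags = 0,	state = MSG_DEVICE_SW_STATE_ON
 *	idle_device:		flags = 0,	state = MSG_DEVICE_SW_STATE_RETENTION
 *	put_device:		flags = 0,	state = MSG_DEVICE_SW_STATE_AUTO_OFF
 *	*_exclusive variants:	flags = MSG_FLAG_DEVICE_EXCLUSIVE, same states
 */
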
/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 *			     that can be shared with other hosts.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
 *				       TISCI that is exclusively owned by the
 *				       requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
 *					TISCI that is exclusively owned by
 *					requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}
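
/*
 * Editorial sketch (not from the original source): as the comments above
 * stress, callers must balance requests themselves; in practice clients
 * reach these helpers through the ops populated on struct ti_sci_handle:
 *
 *	ret = ti_sci_cmd_get_device(handle, dev_id);
 *	if (ret)
 *		return ret;
 *	...use the device...
 *	ti_sci_cmd_put_device(handle, dev_id);	(one put per successful get)
 */
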
/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
				      &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
				      &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
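
/*
 * Editorial note (not from the original source): the clk_id < 255 branch
 * above is an escape encoding. The original message layout carried the
 * clock identifier in a single u8 field, so the value 255 is reserved to
 * signal "the real identifier is in the 32-bit clk_id_32 field". A request
 * for clock 300, for example, is sent as:
 *
 *	req->clk_id = 255;
 *	req->clk_id_32 = 300;
 */
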
/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state: State requested for clock to move to
 * @current_state: State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u32 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool needs_ssc,
				bool can_change_freq, bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_AUTO);
}
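
/*
 * Editorial note (not from the original source): get/idle/put map directly
 * onto the three software states a clock can be requested into:
 *
 *	ti_sci_cmd_get_clock()  -> MSG_CLOCK_SW_STATE_REQ   (keep the clock on)
 *	ti_sci_cmd_idle_clock() -> MSG_CLOCK_SW_STATE_UNREQ (let it idle)
 *	ti_sci_cmd_put_clock()  -> MSG_CLOCK_SW_STATE_AUTO  (firmware decides)
 */
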
  1029. /**
  1030. * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
  1031. * @handle: pointer to TI SCI handle
  1032. * @dev_id: Device identifier this request is for
  1033. * @clk_id: Clock identifier for the device for this request.
  1034. * Each device has it's own set of clock inputs. This indexes
  1035. * which clock input to modify.
  1036. * @req_state: state indicating if the clock is auto managed
  1037. *
  1038. * Return: 0 if all went well, else returns appropriate error value.
  1039. */
  1040. static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
  1041. u32 dev_id, u32 clk_id, bool *req_state)
  1042. {
  1043. u8 state = 0;
  1044. int ret;
  1045. if (!req_state)
  1046. return -EINVAL;
  1047. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
  1048. if (ret)
  1049. return ret;
  1050. *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
  1051. return 0;
  1052. }
  1053. /**
  1054. * ti_sci_cmd_clk_is_on() - Is the clock ON
  1055. * @handle: pointer to TI SCI handle
  1056. * @dev_id: Device identifier this request is for
  1057. * @clk_id: Clock identifier for the device for this request.
  1058. * Each device has it's own set of clock inputs. This indexes
  1059. * which clock input to modify.
  1060. * @req_state: state indicating if the clock is managed by us and enabled
  1061. * @curr_state: state indicating if the clock is ready for operation
  1062. *
  1063. * Return: 0 if all went well, else returns appropriate error value.
  1064. */
  1065. static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
  1066. u32 clk_id, bool *req_state, bool *curr_state)
  1067. {
  1068. u8 c_state = 0, r_state = 0;
  1069. int ret;
  1070. if (!req_state && !curr_state)
  1071. return -EINVAL;
  1072. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
  1073. &r_state, &c_state);
  1074. if (ret)
  1075. return ret;
  1076. if (req_state)
  1077. *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
  1078. if (curr_state)
  1079. *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
  1080. return 0;
  1081. }
  1082. /**
  1083. * ti_sci_cmd_clk_is_off() - Is the clock OFF
  1084. * @handle: pointer to TI SCI handle
  1085. * @dev_id: Device identifier this request is for
  1086. * @clk_id: Clock identifier for the device for this request.
  1087. * Each device has it's own set of clock inputs. This indexes
  1088. * which clock input to modify.
  1089. * @req_state: state indicating if the clock is managed by us and disabled
  1090. * @curr_state: state indicating if the clock is NOT ready for operation
  1091. *
  1092. * Return: 0 if all went well, else returns appropriate error value.
  1093. */
  1094. static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
  1095. u32 clk_id, bool *req_state, bool *curr_state)
  1096. {
  1097. u8 c_state = 0, r_state = 0;
  1098. int ret;
  1099. if (!req_state && !curr_state)
  1100. return -EINVAL;
  1101. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
  1102. &r_state, &c_state);
  1103. if (ret)
  1104. return ret;
  1105. if (req_state)
  1106. *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
  1107. if (curr_state)
  1108. *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
  1109. return 0;
  1110. }
  1111. /**
  1112. * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
  1113. * @handle: pointer to TI SCI handle
  1114. * @dev_id: Device identifier this request is for
  1115. * @clk_id: Clock identifier for the device for this request.
  1116. * Each device has it's own set of clock inputs. This indexes
  1117. * which clock input to modify.
  1118. * @parent_id: Parent clock identifier to set
  1119. *
  1120. * Return: 0 if all went well, else returns appropriate error value.
  1121. */
  1122. static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
  1123. u32 dev_id, u32 clk_id, u32 parent_id)
  1124. {
  1125. struct ti_sci_info *info;
  1126. struct ti_sci_msg_req_set_clock_parent *req;
  1127. struct ti_sci_msg_hdr *resp;
  1128. struct ti_sci_xfer *xfer;
  1129. struct device *dev;
  1130. int ret = 0;
  1131. if (IS_ERR(handle))
  1132. return PTR_ERR(handle);
  1133. if (!handle)
  1134. return -EINVAL;
  1135. info = handle_to_ti_sci_info(handle);
  1136. dev = info->dev;
  1137. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
  1138. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1139. sizeof(*req), sizeof(*resp));
  1140. if (IS_ERR(xfer)) {
  1141. ret = PTR_ERR(xfer);
  1142. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1143. return ret;
  1144. }
  1145. req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
  1146. req->dev_id = dev_id;
  1147. if (clk_id < 255) {
  1148. req->clk_id = clk_id;
  1149. } else {
  1150. req->clk_id = 255;
  1151. req->clk_id_32 = clk_id;
  1152. }
  1153. if (parent_id < 255) {
  1154. req->parent_id = parent_id;
  1155. } else {
  1156. req->parent_id = 255;
  1157. req->parent_id_32 = parent_id;
  1158. }
  1159. ret = ti_sci_do_xfer(info, xfer);
  1160. if (ret) {
  1161. dev_err(dev, "Mbox send fail %d\n", ret);
  1162. goto fail;
  1163. }
  1164. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  1165. ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
  1166. fail:
  1167. ti_sci_put_one_xfer(&info->minfo, xfer);
  1168. return ret;
  1169. }
  1170. /**
  1171. * ti_sci_cmd_clk_get_parent() - Get current parent clock source
  1172. * @handle: pointer to TI SCI handle
  1173. * @dev_id: Device identifier this request is for
  1174. * @clk_id: Clock identifier for the device for this request.
1175. Each device has its own set of clock inputs. This indexes
  1176. * which clock input to modify.
  1177. * @parent_id: Current clock parent
  1178. *
  1179. * Return: 0 if all went well, else returns appropriate error value.
  1180. */
  1181. static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
  1182. u32 dev_id, u32 clk_id, u32 *parent_id)
  1183. {
  1184. struct ti_sci_info *info;
  1185. struct ti_sci_msg_req_get_clock_parent *req;
  1186. struct ti_sci_msg_resp_get_clock_parent *resp;
  1187. struct ti_sci_xfer *xfer;
  1188. struct device *dev;
  1189. int ret = 0;
  1190. if (IS_ERR(handle))
  1191. return PTR_ERR(handle);
  1192. if (!handle || !parent_id)
  1193. return -EINVAL;
  1194. info = handle_to_ti_sci_info(handle);
  1195. dev = info->dev;
  1196. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
  1197. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1198. sizeof(*req), sizeof(*resp));
  1199. if (IS_ERR(xfer)) {
  1200. ret = PTR_ERR(xfer);
  1201. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1202. return ret;
  1203. }
  1204. req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
  1205. req->dev_id = dev_id;
  1206. if (clk_id < 255) {
  1207. req->clk_id = clk_id;
  1208. } else {
  1209. req->clk_id = 255;
  1210. req->clk_id_32 = clk_id;
  1211. }
  1212. ret = ti_sci_do_xfer(info, xfer);
  1213. if (ret) {
  1214. dev_err(dev, "Mbox send fail %d\n", ret);
  1215. goto fail;
  1216. }
  1217. resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
  1218. if (!ti_sci_is_response_ack(resp)) {
  1219. ret = -ENODEV;
  1220. } else {
  1221. if (resp->parent_id < 255)
  1222. *parent_id = resp->parent_id;
  1223. else
  1224. *parent_id = resp->parent_id_32;
  1225. }
  1226. fail:
  1227. ti_sci_put_one_xfer(&info->minfo, xfer);
  1228. return ret;
  1229. }
  1230. /**
  1231. * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
  1232. * @handle: pointer to TI SCI handle
  1233. * @dev_id: Device identifier this request is for
  1234. * @clk_id: Clock identifier for the device for this request.
1235. Each device has its own set of clock inputs. This indexes
  1236. * which clock input to modify.
1237. * @num_parents: Returns the number of parents of the current clock.
  1238. *
  1239. * Return: 0 if all went well, else returns appropriate error value.
  1240. */
  1241. static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
  1242. u32 dev_id, u32 clk_id,
  1243. u32 *num_parents)
  1244. {
  1245. struct ti_sci_info *info;
  1246. struct ti_sci_msg_req_get_clock_num_parents *req;
  1247. struct ti_sci_msg_resp_get_clock_num_parents *resp;
  1248. struct ti_sci_xfer *xfer;
  1249. struct device *dev;
  1250. int ret = 0;
  1251. if (IS_ERR(handle))
  1252. return PTR_ERR(handle);
  1253. if (!handle || !num_parents)
  1254. return -EINVAL;
  1255. info = handle_to_ti_sci_info(handle);
  1256. dev = info->dev;
  1257. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
  1258. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1259. sizeof(*req), sizeof(*resp));
  1260. if (IS_ERR(xfer)) {
  1261. ret = PTR_ERR(xfer);
  1262. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1263. return ret;
  1264. }
  1265. req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
  1266. req->dev_id = dev_id;
  1267. if (clk_id < 255) {
  1268. req->clk_id = clk_id;
  1269. } else {
  1270. req->clk_id = 255;
  1271. req->clk_id_32 = clk_id;
  1272. }
  1273. ret = ti_sci_do_xfer(info, xfer);
  1274. if (ret) {
  1275. dev_err(dev, "Mbox send fail %d\n", ret);
  1276. goto fail;
  1277. }
  1278. resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
  1279. if (!ti_sci_is_response_ack(resp)) {
  1280. ret = -ENODEV;
  1281. } else {
  1282. if (resp->num_parents < 255)
  1283. *num_parents = resp->num_parents;
  1284. else
  1285. *num_parents = resp->num_parents_32;
  1286. }
  1287. fail:
  1288. ti_sci_put_one_xfer(&info->minfo, xfer);
  1289. return ret;
  1290. }
  1291. /**
  1292. * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
  1293. * @handle: pointer to TI SCI handle
  1294. * @dev_id: Device identifier this request is for
  1295. * @clk_id: Clock identifier for the device for this request.
1296. Each device has its own set of clock inputs. This indexes
  1297. * which clock input to modify.
  1298. * @min_freq: The minimum allowable frequency in Hz. This is the minimum
  1299. * allowable programmed frequency and does not account for clock
  1300. * tolerances and jitter.
  1301. * @target_freq: The target clock frequency in Hz. A frequency will be
1302. * found as close to this target frequency as possible.
  1303. * @max_freq: The maximum allowable frequency in Hz. This is the maximum
  1304. * allowable programmed frequency and does not account for clock
  1305. * tolerances and jitter.
  1306. * @match_freq: Frequency match in Hz response.
  1307. *
  1308. * Return: 0 if all went well, else returns appropriate error value.
  1309. */
  1310. static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
  1311. u32 dev_id, u32 clk_id, u64 min_freq,
  1312. u64 target_freq, u64 max_freq,
  1313. u64 *match_freq)
  1314. {
  1315. struct ti_sci_info *info;
  1316. struct ti_sci_msg_req_query_clock_freq *req;
  1317. struct ti_sci_msg_resp_query_clock_freq *resp;
  1318. struct ti_sci_xfer *xfer;
  1319. struct device *dev;
  1320. int ret = 0;
  1321. if (IS_ERR(handle))
  1322. return PTR_ERR(handle);
  1323. if (!handle || !match_freq)
  1324. return -EINVAL;
  1325. info = handle_to_ti_sci_info(handle);
  1326. dev = info->dev;
  1327. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
  1328. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1329. sizeof(*req), sizeof(*resp));
  1330. if (IS_ERR(xfer)) {
  1331. ret = PTR_ERR(xfer);
  1332. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1333. return ret;
  1334. }
  1335. req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
  1336. req->dev_id = dev_id;
  1337. if (clk_id < 255) {
  1338. req->clk_id = clk_id;
  1339. } else {
  1340. req->clk_id = 255;
  1341. req->clk_id_32 = clk_id;
  1342. }
  1343. req->min_freq_hz = min_freq;
  1344. req->target_freq_hz = target_freq;
  1345. req->max_freq_hz = max_freq;
  1346. ret = ti_sci_do_xfer(info, xfer);
  1347. if (ret) {
  1348. dev_err(dev, "Mbox send fail %d\n", ret);
  1349. goto fail;
  1350. }
  1351. resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
  1352. if (!ti_sci_is_response_ack(resp))
  1353. ret = -ENODEV;
  1354. else
  1355. *match_freq = resp->freq_hz;
  1356. fail:
  1357. ti_sci_put_one_xfer(&info->minfo, xfer);
  1358. return ret;
  1359. }
  1360. /**
  1361. * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
  1362. * @handle: pointer to TI SCI handle
  1363. * @dev_id: Device identifier this request is for
  1364. * @clk_id: Clock identifier for the device for this request.
1365. Each device has its own set of clock inputs. This indexes
  1366. * which clock input to modify.
  1367. * @min_freq: The minimum allowable frequency in Hz. This is the minimum
  1368. * allowable programmed frequency and does not account for clock
  1369. * tolerances and jitter.
  1370. * @target_freq: The target clock frequency in Hz. A frequency will be
1371. * found as close to this target frequency as possible.
  1372. * @max_freq: The maximum allowable frequency in Hz. This is the maximum
  1373. * allowable programmed frequency and does not account for clock
  1374. * tolerances and jitter.
  1375. *
  1376. * Return: 0 if all went well, else returns appropriate error value.
  1377. */
  1378. static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
  1379. u32 dev_id, u32 clk_id, u64 min_freq,
  1380. u64 target_freq, u64 max_freq)
  1381. {
  1382. struct ti_sci_info *info;
  1383. struct ti_sci_msg_req_set_clock_freq *req;
  1384. struct ti_sci_msg_hdr *resp;
  1385. struct ti_sci_xfer *xfer;
  1386. struct device *dev;
  1387. int ret = 0;
  1388. if (IS_ERR(handle))
  1389. return PTR_ERR(handle);
  1390. if (!handle)
  1391. return -EINVAL;
  1392. info = handle_to_ti_sci_info(handle);
  1393. dev = info->dev;
  1394. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
  1395. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1396. sizeof(*req), sizeof(*resp));
  1397. if (IS_ERR(xfer)) {
  1398. ret = PTR_ERR(xfer);
  1399. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1400. return ret;
  1401. }
  1402. req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
  1403. req->dev_id = dev_id;
  1404. if (clk_id < 255) {
  1405. req->clk_id = clk_id;
  1406. } else {
  1407. req->clk_id = 255;
  1408. req->clk_id_32 = clk_id;
  1409. }
  1410. req->min_freq_hz = min_freq;
  1411. req->target_freq_hz = target_freq;
  1412. req->max_freq_hz = max_freq;
  1413. ret = ti_sci_do_xfer(info, xfer);
  1414. if (ret) {
  1415. dev_err(dev, "Mbox send fail %d\n", ret);
  1416. goto fail;
  1417. }
  1418. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  1419. ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
  1420. fail:
  1421. ti_sci_put_one_xfer(&info->minfo, xfer);
  1422. return ret;
  1423. }
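/*
 * Illustrative usage sketch (not from the original source): request a rate
 * of ~100 MHz on a hypothetical device 10, clock input 2, allowing roughly
 * +/-1% of programmed-frequency slack. The device ID, clock ID and
 * frequencies below are placeholders for the example only:
 *
 *	ret = ti_sci_cmd_clk_set_freq(handle, 10, 2,
 *				      99000000, 100000000, 101000000);
 *	if (ret)
 *		dev_err(dev, "clk set freq failed (%d)\n", ret);
 */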
  1424. /**
  1425. * ti_sci_cmd_clk_get_freq() - Get current frequency
  1426. * @handle: pointer to TI SCI handle
  1427. * @dev_id: Device identifier this request is for
  1428. * @clk_id: Clock identifier for the device for this request.
1429. Each device has its own set of clock inputs. This indexes
  1430. * which clock input to modify.
1431. * @freq: Current frequency in Hz
  1432. *
  1433. * Return: 0 if all went well, else returns appropriate error value.
  1434. */
  1435. static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
  1436. u32 dev_id, u32 clk_id, u64 *freq)
  1437. {
  1438. struct ti_sci_info *info;
  1439. struct ti_sci_msg_req_get_clock_freq *req;
  1440. struct ti_sci_msg_resp_get_clock_freq *resp;
  1441. struct ti_sci_xfer *xfer;
  1442. struct device *dev;
  1443. int ret = 0;
  1444. if (IS_ERR(handle))
  1445. return PTR_ERR(handle);
  1446. if (!handle || !freq)
  1447. return -EINVAL;
  1448. info = handle_to_ti_sci_info(handle);
  1449. dev = info->dev;
  1450. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
  1451. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1452. sizeof(*req), sizeof(*resp));
  1453. if (IS_ERR(xfer)) {
  1454. ret = PTR_ERR(xfer);
  1455. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1456. return ret;
  1457. }
  1458. req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
  1459. req->dev_id = dev_id;
  1460. if (clk_id < 255) {
  1461. req->clk_id = clk_id;
  1462. } else {
  1463. req->clk_id = 255;
  1464. req->clk_id_32 = clk_id;
  1465. }
  1466. ret = ti_sci_do_xfer(info, xfer);
  1467. if (ret) {
  1468. dev_err(dev, "Mbox send fail %d\n", ret);
  1469. goto fail;
  1470. }
  1471. resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
  1472. if (!ti_sci_is_response_ack(resp))
  1473. ret = -ENODEV;
  1474. else
  1475. *freq = resp->freq_hz;
  1476. fail:
  1477. ti_sci_put_one_xfer(&info->minfo, xfer);
  1478. return ret;
  1479. }
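/**
 * ti_sci_cmd_core_reboot() - Command to request a system reset
 * @handle: pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */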
  1480. static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
  1481. {
  1482. struct ti_sci_info *info;
  1483. struct ti_sci_msg_req_reboot *req;
  1484. struct ti_sci_msg_hdr *resp;
  1485. struct ti_sci_xfer *xfer;
  1486. struct device *dev;
  1487. int ret = 0;
  1488. if (IS_ERR(handle))
  1489. return PTR_ERR(handle);
  1490. if (!handle)
  1491. return -EINVAL;
  1492. info = handle_to_ti_sci_info(handle);
  1493. dev = info->dev;
  1494. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
  1495. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1496. sizeof(*req), sizeof(*resp));
  1497. if (IS_ERR(xfer)) {
  1498. ret = PTR_ERR(xfer);
  1499. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1500. return ret;
  1501. }
  1502. req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
  1503. ret = ti_sci_do_xfer(info, xfer);
  1504. if (ret) {
  1505. dev_err(dev, "Mbox send fail %d\n", ret);
  1506. goto fail;
  1507. }
  1508. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  1509. if (!ti_sci_is_response_ack(resp))
  1510. ret = -ENODEV;
  1511. else
  1512. ret = 0;
  1513. fail:
  1514. ti_sci_put_one_xfer(&info->minfo, xfer);
  1515. return ret;
  1516. }
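/**
 * ti_sci_get_resource_type() - Map a TISCI device ID to an RM resource type
 * @info: pointer to TI SCI private info structure
 * @dev_id: TISCI device ID to look up
 * @type: Returned resource type corresponding to @dev_id
 *
 * Return: 0 on a successful lookup (or when no rm_type_map is provided, in
 * which case @dev_id itself is used as the type), else -EINVAL if @dev_id
 * is not found in the map.
 */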
  1517. static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
  1518. u16 *type)
  1519. {
  1520. struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
  1521. bool found = false;
  1522. int i;
  1523. /* If map is not provided then assume dev_id is used as type */
  1524. if (!rm_type_map) {
  1525. *type = dev_id;
  1526. return 0;
  1527. }
  1528. for (i = 0; rm_type_map[i].dev_id; i++) {
  1529. if (rm_type_map[i].dev_id == dev_id) {
  1530. *type = rm_type_map[i].type;
  1531. found = true;
  1532. break;
  1533. }
  1534. }
  1535. if (!found)
  1536. return -EINVAL;
  1537. return 0;
  1538. }
  1539. /**
  1540. * ti_sci_get_resource_range - Helper to get a range of resources assigned
  1541. * to a host. Resource is uniquely identified by
  1542. * type and subtype.
  1543. * @handle: Pointer to TISCI handle.
  1544. * @dev_id: TISCI device ID.
  1545. * @subtype: Resource assignment subtype that is being requested
  1546. * from the given device.
  1547. * @s_host: Host processor ID to which the resources are allocated
  1548. * @range_start: Start index of the resource range
  1549. * @range_num: Number of resources in the range
  1550. *
  1551. * Return: 0 if all went fine, else return appropriate error.
  1552. */
  1553. static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
  1554. u32 dev_id, u8 subtype, u8 s_host,
  1555. u16 *range_start, u16 *range_num)
  1556. {
  1557. struct ti_sci_msg_resp_get_resource_range *resp;
  1558. struct ti_sci_msg_req_get_resource_range *req;
  1559. struct ti_sci_xfer *xfer;
  1560. struct ti_sci_info *info;
  1561. struct device *dev;
  1562. u16 type;
  1563. int ret = 0;
  1564. if (IS_ERR(handle))
  1565. return PTR_ERR(handle);
  1566. if (!handle)
  1567. return -EINVAL;
  1568. info = handle_to_ti_sci_info(handle);
  1569. dev = info->dev;
  1570. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
  1571. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1572. sizeof(*req), sizeof(*resp));
  1573. if (IS_ERR(xfer)) {
  1574. ret = PTR_ERR(xfer);
  1575. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1576. return ret;
  1577. }
  1578. ret = ti_sci_get_resource_type(info, dev_id, &type);
  1579. if (ret) {
  1580. dev_err(dev, "rm type lookup failed for %u\n", dev_id);
  1581. goto fail;
  1582. }
  1583. req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
  1584. req->secondary_host = s_host;
  1585. req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
  1586. req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
  1587. ret = ti_sci_do_xfer(info, xfer);
  1588. if (ret) {
  1589. dev_err(dev, "Mbox send fail %d\n", ret);
  1590. goto fail;
  1591. }
  1592. resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
  1593. if (!ti_sci_is_response_ack(resp)) {
  1594. ret = -ENODEV;
  1595. } else if (!resp->range_start && !resp->range_num) {
  1596. ret = -ENODEV;
  1597. } else {
  1598. *range_start = resp->range_start;
  1599. *range_num = resp->range_num;
1600. }
  1601. fail:
  1602. ti_sci_put_one_xfer(&info->minfo, xfer);
  1603. return ret;
  1604. }
  1605. /**
  1606. * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1607. * that is the same as the TI SCI interface host.
  1608. * @handle: Pointer to TISCI handle.
  1609. * @dev_id: TISCI device ID.
  1610. * @subtype: Resource assignment subtype that is being requested
  1611. * from the given device.
  1612. * @range_start: Start index of the resource range
  1613. * @range_num: Number of resources in the range
  1614. *
  1615. * Return: 0 if all went fine, else return appropriate error.
  1616. */
  1617. static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
  1618. u32 dev_id, u8 subtype,
  1619. u16 *range_start, u16 *range_num)
  1620. {
  1621. return ti_sci_get_resource_range(handle, dev_id, subtype,
  1622. TI_SCI_IRQ_SECONDARY_HOST_INVALID,
  1623. range_start, range_num);
  1624. }
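/*
 * Illustrative usage sketch (not from the original source): fetch the range
 * of a resource subtype owned by this host. The dev_id (235) and subtype (0)
 * below are placeholder values for the example only:
 *
 *	u16 start, num;
 *
 *	ret = ti_sci_cmd_get_resource_range(handle, 235, 0, &start, &num);
 *	if (!ret)
 *		pr_debug("resources [%u..%u]\n", start, start + num - 1);
 */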
  1625. /**
  1626. * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
  1627. * assigned to a specified host.
  1628. * @handle: Pointer to TISCI handle.
  1629. * @dev_id: TISCI device ID.
  1630. * @subtype: Resource assignment subtype that is being requested
  1631. * from the given device.
  1632. * @s_host: Host processor ID to which the resources are allocated
  1633. * @range_start: Start index of the resource range
  1634. * @range_num: Number of resources in the range
  1635. *
  1636. * Return: 0 if all went fine, else return appropriate error.
  1637. */
  1638. static
  1639. int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
  1640. u32 dev_id, u8 subtype, u8 s_host,
  1641. u16 *range_start, u16 *range_num)
  1642. {
  1643. return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
  1644. range_start, range_num);
  1645. }
  1646. /**
  1647. * ti_sci_manage_irq() - Helper api to configure/release the irq route between
  1648. * the requested source and destination
  1649. * @handle: Pointer to TISCI handle.
  1650. * @valid_params: Bit fields defining the validity of certain params
  1651. * @src_id: Device ID of the IRQ source
  1652. * @src_index: IRQ source index within the source device
  1653. * @dst_id: Device ID of the IRQ destination
1654. * @dst_host_irq: IRQ number of the destination device
  1655. * @ia_id: Device ID of the IA, if the IRQ flows through this IA
  1656. * @vint: Virtual interrupt to be used within the IA
  1657. * @global_event: Global event number to be used for the requesting event
  1658. * @vint_status_bit: Virtual interrupt status bit to be used for the event
1659. * @s_host: Secondary host ID for which the irq/event is being
1660. * requested.
  1661. * @type: Request type irq set or release.
  1662. *
  1663. * Return: 0 if all went fine, else return appropriate error.
  1664. */
  1665. static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
  1666. u32 valid_params, u16 src_id, u16 src_index,
  1667. u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
  1668. u16 global_event, u8 vint_status_bit, u8 s_host,
  1669. u16 type)
  1670. {
  1671. struct ti_sci_msg_req_manage_irq *req;
  1672. struct ti_sci_msg_hdr *resp;
  1673. struct ti_sci_xfer *xfer;
  1674. struct ti_sci_info *info;
  1675. struct device *dev;
  1676. int ret = 0;
  1677. if (IS_ERR(handle))
  1678. return PTR_ERR(handle);
  1679. if (!handle)
  1680. return -EINVAL;
  1681. info = handle_to_ti_sci_info(handle);
  1682. dev = info->dev;
  1683. xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1684. sizeof(*req), sizeof(*resp));
  1685. if (IS_ERR(xfer)) {
  1686. ret = PTR_ERR(xfer);
  1687. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1688. return ret;
  1689. }
  1690. req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
  1691. req->valid_params = valid_params;
  1692. req->src_id = src_id;
  1693. req->src_index = src_index;
  1694. req->dst_id = dst_id;
  1695. req->dst_host_irq = dst_host_irq;
  1696. req->ia_id = ia_id;
  1697. req->vint = vint;
  1698. req->global_event = global_event;
  1699. req->vint_status_bit = vint_status_bit;
  1700. req->secondary_host = s_host;
  1701. ret = ti_sci_do_xfer(info, xfer);
  1702. if (ret) {
  1703. dev_err(dev, "Mbox send fail %d\n", ret);
  1704. goto fail;
  1705. }
  1706. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  1707. ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
  1708. fail:
  1709. ti_sci_put_one_xfer(&info->minfo, xfer);
  1710. return ret;
  1711. }
  1712. /**
  1713. * ti_sci_set_irq() - Helper api to configure the irq route between the
  1714. * requested source and destination
  1715. * @handle: Pointer to TISCI handle.
  1716. * @valid_params: Bit fields defining the validity of certain params
  1717. * @src_id: Device ID of the IRQ source
  1718. * @src_index: IRQ source index within the source device
  1719. * @dst_id: Device ID of the IRQ destination
1720. * @dst_host_irq: IRQ number of the destination device
  1721. * @ia_id: Device ID of the IA, if the IRQ flows through this IA
  1722. * @vint: Virtual interrupt to be used within the IA
  1723. * @global_event: Global event number to be used for the requesting event
  1724. * @vint_status_bit: Virtual interrupt status bit to be used for the event
1725. * @s_host: Secondary host ID for which the irq/event is being
1726. * requested.
  1727. *
  1728. * Return: 0 if all went fine, else return appropriate error.
  1729. */
  1730. static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
  1731. u16 src_id, u16 src_index, u16 dst_id,
  1732. u16 dst_host_irq, u16 ia_id, u16 vint,
  1733. u16 global_event, u8 vint_status_bit, u8 s_host)
  1734. {
1735. pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
  1736. __func__, valid_params, src_id, src_index,
  1737. dst_id, dst_host_irq, ia_id, vint, global_event,
  1738. vint_status_bit);
  1739. return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
  1740. dst_id, dst_host_irq, ia_id, vint,
  1741. global_event, vint_status_bit, s_host,
  1742. TI_SCI_MSG_SET_IRQ);
  1743. }
  1744. /**
  1745. * ti_sci_free_irq() - Helper api to free the irq route between the
  1746. * requested source and destination
  1747. * @handle: Pointer to TISCI handle.
  1748. * @valid_params: Bit fields defining the validity of certain params
  1749. * @src_id: Device ID of the IRQ source
  1750. * @src_index: IRQ source index within the source device
  1751. * @dst_id: Device ID of the IRQ destination
1752. * @dst_host_irq: IRQ number of the destination device
  1753. * @ia_id: Device ID of the IA, if the IRQ flows through this IA
  1754. * @vint: Virtual interrupt to be used within the IA
  1755. * @global_event: Global event number to be used for the requesting event
  1756. * @vint_status_bit: Virtual interrupt status bit to be used for the event
1757. * @s_host: Secondary host ID for which the irq/event is being
1758. * requested.
  1759. *
  1760. * Return: 0 if all went fine, else return appropriate error.
  1761. */
  1762. static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
  1763. u16 src_id, u16 src_index, u16 dst_id,
  1764. u16 dst_host_irq, u16 ia_id, u16 vint,
  1765. u16 global_event, u8 vint_status_bit, u8 s_host)
  1766. {
1767. pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
  1768. __func__, valid_params, src_id, src_index,
  1769. dst_id, dst_host_irq, ia_id, vint, global_event,
  1770. vint_status_bit);
  1771. return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
  1772. dst_id, dst_host_irq, ia_id, vint,
  1773. global_event, vint_status_bit, s_host,
  1774. TI_SCI_MSG_FREE_IRQ);
  1775. }
  1776. /**
  1777. * ti_sci_cmd_set_direct_irq() - Configure a non-event based direct irq route
  1778. * between the requested source and destination.
  1779. * @handle: Pointer to TISCI handle.
  1780. * @src_id: Device ID of the IRQ source
  1781. * @src_index: IRQ source index within the source device
  1782. * @dst_id: Device ID of the IRQ destination
1783. * @dst_host_irq: IRQ number of the destination device
  1784. *
  1785. * Return: 0 if all went fine, else return appropriate error.
  1786. */
  1787. static int ti_sci_cmd_set_direct_irq(const struct ti_sci_handle *handle,
  1788. u16 src_id, u16 src_index, u16 dst_id,
  1789. u16 dst_host_irq)
  1790. {
  1791. u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
  1792. return ti_sci_set_irq(handle, valid_params, src_id, src_index,
  1793. dst_id, dst_host_irq, 0, 0, 0, 0, 0);
  1794. }
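/*
 * Illustrative usage sketch (not from the original source): route interrupt
 * index 4 of a hypothetical source device 28 directly to host irq 32 on
 * destination device 56. All IDs below are placeholders for the example:
 *
 *	ret = ti_sci_cmd_set_direct_irq(handle, 28, 4, 56, 32);
 *	if (ret)
 *		dev_err(dev, "direct irq route failed (%d)\n", ret);
 */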
  1795. /**
  1796. * ti_sci_cmd_set_event_irq() - Configure an event based irq route between the
  1797. * requested source and destination
  1798. * @handle: Pointer to TISCI handle.
  1799. * @src_id: Device ID of the IRQ source
  1800. * @src_index: IRQ source index within the source device
  1801. * @dst_id: Device ID of the IRQ destination
1802. * @dst_host_irq: IRQ number of the destination device
  1803. * @ia_id: Device ID of the IA, if the IRQ flows through this IA
  1804. * @vint: Virtual interrupt to be used within the IA
  1805. * @global_event: Global event number to be used for the requesting event
  1806. * @vint_status_bit: Virtual interrupt status bit to be used for the event
  1807. *
  1808. * Return: 0 if all went fine, else return appropriate error.
  1809. */
  1810. static int ti_sci_cmd_set_event_irq(const struct ti_sci_handle *handle,
  1811. u16 src_id, u16 src_index, u16 dst_id,
  1812. u16 dst_host_irq, u16 ia_id, u16 vint,
  1813. u16 global_event, u8 vint_status_bit)
  1814. {
  1815. u32 valid_params = MSG_FLAG_DST_ID_VALID |
  1816. MSG_FLAG_DST_HOST_IRQ_VALID | MSG_FLAG_IA_ID_VALID |
  1817. MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
  1818. MSG_FLAG_VINT_STS_BIT_VALID;
  1819. return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
  1820. dst_host_irq, ia_id, vint, global_event,
  1821. vint_status_bit, 0);
  1822. }
  1823. /**
  1824. * ti_sci_cmd_set_direct_irq_from_shost() - Configure a non-event based direct
  1825. * irq route between the source and
  1826. * destination belonging to a
  1827. * specified host.
  1828. * @handle: Pointer to TISCI handle.
  1829. * @src_id: Device ID of the IRQ source
  1830. * @src_index: IRQ source index within the source device
  1831. * @dst_id: Device ID of the IRQ destination
1832. * @dst_host_irq: IRQ number of the destination device
1833. * @s_host: Secondary host ID for which the irq/event is being
1834. * requested.
  1835. *
  1836. * Return: 0 if all went fine, else return appropriate error.
  1837. */
  1838. static
  1839. int ti_sci_cmd_set_direct_irq_from_shost(const struct ti_sci_handle *handle,
  1840. u16 src_id, u16 src_index, u16 dst_id,
  1841. u16 dst_host_irq, u8 s_host)
  1842. {
  1843. u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID |
  1844. MSG_FLAG_SHOST_VALID;
  1845. return ti_sci_set_irq(handle, valid_params, src_id, src_index,
  1846. dst_id, dst_host_irq, 0, 0, 0, 0, s_host);
  1847. }
  1848. /**
  1849. * ti_sci_cmd_set_event_irq_from_shost() - Configure an event based irq
  1850. * route between the source and
  1851. * destination belonging to a
  1852. * specified host.
  1853. * @handle: Pointer to TISCI handle.
  1854. * @src_id: Device ID of the IRQ source
  1855. * @src_index: IRQ source index within the source device
  1856. * @dst_id: Device ID of the IRQ destination
1857. * @dst_host_irq: IRQ number of the destination device
  1858. * @ia_id: Device ID of the IA, if the IRQ flows through this IA
  1859. * @vint: Virtual interrupt to be used within the IA
  1860. * @global_event: Global event number to be used for the requesting event
  1861. * @vint_status_bit: Virtual interrupt status bit to be used for the event
1862. * @s_host: Secondary host ID for which the irq/event is being
1863. * requested.
  1864. *
  1865. * Return: 0 if all went fine, else return appropriate error.
  1866. */
  1867. static
  1868. int ti_sci_cmd_set_event_irq_from_shost(const struct ti_sci_handle *handle,
  1869. u16 src_id, u16 src_index, u16 dst_id,
  1870. u16 dst_host_irq, u16 ia_id, u16 vint,
  1871. u16 global_event, u8 vint_status_bit,
  1872. u8 s_host)
  1873. {
  1874. u32 valid_params = MSG_FLAG_DST_ID_VALID |
  1875. MSG_FLAG_DST_HOST_IRQ_VALID | MSG_FLAG_IA_ID_VALID |
  1876. MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
  1877. MSG_FLAG_VINT_STS_BIT_VALID | MSG_FLAG_SHOST_VALID;
  1878. return ti_sci_set_irq(handle, valid_params, src_id, src_index,
  1879. dst_id, dst_host_irq, ia_id, vint,
  1880. global_event, vint_status_bit, s_host);
  1881. }
  1882. /**
  1883. * ti_sci_cmd_set_event_irq_to_poll() - Configure an event based irq
  1884. * in polling mode
  1885. * @handle: Pointer to TISCI handle.
  1886. * @src_id: Device ID of the IRQ source
  1887. * @src_index: IRQ source index within the source device
  1890. * @ia_id: Device ID of the IA, if the IRQ flows through this IA
  1891. * @vint: Virtual interrupt to be used within the IA
  1892. * @global_event: Global event number to be used for the requesting event
  1893. * @vint_status_bit: Virtual interrupt status bit to be used for the event
  1896. *
  1897. * Return: 0 if all went fine, else return appropriate error.
  1898. */
  1899. static int ti_sci_cmd_set_event_irq_to_poll(const struct ti_sci_handle *handle,
  1900. u16 src_id, u16 src_index,
  1901. u16 ia_id, u16 vint,
  1902. u16 global_event,
  1903. u8 vint_status_bit)
  1904. {
  1905. u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
  1906. MSG_FLAG_GLB_EVNT_VALID |
  1907. MSG_FLAG_VINT_STS_BIT_VALID;
  1908. return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
  1909. ia_id, vint, global_event, vint_status_bit, 0);
  1910. }
  1911. /**
  1912. * ti_sci_cmd_free_direct_irq() - Free a non-event based direct irq route
  1913. * between the requested source and destination.
  1914. * @handle: Pointer to TISCI handle.
  1915. * @src_id: Device ID of the IRQ source
  1916. * @src_index: IRQ source index within the source device
  1917. * @dst_id: Device ID of the IRQ destination
1918. * @dst_host_irq: IRQ number of the destination device
  1919. *
  1920. * Return: 0 if all went fine, else return appropriate error.
  1921. */
  1922. static int ti_sci_cmd_free_direct_irq(const struct ti_sci_handle *handle,
  1923. u16 src_id, u16 src_index, u16 dst_id,
  1924. u16 dst_host_irq)
  1925. {
  1926. u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
  1927. return ti_sci_free_irq(handle, valid_params, src_id, src_index,
  1928. dst_id, dst_host_irq, 0, 0, 0, 0, 0);
  1929. }
  1930. /**
  1931. * ti_sci_cmd_free_event_irq() - Free an event based irq route between the
  1932. * requested source and destination
  1933. * @handle: Pointer to TISCI handle.
  1934. * @src_id: Device ID of the IRQ source
  1935. * @src_index: IRQ source index within the source device
  1936. * @dst_id: Device ID of the IRQ destination
1937. * @dst_host_irq: IRQ number of the destination device
  1938. * @ia_id: Device ID of the IA, if the IRQ flows through this IA
  1939. * @vint: Virtual interrupt to be used within the IA
  1940. * @global_event: Global event number to be used for the requesting event
  1941. * @vint_status_bit: Virtual interrupt status bit to be used for the event
  1942. *
  1943. * Return: 0 if all went fine, else return appropriate error.
  1944. */
  1945. static int ti_sci_cmd_free_event_irq(const struct ti_sci_handle *handle,
  1946. u16 src_id, u16 src_index, u16 dst_id,
  1947. u16 dst_host_irq, u16 ia_id, u16 vint,
  1948. u16 global_event, u8 vint_status_bit)
  1949. {
  1950. u32 valid_params = MSG_FLAG_DST_ID_VALID |
  1951. MSG_FLAG_DST_HOST_IRQ_VALID | MSG_FLAG_IA_ID_VALID |
  1952. MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
  1953. MSG_FLAG_VINT_STS_BIT_VALID;
  1954. return ti_sci_free_irq(handle, valid_params, src_id, src_index,
  1955. dst_id, dst_host_irq, ia_id, vint,
  1956. global_event, vint_status_bit, 0);
  1957. }
  1958. /**
  1959. * ti_sci_cmd_free_direct_irq_from_shost() - Free a non-event based direct irq
  1960. * route between the source and
  1961. * destination belonging to a
  1962. * specified host.
  1963. * @handle: Pointer to TISCI handle.
  1964. * @src_id: Device ID of the IRQ source
  1965. * @src_index: IRQ source index within the source device
  1966. * @dst_id: Device ID of the IRQ destination
1967. * @dst_host_irq: IRQ number of the destination device
1968. * @s_host: Secondary host ID for which the irq/event is being
1969. * requested.
  1970. *
  1971. * Return: 0 if all went fine, else return appropriate error.
  1972. */
  1973. static
  1974. int ti_sci_cmd_free_direct_irq_from_shost(const struct ti_sci_handle *handle,
  1975. u16 src_id, u16 src_index, u16 dst_id,
  1976. u16 dst_host_irq, u8 s_host)
  1977. {
  1978. u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID |
  1979. MSG_FLAG_SHOST_VALID;
  1980. return ti_sci_free_irq(handle, valid_params, src_id, src_index,
  1981. dst_id, dst_host_irq, 0, 0, 0, 0, s_host);
  1982. }
  1983. /**
  1984. * ti_sci_cmd_free_event_irq_from_shost() - Free an event based irq
  1985. * route between the source and
  1986. * destination belonging to a
  1987. * specified host.
  1988. * @handle: Pointer to TISCI handle.
  1989. * @src_id: Device ID of the IRQ source
  1990. * @src_index: IRQ source index within the source device
  1991. * @dst_id: Device ID of the IRQ destination
1992. * @dst_host_irq: IRQ number of the destination device
  1993. * @ia_id: Device ID of the IA, if the IRQ flows through this IA
  1994. * @vint: Virtual interrupt to be used within the IA
  1995. * @global_event: Global event number to be used for the requesting event
  1996. * @vint_status_bit: Virtual interrupt status bit to be used for the event
1997. * @s_host: Secondary host ID for which the irq/event is being
1998. * requested.
  1999. *
  2000. * Return: 0 if all went fine, else return appropriate error.
  2001. */
  2002. static
  2003. int ti_sci_cmd_free_event_irq_from_shost(const struct ti_sci_handle *handle,
  2004. u16 src_id, u16 src_index, u16 dst_id,
  2005. u16 dst_host_irq, u16 ia_id, u16 vint,
  2006. u16 global_event, u8 vint_status_bit,
  2007. u8 s_host)
  2008. {
  2009. u32 valid_params = MSG_FLAG_DST_ID_VALID |
  2010. MSG_FLAG_DST_HOST_IRQ_VALID | MSG_FLAG_IA_ID_VALID |
  2011. MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
  2012. MSG_FLAG_VINT_STS_BIT_VALID | MSG_FLAG_SHOST_VALID;
  2013. return ti_sci_free_irq(handle, valid_params, src_id, src_index,
  2014. dst_id, dst_host_irq, ia_id, vint,
  2015. global_event, vint_status_bit, s_host);
  2016. }
  2017. /**
  2018. * ti_sci_cmd_free_event_irq_to_poll() - Free an event based irq
  2019. * in polling mode
  2020. * @handle: Pointer to TISCI handle.
  2021. * @src_id: Device ID of the IRQ source
  2022. * @src_index: IRQ source index within the source device
  2025. * @ia_id: Device ID of the IA, if the IRQ flows through this IA
  2026. * @vint: Virtual interrupt to be used within the IA
  2027. * @global_event: Global event number to be used for the requesting event
  2028. * @vint_status_bit: Virtual interrupt status bit to be used for the event
  2031. *
  2032. * Return: 0 if all went fine, else return appropriate error.
  2033. */
  2034. static int ti_sci_cmd_free_event_irq_to_poll(const struct ti_sci_handle *handle,
  2035. u16 src_id, u16 src_index,
  2036. u16 ia_id, u16 vint,
  2037. u16 global_event,
  2038. u8 vint_status_bit)
  2039. {
  2040. u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
  2041. MSG_FLAG_GLB_EVNT_VALID |
  2042. MSG_FLAG_VINT_STS_BIT_VALID;
  2043. return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
  2044. ia_id, vint, global_event, vint_status_bit, 0);
  2045. }
  2046. /**
  2047. * ti_sci_cmd_ring_config() - configure RA ring
  2048. * @handle: pointer to TI SCI handle
  2049. * @valid_params: Bitfield defining validity of ring configuration parameters.
  2050. * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
  2051. * @index: Ring index.
  2052. * @addr_lo: The ring base address lo 32 bits
  2053. * @addr_hi: The ring base address hi 32 bits
  2054. * @count: Number of ring elements.
  2055. * @mode: The mode of the ring
  2056. * @size: The ring element size.
  2057. * @order_id: Specifies the ring's bus order ID.
  2058. *
  2059. * Return: 0 if all went well, else returns appropriate error value.
  2060. *
  2061. * See @ti_sci_msg_rm_ring_cfg_req for more info.
  2062. */
  2063. static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
  2064. u32 valid_params, u16 nav_id, u16 index,
  2065. u32 addr_lo, u32 addr_hi, u32 count,
  2066. u8 mode, u8 size, u8 order_id)
  2067. {
  2068. struct ti_sci_msg_rm_ring_cfg_resp *resp;
  2069. struct ti_sci_msg_rm_ring_cfg_req *req;
  2070. struct ti_sci_xfer *xfer;
  2071. struct ti_sci_info *info;
  2072. struct device *dev;
  2073. int ret = 0;
  2074. if (IS_ERR_OR_NULL(handle))
  2075. return -EINVAL;
  2076. info = handle_to_ti_sci_info(handle);
  2077. dev = info->dev;
  2078. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
  2079. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2080. sizeof(*req), sizeof(*resp));
  2081. if (IS_ERR(xfer)) {
  2082. ret = PTR_ERR(xfer);
  2083. dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
  2084. return ret;
  2085. }
  2086. req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
  2087. req->valid_params = valid_params;
  2088. req->nav_id = nav_id;
  2089. req->index = index;
  2090. req->addr_lo = addr_lo;
  2091. req->addr_hi = addr_hi;
  2092. req->count = count;
  2093. req->mode = mode;
  2094. req->size = size;
  2095. req->order_id = order_id;
  2096. ret = ti_sci_do_xfer(info, xfer);
  2097. if (ret) {
  2098. dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
  2099. goto fail;
  2100. }
  2101. resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->xfer_buf;
  2102. ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
  2103. fail:
  2104. ti_sci_put_one_xfer(&info->minfo, xfer);
  2105. dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
  2106. return ret;
  2107. }
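/*
 * Illustrative usage sketch (not from the original source): configure a
 * hypothetical ring 10 of Navigator device 235 as a 256-element ring. The
 * device ID, ring index, count, mode, size and valid_params below are all
 * placeholders for the example:
 *
 *	ret = ti_sci_cmd_ring_config(handle, valid_params, 235, 10,
 *				     lower_32_bits(ring_dma_addr),
 *				     upper_32_bits(ring_dma_addr),
 *				     256, mode, size, 0);
 */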
  2108. /**
  2109. * ti_sci_cmd_ring_get_config() - get RA ring configuration
  2110. * @handle: pointer to TI SCI handle
  2111. * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
  2112. * @index: Ring index.
  2113. * @addr_lo: returns ring's base address lo 32 bits
  2114. * @addr_hi: returns ring's base address hi 32 bits
  2115. * @count: returns number of ring elements.
  2116. * @mode: returns mode of the ring
  2117. * @size: returns ring element size.
  2118. * @order_id: returns ring's bus order ID.
  2119. *
  2120. * Return: 0 if all went well, else returns appropriate error value.
  2121. *
  2122. * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
  2123. */
  2124. static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
  2125. u32 nav_id, u32 index, u8 *mode,
  2126. u32 *addr_lo, u32 *addr_hi,
  2127. u32 *count, u8 *size, u8 *order_id)
  2128. {
  2129. struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
  2130. struct ti_sci_msg_rm_ring_get_cfg_req *req;
  2131. struct ti_sci_xfer *xfer;
  2132. struct ti_sci_info *info;
  2133. struct device *dev;
  2134. int ret = 0;
  2135. if (IS_ERR_OR_NULL(handle))
  2136. return -EINVAL;
  2137. info = handle_to_ti_sci_info(handle);
  2138. dev = info->dev;
  2139. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
  2140. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2141. sizeof(*req), sizeof(*resp));
  2142. if (IS_ERR(xfer)) {
  2143. ret = PTR_ERR(xfer);
  2144. dev_err(info->dev,
  2145. "RM_RA:Message get config failed(%d)\n", ret);
  2146. return ret;
  2147. }
  2148. req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
  2149. req->nav_id = nav_id;
  2150. req->index = index;
  2151. ret = ti_sci_do_xfer(info, xfer);
  2152. if (ret) {
  2153. dev_err(info->dev, "RM_RA:Mbox get config send fail %d\n", ret);
  2154. goto fail;
  2155. }
  2156. resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
  2157. if (!ti_sci_is_response_ack(resp)) {
  2158. ret = -ENODEV;
  2159. } else {
  2160. if (mode)
  2161. *mode = resp->mode;
  2162. if (addr_lo)
  2163. *addr_lo = resp->addr_lo;
  2164. if (addr_hi)
  2165. *addr_hi = resp->addr_hi;
  2166. if (count)
  2167. *count = resp->count;
  2168. if (size)
  2169. *size = resp->size;
  2170. if (order_id)
  2171. *order_id = resp->order_id;
2172. }
  2173. fail:
  2174. ti_sci_put_one_xfer(&info->minfo, xfer);
  2175. dev_dbg(info->dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
  2176. return ret;
  2177. }
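/**
 * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source thread to destination thread
 * @handle: Pointer to TI SCI handle.
 * @nav_id: Device ID of Navigator Subsystem which should be used for pairing
 * @src_thread: Source PSI-L thread ID
 * @dst_thread: Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */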
  2178. static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
  2179. u32 nav_id, u32 src_thread, u32 dst_thread)
  2180. {
  2181. struct ti_sci_msg_hdr *resp;
  2182. struct ti_sci_msg_psil_pair *req;
  2183. struct ti_sci_xfer *xfer;
  2184. struct ti_sci_info *info;
  2185. struct device *dev;
  2186. int ret = 0;
  2187. if (IS_ERR(handle))
  2188. return PTR_ERR(handle);
  2189. if (!handle)
  2190. return -EINVAL;
  2191. info = handle_to_ti_sci_info(handle);
  2192. dev = info->dev;
  2193. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
  2194. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2195. sizeof(*req), sizeof(*resp));
  2196. if (IS_ERR(xfer)) {
  2197. ret = PTR_ERR(xfer);
  2198. dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
  2199. return ret;
  2200. }
  2201. req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
  2202. req->nav_id = nav_id;
  2203. req->src_thread = src_thread;
  2204. req->dst_thread = dst_thread;
  2205. ret = ti_sci_do_xfer(info, xfer);
  2206. if (ret) {
  2207. dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
  2208. goto fail;
  2209. }
  2210. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  2211. ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
  2212. fail:
  2213. ti_sci_put_one_xfer(&info->minfo, xfer);
  2214. return ret;
  2215. }
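/**
 * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source thread from destination
 * thread
 * @handle: Pointer to TI SCI handle.
 * @nav_id: Device ID of Navigator Subsystem which should be used for
 * unpairing
 * @src_thread: Source PSI-L thread ID
 * @dst_thread: Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */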
  2216. static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
  2217. u32 nav_id, u32 src_thread, u32 dst_thread)
  2218. {
  2219. struct ti_sci_msg_hdr *resp;
  2220. struct ti_sci_msg_psil_unpair *req;
  2221. struct ti_sci_xfer *xfer;
  2222. struct ti_sci_info *info;
  2223. struct device *dev;
  2224. int ret = 0;
  2225. if (IS_ERR(handle))
  2226. return PTR_ERR(handle);
  2227. if (!handle)
  2228. return -EINVAL;
  2229. info = handle_to_ti_sci_info(handle);
  2230. dev = info->dev;
  2231. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
  2232. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2233. sizeof(*req), sizeof(*resp));
  2234. if (IS_ERR(xfer)) {
  2235. ret = PTR_ERR(xfer);
  2236. dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
  2237. return ret;
  2238. }
  2239. req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
  2240. req->nav_id = nav_id;
  2241. req->src_thread = src_thread;
  2242. req->dst_thread = dst_thread;
  2243. ret = ti_sci_do_xfer(info, xfer);
  2244. if (ret) {
  2245. dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
  2246. goto fail;
  2247. }
  2248. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  2249. ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
  2250. fail:
  2251. ti_sci_put_one_xfer(&info->minfo, xfer);
  2252. return ret;
  2253. }
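/**
 * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
 * @handle: Pointer to TI SCI handle.
 * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
 * structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_tx_ch_cfg_req for more info.
 */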
  2254. static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
  2255. const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
  2256. {
  2257. struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
  2258. struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
  2259. struct ti_sci_xfer *xfer;
  2260. struct ti_sci_info *info;
  2261. struct device *dev;
  2262. int ret = 0;
  2263. if (IS_ERR_OR_NULL(handle))
  2264. return -EINVAL;
  2265. info = handle_to_ti_sci_info(handle);
  2266. dev = info->dev;
  2267. xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
  2268. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2269. sizeof(*req), sizeof(*resp));
  2270. if (IS_ERR(xfer)) {
  2271. ret = PTR_ERR(xfer);
  2272. dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
  2273. return ret;
  2274. }
  2275. req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
  2276. req->valid_params = params->valid_params;
  2277. req->nav_id = params->nav_id;
  2278. req->index = params->index;
  2279. req->tx_pause_on_err = params->tx_pause_on_err;
  2280. req->tx_filt_einfo = params->tx_filt_einfo;
  2281. req->tx_filt_pswords = params->tx_filt_pswords;
  2282. req->tx_atype = params->tx_atype;
  2283. req->tx_chan_type = params->tx_chan_type;
  2284. req->tx_supr_tdpkt = params->tx_supr_tdpkt;
  2285. req->tx_fetch_size = params->tx_fetch_size;
  2286. req->tx_credit_count = params->tx_credit_count;
  2287. req->txcq_qnum = params->txcq_qnum;
  2288. req->tx_priority = params->tx_priority;
  2289. req->tx_qos = params->tx_qos;
  2290. req->tx_orderid = params->tx_orderid;
  2291. req->fdepth = params->fdepth;
  2292. req->tx_sched_priority = params->tx_sched_priority;
  2293. req->tx_burst_size = params->tx_burst_size;
  2294. ret = ti_sci_do_xfer(info, xfer);
  2295. if (ret) {
  2296. dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
  2297. goto fail;
  2298. }
  2299. resp =
  2300. (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->xfer_buf;
  2301. ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
  2302. fail:
  2303. ti_sci_put_one_xfer(&info->minfo, xfer);
2304. dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
  2305. return ret;
  2306. }
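/**
 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
 * @handle: Pointer to TI SCI handle.
 * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
 * structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_rx_ch_cfg_req for more info.
 */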
  2307. static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
  2308. const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
  2309. {
  2310. struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
  2311. struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
  2312. struct ti_sci_xfer *xfer;
  2313. struct ti_sci_info *info;
  2314. struct device *dev;
  2315. int ret = 0;
  2316. if (IS_ERR_OR_NULL(handle))
  2317. return -EINVAL;
  2318. info = handle_to_ti_sci_info(handle);
  2319. dev = info->dev;
  2320. xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
  2321. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2322. sizeof(*req), sizeof(*resp));
  2323. if (IS_ERR(xfer)) {
  2324. ret = PTR_ERR(xfer);
  2325. dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
  2326. return ret;
  2327. }
  2328. req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
  2329. req->valid_params = params->valid_params;
  2330. req->nav_id = params->nav_id;
  2331. req->index = params->index;
  2332. req->rx_fetch_size = params->rx_fetch_size;
  2333. req->rxcq_qnum = params->rxcq_qnum;
  2334. req->rx_priority = params->rx_priority;
  2335. req->rx_qos = params->rx_qos;
  2336. req->rx_orderid = params->rx_orderid;
  2337. req->rx_sched_priority = params->rx_sched_priority;
  2338. req->flowid_start = params->flowid_start;
  2339. req->flowid_cnt = params->flowid_cnt;
  2340. req->rx_pause_on_err = params->rx_pause_on_err;
  2341. req->rx_atype = params->rx_atype;
  2342. req->rx_chan_type = params->rx_chan_type;
  2343. req->rx_ignore_short = params->rx_ignore_short;
  2344. req->rx_ignore_long = params->rx_ignore_long;
  2345. req->rx_burst_size = params->rx_burst_size;
  2346. ret = ti_sci_do_xfer(info, xfer);
  2347. if (ret) {
  2348. dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
  2349. goto fail;
  2350. }
  2351. resp =
  2352. (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->xfer_buf;
  2353. ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
  2354. fail:
  2355. ti_sci_put_one_xfer(&info->minfo, xfer);
  2356. dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
  2357. return ret;
  2358. }
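/**
 * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure a UDMAP RX flow
 * @handle: Pointer to TI SCI handle.
 * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX flow config
 * structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_flow_cfg_req for more info.
 */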
  2359. static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
  2360. const struct ti_sci_msg_rm_udmap_flow_cfg *params)
  2361. {
  2362. struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
  2363. struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
  2364. struct ti_sci_xfer *xfer;
  2365. struct ti_sci_info *info;
  2366. struct device *dev;
  2367. int ret = 0;
  2368. if (IS_ERR_OR_NULL(handle))
  2369. return -EINVAL;
  2370. info = handle_to_ti_sci_info(handle);
  2371. dev = info->dev;
  2372. xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
  2373. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2374. sizeof(*req), sizeof(*resp));
  2375. if (IS_ERR(xfer)) {
  2376. ret = PTR_ERR(xfer);
  2377. dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
  2378. return ret;
  2379. }
  2380. req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
  2381. req->valid_params = params->valid_params;
  2382. req->nav_id = params->nav_id;
  2383. req->flow_index = params->flow_index;
  2384. req->rx_einfo_present = params->rx_einfo_present;
  2385. req->rx_psinfo_present = params->rx_psinfo_present;
  2386. req->rx_error_handling = params->rx_error_handling;
  2387. req->rx_desc_type = params->rx_desc_type;
  2388. req->rx_sop_offset = params->rx_sop_offset;
  2389. req->rx_dest_qnum = params->rx_dest_qnum;
  2390. req->rx_src_tag_hi = params->rx_src_tag_hi;
  2391. req->rx_src_tag_lo = params->rx_src_tag_lo;
  2392. req->rx_dest_tag_hi = params->rx_dest_tag_hi;
  2393. req->rx_dest_tag_lo = params->rx_dest_tag_lo;
  2394. req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
  2395. req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
  2396. req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
  2397. req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
  2398. req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
  2399. req->rx_fdq1_qnum = params->rx_fdq1_qnum;
  2400. req->rx_fdq2_qnum = params->rx_fdq2_qnum;
  2401. req->rx_fdq3_qnum = params->rx_fdq3_qnum;
  2402. req->rx_ps_location = params->rx_ps_location;
  2403. ret = ti_sci_do_xfer(info, xfer);
  2404. if (ret) {
  2405. dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
  2406. goto fail;
  2407. }
  2408. resp =
  2409. (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->xfer_buf;
  2410. ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
  2411. fail:
  2412. ti_sci_put_one_xfer(&info->minfo, xfer);
  2413. dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
  2414. return ret;
  2415. }
  2416. /**
  2417. * ti_sci_cmd_proc_request() - Command to request a physical processor control
  2418. * @handle: Pointer to TI SCI handle
  2419. * @proc_id: Processor ID this request is for
  2420. *
  2421. * Return: 0 if all went well, else returns appropriate error value.
  2422. */
  2423. static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
  2424. u8 proc_id)
  2425. {
  2426. struct ti_sci_msg_req_proc_request *req;
  2427. struct ti_sci_msg_hdr *resp;
  2428. struct ti_sci_info *info;
  2429. struct ti_sci_xfer *xfer;
  2430. struct device *dev;
  2431. int ret = 0;
  2432. if (!handle)
  2433. return -EINVAL;
  2434. if (IS_ERR(handle))
  2435. return PTR_ERR(handle);
  2436. info = handle_to_ti_sci_info(handle);
  2437. dev = info->dev;
  2438. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
  2439. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2440. sizeof(*req), sizeof(*resp));
  2441. if (IS_ERR(xfer)) {
  2442. ret = PTR_ERR(xfer);
  2443. dev_err(dev, "Message alloc failed(%d)\n", ret);
  2444. return ret;
  2445. }
  2446. req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
  2447. req->processor_id = proc_id;
  2448. ret = ti_sci_do_xfer(info, xfer);
  2449. if (ret) {
  2450. dev_err(dev, "Mbox send fail %d\n", ret);
  2451. goto fail;
  2452. }
2453. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  2454. ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
  2455. fail:
  2456. ti_sci_put_one_xfer(&info->minfo, xfer);
  2457. return ret;
  2458. }
  2459. /**
  2460. * ti_sci_cmd_proc_release() - Command to release a physical processor control
  2461. * @handle: Pointer to TI SCI handle
  2462. * @proc_id: Processor ID this request is for
  2463. *
  2464. * Return: 0 if all went well, else returns appropriate error value.
  2465. */
  2466. static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
  2467. u8 proc_id)
  2468. {
  2469. struct ti_sci_msg_req_proc_release *req;
  2470. struct ti_sci_msg_hdr *resp;
  2471. struct ti_sci_info *info;
  2472. struct ti_sci_xfer *xfer;
  2473. struct device *dev;
  2474. int ret = 0;
  2475. if (!handle)
  2476. return -EINVAL;
  2477. if (IS_ERR(handle))
  2478. return PTR_ERR(handle);
  2479. info = handle_to_ti_sci_info(handle);
  2480. dev = info->dev;
  2481. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
  2482. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2483. sizeof(*req), sizeof(*resp));
  2484. if (IS_ERR(xfer)) {
  2485. ret = PTR_ERR(xfer);
  2486. dev_err(dev, "Message alloc failed(%d)\n", ret);
  2487. return ret;
  2488. }
  2489. req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
  2490. req->processor_id = proc_id;
  2491. ret = ti_sci_do_xfer(info, xfer);
  2492. if (ret) {
  2493. dev_err(dev, "Mbox send fail %d\n", ret);
  2494. goto fail;
  2495. }
2496. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  2497. ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
  2498. fail:
  2499. ti_sci_put_one_xfer(&info->minfo, xfer);
  2500. return ret;
  2501. }
  2502. /**
  2503. * ti_sci_cmd_proc_handover() - Command to handover a physical processor
  2504. * control to a host in the processor's access
  2505. * control list.
  2506. * @handle: Pointer to TI SCI handle
  2507. * @proc_id: Processor ID this request is for
  2508. * @host_id: Host ID to get the control of the processor
  2509. *
  2510. * Return: 0 if all went well, else returns appropriate error value.
  2511. */
  2512. static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
  2513. u8 proc_id, u8 host_id)
  2514. {
  2515. struct ti_sci_msg_req_proc_handover *req;
  2516. struct ti_sci_msg_hdr *resp;
  2517. struct ti_sci_info *info;
  2518. struct ti_sci_xfer *xfer;
  2519. struct device *dev;
  2520. int ret = 0;
  2521. if (!handle)
  2522. return -EINVAL;
  2523. if (IS_ERR(handle))
  2524. return PTR_ERR(handle);
  2525. info = handle_to_ti_sci_info(handle);
  2526. dev = info->dev;
  2527. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
  2528. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2529. sizeof(*req), sizeof(*resp));
  2530. if (IS_ERR(xfer)) {
  2531. ret = PTR_ERR(xfer);
  2532. dev_err(dev, "Message alloc failed(%d)\n", ret);
  2533. return ret;
  2534. }
  2535. req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
  2536. req->processor_id = proc_id;
  2537. req->host_id = host_id;
  2538. ret = ti_sci_do_xfer(info, xfer);
  2539. if (ret) {
  2540. dev_err(dev, "Mbox send fail %d\n", ret);
  2541. goto fail;
  2542. }
2543. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  2544. ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
  2545. fail:
  2546. ti_sci_put_one_xfer(&info->minfo, xfer);
  2547. return ret;
  2548. }
/**
 * ti_sci_cmd_proc_set_config() - Command to set the processor boot
 *				  configuration flags
 * @handle:		Pointer to TI SCI handle
 * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor Boot vector (start address)
 * @config_flags_set:	Configuration flags to be set
 * @config_flags_clear:	Configuration flags to be cleared.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
				      u8 proc_id, u64 bootvector,
				      u32 config_flags_set,
				      u32 config_flags_clear)
{
	struct ti_sci_msg_req_set_config *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
				TI_SCI_ADDR_HIGH_SHIFT;
	req->config_flags_set = config_flags_set;
	req->config_flags_clear = config_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

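/*
 * Illustrative note (not part of the driver): the boot vector travels as
 * two 32-bit message fields, so the 64-bit address is split on send and
 * reassembled on receive. Assuming the usual definitions (low mask covers
 * bits 31:0, high mask bits 63:32, shift of 32), the round trip is:
 *
 *	u64 bv = 0x0000000880000000ULL;			// hypothetical address
 *	u32 lo = bv & TI_SCI_ADDR_LOW_MASK;		// 0x80000000
 *	u32 hi = (bv & TI_SCI_ADDR_HIGH_MASK) >>
 *		 TI_SCI_ADDR_HIGH_SHIFT;		// 0x00000008
 *	u64 back = (lo & TI_SCI_ADDR_LOW_MASK) |
 *		   (((u64)hi << TI_SCI_ADDR_HIGH_SHIFT) &
 *		    TI_SCI_ADDR_HIGH_MASK);		// back == bv
 */
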
/**
 * ti_sci_cmd_proc_set_control() - Command to set the processor boot
 *				   control flags
 * @handle:		Pointer to TI SCI handle
 * @proc_id:		Processor ID this request is for
 * @control_flags_set:	Control flags to be set
 * @control_flags_clear: Control flags to be cleared
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
				       u8 proc_id, u32 control_flags_set,
				       u32 control_flags_clear)
{
	struct ti_sci_msg_req_set_ctrl *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->control_flags_set = control_flags_set;
	req->control_flags_clear = control_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @bv:		Processor Boot vector (start address)
 * @cfg_flags:	Processor specific configuration flags
 * @ctrl_flags:	Processor specific control flags
 * @sts_flags:	Processor specific status flags
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
				      u8 proc_id, u64 *bv, u32 *cfg_flags,
				      u32 *ctrl_flags, u32 *sts_flags)
{
	struct ti_sci_msg_resp_get_status *resp;
	struct ti_sci_msg_req_get_status *req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
		       TI_SCI_ADDR_HIGH_MASK);
		*cfg_flags = resp->config_flags;
		*ctrl_flags = resp->control_flags;
		*sts_flags = resp->status_flags;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_setup_ops() - Setup the operations structures
 * @info:	pointer to TISCI instance
 */
static void ti_sci_setup_ops(struct ti_sci_info *info)
{
	struct ti_sci_ops *ops = &info->handle.ops;
	struct ti_sci_core_ops *core_ops = &ops->core_ops;
	struct ti_sci_dev_ops *dops = &ops->dev_ops;
	struct ti_sci_clk_ops *cops = &ops->clk_ops;
	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
	struct ti_sci_proc_ops *pops = &ops->proc_ops;
	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;

	core_ops->reboot_device = ti_sci_cmd_core_reboot;

	dops->get_device = ti_sci_cmd_get_device;
	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
	dops->idle_device = ti_sci_cmd_idle_device;
	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
	dops->put_device = ti_sci_cmd_put_device;
	dops->is_valid = ti_sci_cmd_dev_is_valid;
	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
	dops->is_idle = ti_sci_cmd_dev_is_idle;
	dops->is_stop = ti_sci_cmd_dev_is_stop;
	dops->is_on = ti_sci_cmd_dev_is_on;
	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
	dops->set_device_resets = ti_sci_cmd_set_device_resets;
	dops->get_device_resets = ti_sci_cmd_get_device_resets;

	cops->get_clock = ti_sci_cmd_get_clock;
	cops->idle_clock = ti_sci_cmd_idle_clock;
	cops->put_clock = ti_sci_cmd_put_clock;
	cops->is_auto = ti_sci_cmd_clk_is_auto;
	cops->is_on = ti_sci_cmd_clk_is_on;
	cops->is_off = ti_sci_cmd_clk_is_off;
	cops->set_parent = ti_sci_cmd_clk_set_parent;
	cops->get_parent = ti_sci_cmd_clk_get_parent;
	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
	cops->set_freq = ti_sci_cmd_clk_set_freq;
	cops->get_freq = ti_sci_cmd_clk_get_freq;

	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
	rm_core_ops->get_range_from_shost =
				ti_sci_cmd_get_resource_range_from_shost;

	iops->set_direct_irq = ti_sci_cmd_set_direct_irq;
	iops->set_event_irq = ti_sci_cmd_set_event_irq;
	iops->set_direct_irq_from_shost = ti_sci_cmd_set_direct_irq_from_shost;
	iops->set_event_irq_from_shost = ti_sci_cmd_set_event_irq_from_shost;
	iops->set_event_irq_to_poll = ti_sci_cmd_set_event_irq_to_poll;
	iops->free_direct_irq = ti_sci_cmd_free_direct_irq;
	iops->free_event_irq = ti_sci_cmd_free_event_irq;
	iops->free_direct_irq_from_shost =
				ti_sci_cmd_free_direct_irq_from_shost;
	iops->free_event_irq_from_shost = ti_sci_cmd_free_event_irq_from_shost;
	iops->free_event_irq_to_poll = ti_sci_cmd_free_event_irq_to_poll;

	pops->request = ti_sci_cmd_proc_request;
	pops->release = ti_sci_cmd_proc_release;
	pops->handover = ti_sci_cmd_proc_handover;
	pops->set_config = ti_sci_cmd_proc_set_config;
	pops->set_control = ti_sci_cmd_proc_set_control;
	pops->get_status = ti_sci_cmd_proc_get_status;

	rops->config = ti_sci_cmd_ring_config;
	rops->get_config = ti_sci_cmd_ring_get_config;

	psilops->pair = ti_sci_cmd_rm_psil_pair;
	psilops->unpair = ti_sci_cmd_rm_psil_unpair;

	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
}

/**
 * ti_sci_get_handle() - Get the TI SCI handle for a device
 * @dev:	Pointer to device for which we want SCI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of TI SCI protocol library.
 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
 * Return: pointer to handle if successful, else:
 * -EPROBE_DEFER if the instance is not ready
 * -ENODEV if the required node handler is missing
 * -EINVAL if invalid conditions are encountered.
 */
const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
{
	struct device_node *ti_sci_np;
	struct list_head *p;
	struct ti_sci_handle *handle = NULL;
	struct ti_sci_info *info;

	if (!dev) {
		pr_err("I need a device pointer\n");
		return ERR_PTR(-EINVAL);
	}
	ti_sci_np = of_get_parent(dev->of_node);
	if (!ti_sci_np) {
		dev_err(dev, "No OF information\n");
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&ti_sci_list_mutex);
	list_for_each(p, &ti_sci_list) {
		info = list_entry(p, struct ti_sci_info, node);
		if (ti_sci_np == info->dev->of_node) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&ti_sci_list_mutex);
	of_node_put(ti_sci_np);

	if (!handle)
		return ERR_PTR(-EPROBE_DEFER);

	return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_handle);

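/*
 * Illustrative sketch (hypothetical client driver, not part of this file):
 * a consumer device sitting under the TISCI node typically grabs the handle
 * in probe and balances it with ti_sci_put_handle() in remove:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		const struct ti_sci_handle *sci;
 *
 *		sci = ti_sci_get_handle(&pdev->dev);
 *		if (IS_ERR(sci))
 *			return PTR_ERR(sci);	// often -EPROBE_DEFER
 *		platform_set_drvdata(pdev, (void *)sci);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		return ti_sci_put_handle(platform_get_drvdata(pdev));
 *	}
 */
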
/**
 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
 * @handle:	Handle acquired by ti_sci_get_handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of TI SCI protocol library.
 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
 *
 * Return: 0 if successfully released,
 * if an error pointer was passed, it returns the error value back,
 * if null was passed, it returns -EINVAL;
 */
int ti_sci_put_handle(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	mutex_lock(&ti_sci_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&ti_sci_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(ti_sci_put_handle);

static void devm_ti_sci_release(struct device *dev, void *res)
{
	const struct ti_sci_handle **ptr = res;
	const struct ti_sci_handle *handle = *ptr;
	int ret;

	ret = ti_sci_put_handle(handle);
	if (ret)
		dev_err(dev, "failed to put handle %d\n", ret);
}

/**
 * devm_ti_sci_get_handle() - Managed get handle
 * @dev:	device for which we want SCI handle
 *
 * NOTE: This releases the handle once the device resources are
 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
 * The function does not track individual clients of the framework
 * and is expected to be maintained by caller of TI SCI protocol library.
 *
 * Return: pointer to handle if successful, else corresponding error pointer.
 */
const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
{
	const struct ti_sci_handle **ptr;
	const struct ti_sci_handle *handle;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_handle(dev);

	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);

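/*
 * Illustrative sketch (hypothetical client, not part of this file): with
 * the managed variant, devres drops the handle on driver unbind, so the
 * consumer must not call ti_sci_put_handle() itself:
 *
 *	const struct ti_sci_handle *sci;
 *
 *	sci = devm_ti_sci_get_handle(&pdev->dev);
 *	if (IS_ERR(sci))
 *		return PTR_ERR(sci);
 *	// use sci->ops.* freely; no explicit put is needed
 */
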
/**
 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
 * @np:		device node
 * @property:	property name containing phandle on TISCI node
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of TI SCI protocol library.
 * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle
 * Return: pointer to handle if successful, else:
 * -EPROBE_DEFER if the instance is not ready
 * -ENODEV if the required node handler is missing
 * -EINVAL if invalid conditions are encountered.
 */
const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
						  const char *property)
{
	struct ti_sci_handle *handle = NULL;
	struct device_node *ti_sci_np;
	struct ti_sci_info *info;
	struct list_head *p;

	if (!np) {
		pr_err("I need a device node pointer\n");
		return ERR_PTR(-EINVAL);
	}

	ti_sci_np = of_parse_phandle(np, property, 0);
	if (!ti_sci_np)
		return ERR_PTR(-ENODEV);

	mutex_lock(&ti_sci_list_mutex);
	list_for_each(p, &ti_sci_list) {
		info = list_entry(p, struct ti_sci_info, node);
		if (ti_sci_np == info->dev->of_node) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&ti_sci_list_mutex);
	of_node_put(ti_sci_np);

	if (!handle)
		return ERR_PTR(-EPROBE_DEFER);

	return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);

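/*
 * Illustrative sketch (assumed device-tree fragment and property name, not
 * from this file): a client node references the TISCI controller through a
 * phandle property, whose name is then passed as @property:
 *
 *	// device tree:
 *	//	foo@42 {
 *	//		ti,sci = <&dmsc>;
 *	//	};
 *
 *	sci = ti_sci_get_by_phandle(dev_of_node(dev), "ti,sci");
 *	if (IS_ERR(sci))
 *		return PTR_ERR(sci);
 */
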
/**
 * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
 * @dev:	Device pointer requesting TISCI handle
 * @property:	property name containing phandle on TISCI node
 *
 * NOTE: This releases the handle once the device resources are
 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
 * The function does not track individual clients of the framework
 * and is expected to be maintained by caller of TI SCI protocol library.
 *
 * Return: pointer to handle if successful, else corresponding error pointer.
 */
const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
						       const char *property)
{
	const struct ti_sci_handle *handle;
	const struct ti_sci_handle **ptr;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);

	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);

/**
 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
 * @res:	Pointer to the TISCI resource
 *
 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
 */
u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
{
	unsigned long flags;
	u16 set, free_bit;

	raw_spin_lock_irqsave(&res->lock, flags);
	for (set = 0; set < res->sets; set++) {
		free_bit = find_first_zero_bit(res->desc[set].res_map,
					       res->desc[set].num);
		if (free_bit != res->desc[set].num) {
			set_bit(free_bit, res->desc[set].res_map);
			raw_spin_unlock_irqrestore(&res->lock, flags);
			return res->desc[set].start + free_bit;
		}
	}
	raw_spin_unlock_irqrestore(&res->lock, flags);

	return TI_SCI_RESOURCE_NULL;
}
EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);

/**
 * ti_sci_release_resource() - Release a resource from TISCI resource.
 * @res:	Pointer to the TISCI resource
 * @id:		Resource id to be released.
 */
void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
{
	unsigned long flags;
	u16 set;

	raw_spin_lock_irqsave(&res->lock, flags);
	for (set = 0; set < res->sets; set++) {
		if (res->desc[set].start <= id &&
		    (res->desc[set].num + res->desc[set].start) > id)
			clear_bit(id - res->desc[set].start,
				  res->desc[set].res_map);
	}
	raw_spin_unlock_irqrestore(&res->lock, flags);
}
EXPORT_SYMBOL_GPL(ti_sci_release_resource);

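/*
 * Illustrative sketch (hypothetical caller, not part of this file): the two
 * helpers above form an allocate/release pair over the per-range bitmaps
 * built by devm_ti_sci_get_of_resource():
 *
 *	u16 slot = ti_sci_get_free_resource(res);
 *
 *	if (slot == TI_SCI_RESOURCE_NULL)
 *		return -ENOSPC;		// all ranges exhausted
 *	// ... program the hardware with slot ...
 *	ti_sci_release_resource(res, slot);
 */
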
/**
 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
 * @handle:	TISCI handle
 * @dev:	Device pointer to which the resource is assigned
 * @dev_id:	TISCI device id to which the resource is assigned
 * @of_prop:	property name by which the resources are represented
 *
 * Note: This function expects of_prop to be a list of one or more u32
 * resource subtypes; the resource type itself is looked up from @dev_id.
 * Allocates and initializes a ti_sci_resource structure covering each
 * subtype. Client drivers can then directly call the
 * ti_sci_(get_free, release)_resource apis for handling the resource.
 *
 * Return: Pointer to ti_sci_resource if all went well else appropriate
 * error pointer.
 */
struct ti_sci_resource *
devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
			    struct device *dev, u32 dev_id, char *of_prop)
{
	u32 resource_subtype;
	u16 resource_type;
	struct ti_sci_resource *res;
	bool valid_set = false;
	int sets, i, ret;

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
					       sizeof(u32));
	if (sets < 0) {
		dev_err(dev, "%s resource type ids not available\n", of_prop);
		return ERR_PTR(sets);
	}
	res->sets = sets;

	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
				 GFP_KERNEL);
	if (!res->desc)
		return ERR_PTR(-ENOMEM);

	ret = ti_sci_get_resource_type(handle_to_ti_sci_info(handle), dev_id,
				       &resource_type);
	if (ret) {
		dev_err(dev, "No valid resource type for %u\n", dev_id);
		return ERR_PTR(-EINVAL);
	}

	for (i = 0; i < res->sets; i++) {
		ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
						 &resource_subtype);
		if (ret)
			return ERR_PTR(-EINVAL);

		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
							resource_subtype,
							&res->desc[i].start,
							&res->desc[i].num);
		if (ret) {
			dev_dbg(dev, "type %d subtype %d not allocated for host %d\n",
				resource_type, resource_subtype,
				handle_to_ti_sci_info(handle)->host_id);
			res->desc[i].start = 0;
			res->desc[i].num = 0;
			continue;
		}

		valid_set = true;
		dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
			resource_type, resource_subtype, res->desc[i].start,
			res->desc[i].num);

		res->desc[i].res_map =
			devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
		if (!res->desc[i].res_map)
			return ERR_PTR(-ENOMEM);
	}
	raw_spin_lock_init(&res->lock);

	if (valid_set)
		return res;

	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);

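/*
 * Illustrative sketch (assumed device-tree fragment and property name, not
 * from this file): @of_prop names a u32 list of resource subtypes on the
 * client node; the resource type is looked up internally from @dev_id:
 *
 *	// device tree:
 *	//	ti,sci-rm-range-vint = <0x0>, <0x1>;
 *
 *	res = devm_ti_sci_get_of_resource(sci, dev, dev_id,
 *					  "ti,sci-rm-range-vint");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */
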
static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
				void *cmd)
{
	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
	const struct ti_sci_handle *handle = &info->handle;

	ti_sci_cmd_core_reboot(handle);

	/* call fail OR pass, we should not be here in the first place */
	return NOTIFY_BAD;
}

/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
	.rm_type_map = NULL,
};

static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
	{.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
	{.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
	{.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
	{.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
	{.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
	{.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
	{.dev_id = 0, .type = 0x000}, /* end of table */
};

/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN; the firmware can handle up to 128 messages */
	.max_msgs = 20,
	.max_msg_size = 60,
	.rm_type_map = ti_sci_am654_rm_type_map,
};

static const struct of_device_id ti_sci_of_match[] = {
	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);

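/*
 * Illustrative sketch (assumed device-tree fragment, not from this file):
 * a matching firmware node supplies the "rx"/"tx" mailbox channels that
 * probe requests below, plus the optional properties it parses; the node
 * name and mailbox specifiers here are only assumptions:
 *
 *	dmsc: dmsc@44083000 {
 *		compatible = "ti,am654-sci";
 *		ti,host-id = <12>;
 *		ti,system-reboot-controller;
 *		mbox-names = "rx", "tx";
 *		mboxes = <&secure_proxy_main 11>,
 *			 <&secure_proxy_main 13>;
 *	};
 */
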
static int ti_sci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	const struct ti_sci_desc *desc;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info = NULL;
	struct ti_sci_xfers_info *minfo;
	struct mbox_client *cl;
	int ret = -EINVAL;
	int i;
	int reboot = 0;
	u32 h_id;

	of_id = of_match_device(ti_sci_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
	/* if the property is not present in DT, use a default from desc */
	if (ret < 0) {
		info->host_id = info->desc->default_host_id;
	} else {
		if (!h_id) {
			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
			info->host_id = info->desc->default_host_id;
		} else {
			info->host_id = h_id;
		}
	}

	reboot = of_property_read_bool(dev->of_node,
				       "ti,system-reboot-controller");
	INIT_LIST_HEAD(&info->node);
	minfo = &info->minfo;

	/*
	 * Pre-allocate messages.
	 * NEVER allocate more than what we can indicate in hdr.seq;
	 * if the data description has a bug, force a fix here.
	 */
	if (WARN_ON(desc->max_msgs >=
		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
		return -EINVAL;

	minfo->xfer_block = devm_kcalloc(dev,
					 desc->max_msgs,
					 sizeof(*minfo->xfer_block),
					 GFP_KERNEL);
	if (!minfo->xfer_block)
		return -ENOMEM;

	minfo->xfer_alloc_table = devm_kcalloc(dev,
					       BITS_TO_LONGS(desc->max_msgs),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!minfo->xfer_alloc_table)
		return -ENOMEM;
	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
					      GFP_KERNEL);
		if (!xfer->xfer_buf)
			return -ENOMEM;

		xfer->tx_message.buf = xfer->xfer_buf;
		init_completion(&xfer->done);
	}

	ret = ti_sci_debugfs_create(pdev, info);
	if (ret)
		dev_warn(dev, "Failed to create debug file\n");

	platform_set_drvdata(pdev, info);

	cl = &info->cl;
	cl->dev = dev;
	cl->tx_block = false;
	cl->rx_callback = ti_sci_rx_callback;
	cl->knows_txdone = true;

	spin_lock_init(&minfo->xfer_lock);
	sema_init(&minfo->sem_xfer_count, desc->max_msgs);

	info->chan_rx = mbox_request_channel_byname(cl, "rx");
	if (IS_ERR(info->chan_rx)) {
		ret = PTR_ERR(info->chan_rx);
		goto out;
	}

	info->chan_tx = mbox_request_channel_byname(cl, "tx");
	if (IS_ERR(info->chan_tx)) {
		ret = PTR_ERR(info->chan_tx);
		goto out;
	}
	ret = ti_sci_cmd_get_revision(info);
	if (ret) {
		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
		goto out;
	}

	ti_sci_setup_ops(info);

	if (reboot) {
		info->nb.notifier_call = tisci_reboot_handler;
		info->nb.priority = 128;

		ret = register_restart_handler(&info->nb);
		if (ret) {
			dev_err(dev, "reboot registration fail(%d)\n", ret);
			/* take the common exit path to free the channels */
			goto out;
		}
	}

	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
		 info->handle.version.abi_major, info->handle.version.abi_minor,
		 info->handle.version.firmware_revision,
		 info->handle.version.firmware_description);

	mutex_lock(&ti_sci_list_mutex);
	list_add_tail(&info->node, &ti_sci_list);
	mutex_unlock(&ti_sci_list_mutex);

	return of_platform_populate(dev->of_node, NULL, NULL, dev);
out:
	if (!IS_ERR(info->chan_tx))
		mbox_free_channel(info->chan_tx);
	if (!IS_ERR(info->chan_rx))
		mbox_free_channel(info->chan_rx);
	debugfs_remove(info->d);
	return ret;
}

static int ti_sci_remove(struct platform_device *pdev)
{
	struct ti_sci_info *info;
	struct device *dev = &pdev->dev;
	int ret = 0;

	of_platform_depopulate(dev);

	info = platform_get_drvdata(pdev);

	if (info->nb.notifier_call)
		unregister_restart_handler(&info->nb);

	mutex_lock(&ti_sci_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&ti_sci_list_mutex);

	if (!ret) {
		ti_sci_debugfs_destroy(pdev, info);

		/* Safe to free channels since no more users */
		mbox_free_channel(info->chan_tx);
		mbox_free_channel(info->chan_rx);
	}

	return ret;
}

static struct platform_driver ti_sci_driver = {
	.probe = ti_sci_probe,
	.remove = ti_sci_remove,
	.driver = {
		   .name = "ti-sci",
		   .of_match_table = of_match_ptr(ti_sci_of_match),
	},
};
module_platform_driver(ti_sci_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-sci");