spi.c

/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);

        /* spi masters may cleanup for released devices */
        if (spi->master->cleanup)
                spi->master->cleanup(spi);

        spi_master_put(spi->master);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_master_##field##_show(struct device *dev, \
                                         struct device_attribute *attr, \
                                         char *buf) \
{ \
        struct spi_master *master = container_of(dev, \
                                                 struct spi_master, dev); \
        return spi_statistics_##field##_show(&master->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_master_##field = { \
        .attr = { .name = file, .mode = S_IRUGO }, \
        .show = spi_master_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
                                         struct device_attribute *attr, \
                                         char *buf) \
{ \
        struct spi_device *spi = to_spi_device(dev); \
        return spi_statistics_##field##_show(&spi->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
        .attr = { .name = file, .mode = S_IRUGO }, \
        .show = spi_device_##field##_show, \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf) \
{ \
        unsigned long flags; \
        ssize_t len; \
        spin_lock_irqsave(&stat->lock, flags); \
        len = sprintf(buf, format_string, stat->field); \
        spin_unlock_irqrestore(&stat->lock, flags); \
        return len; \
} \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string) \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
                                 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
                                 "transfer_bytes_histo_" number, \
                                 transfer_bytes_histo[index], "%lu")

SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        &dev_attr_spi_device_transfer_bytes_histo0.attr,
        &dev_attr_spi_device_transfer_bytes_histo1.attr,
        &dev_attr_spi_device_transfer_bytes_histo2.attr,
        &dev_attr_spi_device_transfer_bytes_histo3.attr,
        &dev_attr_spi_device_transfer_bytes_histo4.attr,
        &dev_attr_spi_device_transfer_bytes_histo5.attr,
        &dev_attr_spi_device_transfer_bytes_histo6.attr,
        &dev_attr_spi_device_transfer_bytes_histo7.attr,
        &dev_attr_spi_device_transfer_bytes_histo8.attr,
        &dev_attr_spi_device_transfer_bytes_histo9.attr,
        &dev_attr_spi_device_transfer_bytes_histo10.attr,
        &dev_attr_spi_device_transfer_bytes_histo11.attr,
        &dev_attr_spi_device_transfer_bytes_histo12.attr,
        &dev_attr_spi_device_transfer_bytes_histo13.attr,
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name = "statistics",
        .attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
        &dev_attr_spi_master_messages.attr,
        &dev_attr_spi_master_transfers.attr,
        &dev_attr_spi_master_errors.attr,
        &dev_attr_spi_master_timedout.attr,
        &dev_attr_spi_master_spi_sync.attr,
        &dev_attr_spi_master_spi_sync_immediate.attr,
        &dev_attr_spi_master_spi_async.attr,
        &dev_attr_spi_master_bytes.attr,
        &dev_attr_spi_master_bytes_rx.attr,
        &dev_attr_spi_master_bytes_tx.attr,
        &dev_attr_spi_master_transfer_bytes_histo0.attr,
        &dev_attr_spi_master_transfer_bytes_histo1.attr,
        &dev_attr_spi_master_transfer_bytes_histo2.attr,
        &dev_attr_spi_master_transfer_bytes_histo3.attr,
        &dev_attr_spi_master_transfer_bytes_histo4.attr,
        &dev_attr_spi_master_transfer_bytes_histo5.attr,
        &dev_attr_spi_master_transfer_bytes_histo6.attr,
        &dev_attr_spi_master_transfer_bytes_histo7.attr,
        &dev_attr_spi_master_transfer_bytes_histo8.attr,
        &dev_attr_spi_master_transfer_bytes_histo9.attr,
        &dev_attr_spi_master_transfer_bytes_histo10.attr,
        &dev_attr_spi_master_transfer_bytes_histo11.attr,
        &dev_attr_spi_master_transfer_bytes_histo12.attr,
        &dev_attr_spi_master_transfer_bytes_histo13.attr,
        &dev_attr_spi_master_transfer_bytes_histo14.attr,
        &dev_attr_spi_master_transfer_bytes_histo15.attr,
        &dev_attr_spi_master_transfer_bytes_histo16.attr,
        NULL,
};

static const struct attribute_group spi_master_statistics_group = {
        .name = "statistics",
        .attrs = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_master_statistics_group,
        NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                       struct spi_transfer *xfer,
                                       struct spi_master *master)
{
        unsigned long flags;
        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

        if (l2len < 0)
                l2len = 0;

        spin_lock_irqsave(&stats->lock, flags);

        stats->transfers++;
        stats->transfer_bytes_histo[l2len]++;

        stats->bytes += xfer->len;
        if ((xfer->tx_buf) &&
            (xfer->tx_buf != master->dummy_tx))
                stats->bytes_tx += xfer->len;
        if ((xfer->rx_buf) &&
            (xfer->rx_buf != master->dummy_rx))
                stats->bytes_rx += xfer->len;

        spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                                                const struct spi_device *sdev)
{
        while (id->name[0]) {
                if (!strcmp(sdev->modalias, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
        return 0;
}

struct bus_type spi_bus_type = {
        .name = "spi",
        .dev_groups = spi_dev_groups,
        .match = spi_match_device,
        .uevent = spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

static int spi_drv_probe(struct device *dev)
{
        const struct spi_driver *sdrv = to_spi_driver(dev->driver);
        struct spi_device *spi = to_spi_device(dev);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        if (dev->of_node) {
                spi->irq = of_irq_get(dev->of_node, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        ret = dev_pm_domain_attach(dev, true);
        if (ret != -EPROBE_DEFER) {
                ret = sdrv->probe(spi);
                if (ret)
                        dev_pm_domain_detach(dev, true);
        }

        return ret;
}

static int spi_drv_remove(struct device *dev)
{
        const struct spi_driver *sdrv = to_spi_driver(dev->driver);
        int ret;

        ret = sdrv->remove(to_spi_device(dev));
        dev_pm_domain_detach(dev, true);

        return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
        const struct spi_driver *sdrv = to_spi_driver(dev->driver);

        sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
        sdrv->driver.owner = owner;
        sdrv->driver.bus = &spi_bus_type;
        if (sdrv->probe)
                sdrv->driver.probe = spi_drv_probe;
        if (sdrv->remove)
                sdrv->driver.remove = spi_drv_remove;
        if (sdrv->shutdown)
                sdrv->driver.shutdown = spi_drv_shutdown;
        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
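
/*
 * Example (illustrative sketch, not part of this file): a minimal client
 * driver registering with the SPI core. Usually this goes through the
 * module_spi_driver() helper, which wraps __spi_register_driver() via
 * spi_register_driver(). The "foo" names are hypothetical.
 *
 *      static int foo_spi_probe(struct spi_device *spi)
 *      {
 *              return 0;       // bind succeeded
 *      }
 *
 *      static struct spi_driver foo_spi_driver = {
 *              .driver = { .name = "foo" },
 *              .probe  = foo_spi_probe,
 *      };
 *      module_spi_driver(foo_spi_driver);
 */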

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI master drivers.
 * Device registration normally goes into arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
        struct list_head list;
        struct spi_board_info board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
        struct spi_device *spi;

        if (!spi_master_get(master))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_master_put(master);
                return NULL;
        }

        spi->master = master;
        spi->dev.parent = &master->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->cs_gpio = -ENOENT;

        spin_lock_init(&spi->statistics.lock);

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->master == new_spi->master &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        static DEFINE_MUTEX(spi_add_lock);
        struct spi_master *master = spi->master;
        struct device *dev = master->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= master->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n",
                        spi->chip_select,
                        master->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        /* We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration. Lock against concurrent add() calls.
         */
        mutex_lock(&spi_add_lock);

        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                        spi->chip_select);
                goto done;
        }

        if (master->cs_gpios)
                spi->cs_gpio = master->cs_gpios[spi->chip_select];

        /* Drivers may modify this initial i/o setup, but will
         * normally rely on the device being setup. Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                        dev_name(&spi->dev), status);
                goto done;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0)
                dev_err(dev, "can't add %s, status %d\n",
                        dev_name(&spi->dev), status);
        else
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
        mutex_unlock(&spi_add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
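
/*
 * Example (illustrative sketch, not part of this file): allocating a
 * child device by hand and then adding it, as an adapter driver might.
 * "master" and the chip parameters below are assumed/hypothetical.
 *
 *      struct spi_device *spi;
 *      int status;
 *
 *      spi = spi_alloc_device(master);
 *      if (!spi)
 *              return -ENOMEM;
 *      spi->chip_select = 0;
 *      spi->max_speed_hz = 1000000;
 *      strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *      status = spi_add_device(spi);
 *      if (status < 0)
 *              spi_dev_put(spi);       // discard on failure
 *      return status;
 */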

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
                                  struct spi_board_info *chip)
{
        struct spi_device *proxy;
        int status;

        /* NOTE: caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(master);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        status = spi_add_device(proxy);
        if (status < 0) {
                spi_dev_put(proxy);
                return NULL;
        }

        return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
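
/*
 * Example (illustrative sketch, not part of this file): the one-call
 * alternative to the alloc/add pair above. The modalias "foo" and the
 * numbers are hypothetical.
 *
 *      struct spi_board_info chip = {
 *              .modalias       = "foo",
 *              .max_speed_hz   = 1000000,
 *              .chip_select    = 0,
 *              .mode           = SPI_MODE_0,
 *      };
 *      struct spi_device *proxy = spi_new_device(master, &chip);
 *
 *      if (!proxy)
 *              return -ENODEV; // spi_new_device() returns NULL on failure
 */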

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
        if (!spi)
                return;

        if (spi->dev.of_node)
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
        device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
                                          struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (master->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(master, bi);
        if (!dev)
                dev_err(master->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return -EINVAL;

        bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_master *master;

                memcpy(&bi->board_info, info, sizeof(*info));
                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(master, &spi_master_list, list)
                        spi_match_master_to_boardinfo(master, &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}
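
/*
 * Example (illustrative sketch, not part of this file): typical board
 * code declaring its hard-wired devices during early init. The modalias
 * and numbers are hypothetical.
 *
 *      static struct spi_board_info board_spi_devices[] __initdata = {
 *              {
 *                      .modalias       = "foo",
 *                      .bus_num        = 1,
 *                      .chip_select    = 0,
 *                      .max_speed_hz   = 10000000,
 *              },
 *      };
 *
 *      spi_register_board_info(board_spi_devices,
 *                              ARRAY_SIZE(board_spi_devices));
 */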

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (gpio_is_valid(spi->cs_gpio))
                gpio_set_value(spi->cs_gpio, !enable);
        else if (spi->master->set_cs)
                spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
                       struct sg_table *sgt, void *buf, size_t len,
                       enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        int desc_len;
        int sgs;
        struct page *vm_page;
        void *sg_buf;
        size_t min;
        int i, ret;

        if (vmalloced_buf) {
                desc_len = PAGE_SIZE;
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else {
                desc_len = master->max_dma_len;
                sgs = DIV_ROUND_UP(len, desc_len);
        }

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        for (i = 0; i < sgs; i++) {
                if (vmalloced_buf) {
                        min = min_t(size_t,
                                    len, desc_len - offset_in_page(buf));
                        vm_page = vmalloc_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(&sgt->sgl[i], vm_page,
                                    min, offset_in_page(buf));
                } else {
                        min = min_t(size_t, len, desc_len);
                        sg_buf = buf;
                        sg_set_buf(&sgt->sgl[i], sg_buf, min);
                }

                buf += min;
                len -= min;
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
                          struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!master->can_dma)
                return 0;

        if (master->dma_tx)
                tx_dev = master->dma_tx->device->dev;
        else
                tx_dev = &master->dev;

        if (master->dma_rx)
                rx_dev = master->dma_rx->device->dev;
        else
                rx_dev = &master->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        master->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!master->cur_msg_mapped || !master->can_dma)
                return 0;

        if (master->dma_tx)
                tx_dev = master->dma_tx->device->dev;
        else
                tx_dev = &master->dev;

        if (master->dma_rx)
                rx_dev = master->dma_rx->device->dev;
        else
                rx_dev = &master->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Restore the original value of tx_buf or rx_buf if they are
                 * NULL.
                 */
                if (xfer->tx_buf == master->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == master->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((master->flags & SPI_MASTER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((master->flags & SPI_MASTER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(master->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_tx = tmp;
                        memset(tmp, 0, max_tx);
                }

                if (max_rx) {
                        tmp = krealloc(master->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = master->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = master->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
        unsigned long ms = 1;
        struct spi_statistics *statm = &master->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;

        spi_set_cs(msg->spi, true);

        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                trace_spi_transfer_start(msg, xfer);

                spi_statistics_add_transfer_stats(statm, xfer, master);
                spi_statistics_add_transfer_stats(stats, xfer, master);

                if (xfer->tx_buf || xfer->rx_buf) {
                        reinit_completion(&master->xfer_completion);

                        ret = master->transfer_one(master, msg->spi, xfer);
                        if (ret < 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               errors);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               errors);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer failed: %d\n", ret);
                                goto out;
                        }

                        if (ret > 0) {
                                ret = 0;
                                ms = xfer->len * 8 * 1000 / xfer->speed_hz;
                                ms += ms + 100; /* some tolerance */

                                ms = wait_for_completion_timeout(&master->xfer_completion,
                                                                 msecs_to_jiffies(ms));
                        }

                        if (ms == 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               timedout);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               timedout);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer timed out\n");
                                msg->status = -ETIMEDOUT;
                        }
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
                                        "Bufferless transfer has length %u\n",
                                        xfer->len);
                }

                trace_spi_transfer_stop(msg, xfer);

                if (msg->status != -EINPROGRESS)
                        goto out;

                if (xfer->delay_usecs)
                        udelay(xfer->delay_usecs);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list,
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                spi_set_cs(msg->spi, false);
                                udelay(10);
                                spi_set_cs(msg->spi, true);
                        }
                }

                msg->actual_length += xfer->len;
        }

out:
        if (ret != 0 || !keep_cs)
                spi_set_cs(msg->spi, false);

        if (msg->status == -EINPROGRESS)
                msg->status = ret;

        if (msg->status && master->handle_err)
                master->handle_err(master, msg);

        spi_finalize_current_message(master);

        return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
        complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
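
/*
 * Example (illustrative sketch, not part of this file): how a controller
 * driver typically pairs transfer_one() with the helper above. Returning
 * a positive value from transfer_one() tells the core the transfer
 * completes asynchronously; the driver's interrupt handler then reports
 * completion. All "foo_*" names are hypothetical.
 *
 *      static int foo_transfer_one(struct spi_master *master,
 *                                  struct spi_device *spi,
 *                                  struct spi_transfer *xfer)
 *      {
 *              foo_start_dma(master, xfer);    // hypothetical helper
 *              return 1;                       // transfer still in flight
 *      }
 *
 *      static irqreturn_t foo_irq(int irq, void *dev_id)
 *      {
 *              struct spi_master *master = dev_id;
 *
 *              spi_finalize_current_transfer(master);
 *              return IRQ_HANDLED;
 *      }
 */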

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
        unsigned long flags;
        bool was_busy = false;
        int ret;

        /* Lock queue */
        spin_lock_irqsave(&master->queue_lock, flags);

        /* Make sure we are not already running a message */
        if (master->cur_msg) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* If another context is idling the device then defer */
        if (master->idling) {
                queue_kthread_work(&master->kworker, &master->pump_messages);
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Check if the queue is idle */
        if (list_empty(&master->queue) || !master->running) {
                if (!master->busy) {
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        queue_kthread_work(&master->kworker,
                                           &master->pump_messages);
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                master->busy = false;
                master->idling = true;
                spin_unlock_irqrestore(&master->queue_lock, flags);

                kfree(master->dummy_rx);
                master->dummy_rx = NULL;
                kfree(master->dummy_tx);
                master->dummy_tx = NULL;
                if (master->unprepare_transfer_hardware &&
                    master->unprepare_transfer_hardware(master))
                        dev_err(&master->dev,
                                "failed to unprepare transfer hardware\n");
                if (master->auto_runtime_pm) {
                        pm_runtime_mark_last_busy(master->dev.parent);
                        pm_runtime_put_autosuspend(master->dev.parent);
                }
                trace_spi_master_idle(master);

                spin_lock_irqsave(&master->queue_lock, flags);
                master->idling = false;
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Extract head of queue */
        master->cur_msg =
                list_first_entry(&master->queue, struct spi_message, queue);

        list_del_init(&master->cur_msg->queue);
        if (master->busy)
                was_busy = true;
        else
                master->busy = true;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        if (!was_busy && master->auto_runtime_pm) {
                ret = pm_runtime_get_sync(master->dev.parent);
                if (ret < 0) {
                        dev_err(&master->dev, "Failed to power device: %d\n",
                                ret);
                        return;
                }
        }

        if (!was_busy)
                trace_spi_master_busy(master);

        if (!was_busy && master->prepare_transfer_hardware) {
                ret = master->prepare_transfer_hardware(master);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare transfer hardware\n");

                        if (master->auto_runtime_pm)
                                pm_runtime_put(master->dev.parent);
                        return;
                }
        }

        trace_spi_message_start(master->cur_msg);

        if (master->prepare_message) {
                ret = master->prepare_message(master, master->cur_msg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare message: %d\n", ret);
                        master->cur_msg->status = ret;
                        spi_finalize_current_message(master);
                        return;
                }
                master->cur_msg_prepared = true;
        }

        ret = spi_map_msg(master, master->cur_msg);
        if (ret) {
                master->cur_msg->status = ret;
                spi_finalize_current_message(master);
                return;
        }

        ret = master->transfer_one_message(master, master->cur_msg);
        if (ret) {
                dev_err(&master->dev,
                        "failed to transfer one message from queue\n");
                return;
        }
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
        struct spi_master *master =
                container_of(work, struct spi_master, pump_messages);

        __spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

        master->running = false;
        master->busy = false;

        init_kthread_worker(&master->kworker);
        master->kworker_task = kthread_run(kthread_worker_fn,
                                           &master->kworker, "%s",
                                           dev_name(&master->dev));
        if (IS_ERR(master->kworker_task)) {
                dev_err(&master->dev, "failed to create message pump task\n");
                return PTR_ERR(master->kworker_task);
        }
        init_kthread_work(&master->pump_messages, spi_pump_messages);

        /*
         * Master config will indicate if this controller should run the
         * message pump with high (realtime) priority to reduce the transfer
         * latency on the bus by minimising the delay between a transfer
         * request and the scheduling of the message pump thread. Without this
         * setting the message pump thread will remain at default priority.
         */
        if (master->rt) {
                dev_info(&master->dev,
                         "will run message pump with realtime priority\n");
                sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
        }

        return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
        struct spi_message *next;
        unsigned long flags;

        /* get a pointer to the next message, if any */
        spin_lock_irqsave(&master->queue_lock, flags);
        next = list_first_entry_or_null(&master->queue, struct spi_message,
                                        queue);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
        struct spi_message *mesg;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&master->queue_lock, flags);
        mesg = master->cur_msg;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        spi_unmap_msg(master, mesg);

        if (master->cur_msg_prepared && master->unprepare_message) {
                ret = master->unprepare_message(master, mesg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to unprepare message: %d\n", ret);
                }
        }

        spin_lock_irqsave(&master->queue_lock, flags);
        master->cur_msg = NULL;
        master->cur_msg_prepared = false;
        queue_kthread_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        trace_spi_message_done(mesg);

        mesg->state = NULL;
        if (mesg->complete)
                mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
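
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * supplies its own transfer_one_message() must finalize each message
 * itself when done, as the default implementation above does. The
 * "foo_*" names are hypothetical.
 *
 *      static int foo_transfer_one_message(struct spi_master *master,
 *                                          struct spi_message *msg)
 *      {
 *              msg->status = foo_do_transfers(master, msg); // hypothetical
 *              spi_finalize_current_message(master);
 *              return 0;
 *      }
 */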

static int spi_start_queue(struct spi_master *master)
{
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (master->running || master->busy) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -EBUSY;
        }

        master->running = true;
        master->cur_msg = NULL;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        queue_kthread_work(&master->kworker, &master->pump_messages);

        return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
        unsigned long flags;
        unsigned limit = 500;
        int ret = 0;

        spin_lock_irqsave(&master->queue_lock, flags);

        /*
         * This is a bit lame, but is optimized for the common execution path.
         * A wait_queue on the master->busy could be used, but then the common
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead.
         */
        while ((!list_empty(&master->queue) || master->busy) && limit--) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                usleep_range(10000, 11000);
                spin_lock_irqsave(&master->queue_lock, flags);
        }

        if (!list_empty(&master->queue) || master->busy)
                ret = -EBUSY;
        else
                master->running = false;

        spin_unlock_irqrestore(&master->queue_lock, flags);

        if (ret) {
                dev_warn(&master->dev,
                         "could not stop message queue\n");
                return ret;
        }
        return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
        int ret;

        ret = spi_stop_queue(master);

        /*
         * flush_kthread_worker will block until all work is done.
         * If the reason that stop_queue timed out is that the work will never
         * finish, then it does no good to call flush/stop thread, so
         * return anyway.
         */
        if (ret) {
                dev_err(&master->dev, "problem destroying queue\n");
                return ret;
        }

        flush_kthread_worker(&master->kworker);
        kthread_stop(master->kworker_task);

        return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
                                 struct spi_message *msg,
                                 bool need_pump)
{
        struct spi_master *master = spi->master;
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (!master->running) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -ESHUTDOWN;
        }
        msg->actual_length = 0;
        msg->status = -EINPROGRESS;

        list_add_tail(&msg->queue, &master->queue);
        if (!master->busy && need_pump)
                queue_kthread_work(&master->kworker, &master->pump_messages);

        spin_unlock_irqrestore(&master->queue_lock, flags);
        return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be handled; it is queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
  1186. static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
  1187. {
  1188. return __spi_queued_transfer(spi, msg, true);
  1189. }
  1190. static int spi_master_initialize_queue(struct spi_master *master)
  1191. {
  1192. int ret;
  1193. master->transfer = spi_queued_transfer;
  1194. if (!master->transfer_one_message)
  1195. master->transfer_one_message = spi_transfer_one_message;
  1196. /* Initialize and start queue */
  1197. ret = spi_init_queue(master);
  1198. if (ret) {
  1199. dev_err(&master->dev, "problem initializing queue\n");
  1200. goto err_init_queue;
  1201. }
  1202. master->queued = true;
  1203. ret = spi_start_queue(master);
  1204. if (ret) {
  1205. dev_err(&master->dev, "problem starting queue\n");
  1206. goto err_start_queue;
  1207. }
  1208. return 0;
  1209. err_start_queue:
  1210. spi_destroy_queue(master);
  1211. err_init_queue:
  1212. return ret;
  1213. }
  1214. /*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				"spi-tx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				"spi-rx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
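
/*
 * For reference, a device tree fragment that exercises the parsing above
 * might look like this (illustrative only; the node, compatible string
 * and values are made up):
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <20000000>;
 *			spi-cpol;
 *			spi-cpha;
 *			spi-tx-bus-width = <2>;
 *		};
 *	};
 *
 * "reg" becomes chip_select, "spi-max-frequency" becomes max_speed_hz,
 * and the boolean/width properties set the corresponding mode bits.
 */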
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi))
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				nc->full_name);
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}
static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */
static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};
/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = dev;
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
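
/*
 * Illustrative sketch: a typical controller probe() allocates the master
 * together with its driver-private state (struct foo_priv is hypothetical):
 *
 *	struct foo_priv *priv;
 *	struct spi_master *master;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	priv = spi_master_get_devdata(master);
 *
 * The private area lives directly behind the spi_master allocation (note
 * the &master[1] above), so a single kzalloc covers both.
 */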
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif
/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
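
/*
 * Illustrative sketch of the alloc/register/put contract documented
 * above; the foo_* names are hypothetical.  On registration failure the
 * caller still owns the sole reference and must drop it:
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	master->num_chipselect = 4;
 *	master->transfer_one = foo_transfer_one;
 *	status = spi_register_master(master);
 *	if (status)
 *		spi_master_put(master);
 *	return status;
 */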
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}
/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered when @dev is unbound.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
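
/*
 * Illustrative sketch: with the managed variant the remove() side of the
 * previous example disappears, since unregistration is tied to @dev via
 * devres.  The error path keeps its spi_master_put(), because a failed
 * registration leaves the allocation reference with the caller:
 *
 *	status = devm_spi_register_master(&pdev->dev, master);
 *	if (status)
 *		spi_master_put(master);
 *	return status;
 */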
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);
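
/*
 * Illustrative sketch: controller drivers typically wire these helpers
 * into their dev_pm_ops.  The foo_* names are hypothetical, and this
 * assumes the driver stored the master as its drvdata:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *
 *		return spi_master_suspend(master);
 *	}
 *
 * with a matching foo_resume() calling spi_master_resume(), so the
 * message queue is quiesced before the controller loses state.
 */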
static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}
/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
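
/*
 * Illustrative sketch: since the lookup above returns a refcounted
 * device, callers must drop the reference when done (bus number 0 is
 * just an example):
 *
 *	struct spi_master *master = spi_busnum_to_master(0);
 *
 *	if (master) {
 *		...
 *		spi_master_put(master);
 *	}
 */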
/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
				SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}
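
/*
 * For reference: a controller declares the word sizes it accepts by
 * populating bits_per_word_mask, e.g. with the SPI_BPW_MASK() or
 * SPI_BPW_RANGE_MASK() helpers from <linux/spi/spi.h> (the range below
 * is an arbitrary example):
 *
 *	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
 *
 * An empty mask (the default) skips the check above entirely.
 */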
/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status;

	/* check mode to prevent DUAL and QUAD from being set at the
	 * same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
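
/*
 * Illustrative sketch: a protocol driver adjusting its device before
 * first use (the mode, word size and speed values are made up):
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 */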
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}
/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
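
/*
 * Illustrative sketch: an asynchronous submission with a completion
 * callback (the foo_* names and layout are hypothetical).  The message
 * and its buffers must stay allocated until the callback has run:
 *
 *	spi_message_init(&foo->msg);
 *	foo->xfer.tx_buf = foo->tx;
 *	foo->xfer.len = sizeof(foo->tx);
 *	spi_message_add_tail(&foo->xfer, &foo->msg);
 *	foo->msg.complete = foo_complete;
 *	foo->msg.context = foo;
 *	status = spi_async(spi, &foo->msg);
 */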
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);
/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}
static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}
/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
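
/*
 * Illustrative sketch: a synchronous two-part message, command out
 * followed by response in, built on the caller's stack (cmd and resp
 * are assumed to be suitable local buffers):
 *
 *	struct spi_transfer t[2] = {
 *		{ .tx_buf = cmd, .len = sizeof(cmd), },
 *		{ .rx_buf = resp, .len = sizeof(resp), },
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t[0], &m);
 *	spi_message_add_tail(&t[1], &m);
 *	status = spi_sync(spi, &m);
 */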
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
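
/*
 * Illustrative sketch: the intended lock/transfer/unlock sequence for a
 * driver that must keep other clients off the bus between two messages
 * (m1 and m2 are assumed to be prepared spi_messages):
 *
 *	spi_bus_lock(master);
 *	status = spi_sync_locked(spi, &m1);
 *	if (status == 0)
 *		status = spi_sync_locked(spi, &m2);
 *	spi_bus_unlock(master);
 */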
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;
/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
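
/*
 * Illustrative sketch: the classic register-read idiom this helper
 * exists for, one command byte out and one status byte back (the
 * command value is made up):
 *
 *	u8 cmd = 0x05;
 *	u8 val;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 */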
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with the returned spi_device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi masters are not on the spi_bus, so we find them another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&master->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}
/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);