dw_mmc.c 85 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395
  1. /*
  2. * Synopsys DesignWare Multimedia Card Interface driver
  3. * (Based on NXP driver for lpc 31xx)
  4. *
  5. * Copyright (C) 2009 NXP Semiconductors
  6. * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. */
  13. #include <linux/blkdev.h>
  14. #include <linux/clk.h>
  15. #include <linux/debugfs.h>
  16. #include <linux/device.h>
  17. #include <linux/dma-mapping.h>
  18. #include <linux/err.h>
  19. #include <linux/init.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/ioport.h>
  22. #include <linux/module.h>
  23. #include <linux/platform_device.h>
  24. #include <linux/seq_file.h>
  25. #include <linux/slab.h>
  26. #include <linux/stat.h>
  27. #include <linux/delay.h>
  28. #include <linux/irq.h>
  29. #include <linux/mmc/card.h>
  30. #include <linux/mmc/host.h>
  31. #include <linux/mmc/mmc.h>
  32. #include <linux/mmc/sd.h>
  33. #include <linux/mmc/sdio.h>
  34. #include <linux/mmc/dw_mmc.h>
  35. #include <linux/bitops.h>
  36. #include <linux/regulator/consumer.h>
  37. #include <linux/of.h>
  38. #include <linux/of_gpio.h>
  39. #include <linux/mmc/slot-gpio.h>
  40. #include "dw_mmc.h"
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
/* NOTE(review): presumably transfer-direction markers; users are outside this chunk — confirm */
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
/* NOTE(review): presumably minimum size (bytes) before DMA is preferred over PIO — confirm at call sites */
#define DW_MCI_DMA_THRESHOLD	16
#define DW_MCI_FREQ_MAX		200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN		100000		/* unit: HZ */
/* All IDMAC interrupt status bits; used to clear IDSTS/IDSTS64 in one write */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)
/* The IDMAC descriptor ring occupies exactly one page */
#define DESC_RING_BUF_SZ	PAGE_SIZE
/* IDMAC hardware descriptor layout when 64-bit addressing is in use */
struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
	u32		des1;	/* Reserved */
	u32		des2;	/* Buffer sizes */
/* Write the 13-bit buffer-1 size field (bits 12:0) of des2, preserving bits 25:13 */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1*/
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1*/

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};
/* IDMAC hardware descriptor layout for 32-bit addressing (little-endian fields) */
struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable interrupt on completion */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of a transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of a transfer */
#define IDMAC_DES0_CH	BIT(4)	/* second address is the next-descriptor chain pointer */
#define IDMAC_DES0_ER	BIT(5)	/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* card error summary */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor owned by the IDMAC, not the CPU */

	__le32		des1;	/* Buffer sizes */
/* Write the 13-bit buffer-1 size field (bits 12:0) of des1, preserving bits 25:13 */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};
/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

/* Forward declarations; the definitions appear later in this file */
static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);
static int dw_mci_get_cd(struct mmc_host *mmc);
  93. #if defined(CONFIG_DEBUG_FS)
  94. static int dw_mci_req_show(struct seq_file *s, void *v)
  95. {
  96. struct dw_mci_slot *slot = s->private;
  97. struct mmc_request *mrq;
  98. struct mmc_command *cmd;
  99. struct mmc_command *stop;
  100. struct mmc_data *data;
  101. /* Make sure we get a consistent snapshot */
  102. spin_lock_bh(&slot->host->lock);
  103. mrq = slot->mrq;
  104. if (mrq) {
  105. cmd = mrq->cmd;
  106. data = mrq->data;
  107. stop = mrq->stop;
  108. if (cmd)
  109. seq_printf(s,
  110. "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
  111. cmd->opcode, cmd->arg, cmd->flags,
  112. cmd->resp[0], cmd->resp[1], cmd->resp[2],
  113. cmd->resp[2], cmd->error);
  114. if (data)
  115. seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
  116. data->bytes_xfered, data->blocks,
  117. data->blksz, data->flags, data->error);
  118. if (stop)
  119. seq_printf(s,
  120. "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
  121. stop->opcode, stop->arg, stop->flags,
  122. stop->resp[0], stop->resp[1], stop->resp[2],
  123. stop->resp[2], stop->error);
  124. }
  125. spin_unlock_bh(&slot->host->lock);
  126. return 0;
  127. }
  128. static int dw_mci_req_open(struct inode *inode, struct file *file)
  129. {
  130. return single_open(file, dw_mci_req_show, inode->i_private);
  131. }
/* File operations for the per-slot "req" debugfs entry (seq_file backed) */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Dump a snapshot of key controller registers to the "regs" debugfs file.
 * NOTE(review): the registers are read without taking host->lock, so the
 * snapshot may be inconsistent across lines — presumably acceptable for a
 * debug view; confirm.
 */
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}
  150. static int dw_mci_regs_open(struct inode *inode, struct file *file)
  151. {
  152. return single_open(file, dw_mci_regs_show, inode->i_private);
  153. }
/* File operations for the "regs" debugfs entry (seq_file backed) */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  161. static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
  162. {
  163. struct mmc_host *mmc = slot->mmc;
  164. struct dw_mci *host = slot->host;
  165. struct dentry *root;
  166. struct dentry *node;
  167. root = mmc->debugfs_root;
  168. if (!root)
  169. return;
  170. node = debugfs_create_file("regs", S_IRUSR, root, host,
  171. &dw_mci_regs_fops);
  172. if (!node)
  173. goto err;
  174. node = debugfs_create_file("req", S_IRUSR, root, slot,
  175. &dw_mci_req_fops);
  176. if (!node)
  177. goto err;
  178. node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
  179. if (!node)
  180. goto err;
  181. node = debugfs_create_x32("pending_events", S_IRUSR, root,
  182. (u32 *)&host->pending_events);
  183. if (!node)
  184. goto err;
  185. node = debugfs_create_x32("completed_events", S_IRUSR, root,
  186. (u32 *)&host->completed_events);
  187. if (!node)
  188. goto err;
  189. return;
  190. err:
  191. dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
  192. }
  193. #endif /* defined(CONFIG_DEBUG_FS) */
  194. static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
/*
 * Translate an mmc_command into the controller's CMD register flags.
 *
 * Also performs the CMD11 (voltage switch) preparation: sets
 * SDMMC_CMD_VOLT_SWITCH, advances the state machine to
 * STATE_SENDING_CMD11, disables low-power clock gating for the slot and
 * pushes the new clock setting to the card.
 *
 * Returns the SDMMC_CMD_* flag word, not including SDMMC_CMD_START.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	/*
	 * Stop/abort-class opcodes (and CMD52 writes to the CCCR ABORT
	 * register, bits 9..25 of the argument holding the register
	 * address) get the STOP bit; any other data command waits for
	 * the previous data transfer to finish first.
	 */
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* Route the command through the HOLD register unless the slot opts out */
	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}
  251. static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
  252. {
  253. struct mmc_command *stop;
  254. u32 cmdr;
  255. if (!cmd->data)
  256. return 0;
  257. stop = &host->stop_abort;
  258. cmdr = cmd->opcode;
  259. memset(stop, 0, sizeof(struct mmc_command));
  260. if (cmdr == MMC_READ_SINGLE_BLOCK ||
  261. cmdr == MMC_READ_MULTIPLE_BLOCK ||
  262. cmdr == MMC_WRITE_BLOCK ||
  263. cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
  264. cmdr == MMC_SEND_TUNING_BLOCK ||
  265. cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
  266. stop->opcode = MMC_STOP_TRANSMISSION;
  267. stop->arg = 0;
  268. stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
  269. } else if (cmdr == SD_IO_RW_EXTENDED) {
  270. stop->opcode = SD_IO_RW_DIRECT;
  271. stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
  272. ((cmd->arg >> 28) & 0x7);
  273. stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
  274. } else {
  275. return 0;
  276. }
  277. cmdr = stop->opcode | SDMMC_CMD_STOP |
  278. SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
  279. if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags))
  280. cmdr |= SDMMC_CMD_USE_HOLD_REG;
  281. return cmdr;
  282. }
  283. static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
  284. {
  285. unsigned long timeout = jiffies + msecs_to_jiffies(500);
  286. /*
  287. * Databook says that before issuing a new data transfer command
  288. * we need to check to see if the card is busy. Data transfer commands
  289. * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
  290. *
  291. * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
  292. * expected.
  293. */
  294. if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
  295. !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
  296. while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
  297. if (time_after(jiffies, timeout)) {
  298. /* Command will fail; we'll pass error then */
  299. dev_err(host->dev, "Busy; trying anyway\n");
  300. break;
  301. }
  302. udelay(10);
  303. }
  304. }
  305. }
/*
 * Latch @cmd as the in-flight command and launch it in hardware.
 * The sequence is: write CMDARG, drain the write buffer (wmb), wait for
 * any card-busy condition required by @cmd_flags to clear, then write
 * CMD with the START bit set to kick off the command.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
  318. static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
  319. {
  320. struct mmc_command *stop = &host->stop_abort;
  321. dw_mci_start_command(host, stop, host->stop_cmdr);
  322. }
/* DMA interface functions */
/*
 * Stop an in-progress DMA transfer (via the active dma_ops) and mark
 * the transfer phase complete so the state machine can move on.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
  333. static int dw_mci_get_dma_dir(struct mmc_data *data)
  334. {
  335. if (data->flags & MMC_DATA_WRITE)
  336. return DMA_TO_DEVICE;
  337. else
  338. return DMA_FROM_DEVICE;
  339. }
  340. static void dw_mci_dma_cleanup(struct dw_mci *host)
  341. {
  342. struct mmc_data *data = host->data;
  343. if (data && data->host_cookie == COOKIE_MAPPED) {
  344. dma_unmap_sg(host->dev,
  345. data->sg,
  346. data->sg_len,
  347. dw_mci_get_dma_dir(data));
  348. data->host_cookie = COOKIE_UNMAPPED;
  349. }
  350. }
  351. static void dw_mci_idmac_reset(struct dw_mci *host)
  352. {
  353. u32 bmod = mci_readl(host, BMOD);
  354. /* Software reset of DMA */
  355. bmod |= SDMMC_IDMAC_SWRESET;
  356. mci_writel(host, BMOD, bmod);
  357. }
/*
 * Shut down the internal DMA controller: first detach it from the data
 * path and reset the DMA interface (CTRL), then stop the IDMAC state
 * machine and software-reset it (BMOD) — the code performs the CTRL
 * write before the BMOD write.
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
/*
 * DMA-completion callback shared by the DMA backends (@arg is the host).
 * For external-DMA reads the scatterlist is synced back to the CPU
 * before use, then the mapping is released and — if the request still
 * has data attached — transfer completion is signalled to the tasklet.
 */
static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
/*
 * Build the IDMAC descriptor ring in host->sg_cpu (DMA address
 * host->sg_dma) and program the controller with its base address.
 *
 * The ring fills one page (DESC_RING_BUF_SZ); each descriptor's next
 * pointer links to the following one and the last descriptor is marked
 * end-of-ring (IDMAC_DES0_ER), pointing back to the start.  The layout
 * and register set differ between the 32-bit and 64-bit addressing
 * variants of the controller.
 *
 * Always returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;

		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;

		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}
/*
 * Fill the 64-bit-address IDMAC descriptor ring for @data's mapped
 * scatterlist.  Each sg entry is split into chunks of at most
 * DW_MCI_DESC_DATA_LENGTH (4KB) and one descriptor is consumed per
 * chunk.  The first descriptor gets FD, the last gets LD (with chaining
 * and interrupt-disable bits cleared).
 *
 * Returns 0 on success, or -EINVAL if a descriptor stays owned by the
 * IDMAC past a 100 ms poll — in which case the ring is wiped and
 * rebuilt via dw_mci_idmac_init().
 */
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	unsigned long timeout;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			timeout = jiffies + msecs_to_jiffies(100);
			while (readl(&desc->des0) & IDMAC_DES0_OWN) {
				if (time_after(jiffies, timeout))
					goto err_own_bit;
				udelay(10);
			}

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
/*
 * Fill the 32-bit-address IDMAC descriptor ring for @data's mapped
 * scatterlist — the little-endian counterpart of
 * dw_mci_prepare_desc64().  Each sg entry is split into chunks of at
 * most DW_MCI_DESC_DATA_LENGTH (4KB), one descriptor per chunk; the
 * first descriptor gets FD, the last gets LD with chaining and
 * interrupt-disable bits cleared.
 *
 * Returns 0 on success, or -EINVAL if a descriptor stays owned by the
 * IDMAC past a 100 ms poll — the ring is then wiped and rebuilt via
 * dw_mci_idmac_init().
 */
static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	unsigned long timeout;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			timeout = jiffies + msecs_to_jiffies(100);
			while (readl(&desc->des0) &
			       cpu_to_le32(IDMAC_DES0_OWN)) {
				if (time_after(jiffies, timeout))
					goto err_own_bit;
				udelay(10);
			}

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
				       IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
/*
 * Kick off an internal-DMAC (IDMAC) transfer: build the descriptor ring
 * (64- or 32-bit variant depending on the controller), select the IDMAC
 * data path, enable it and issue a poll demand.
 *
 * Returns 0 on success or the error from descriptor preparation.
 */
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer so descriptors are visible before DMA starts */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC (fixed burst mode) */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}
/* DMA ops for the internal (IDMAC) DMA controller. */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
/* Abort any in-flight transfer on the external dmaengine channel. */
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}
  615. static int dw_mci_edmac_start_dma(struct dw_mci *host,
  616. unsigned int sg_len)
  617. {
  618. struct dma_slave_config cfg;
  619. struct dma_async_tx_descriptor *desc = NULL;
  620. struct scatterlist *sgl = host->data->sg;
  621. const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
  622. u32 sg_elems = host->data->sg_len;
  623. u32 fifoth_val;
  624. u32 fifo_offset = host->fifo_reg - host->regs;
  625. int ret = 0;
  626. /* Set external dma config: burst size, burst width */
  627. cfg.dst_addr = host->phy_regs + fifo_offset;
  628. cfg.src_addr = cfg.dst_addr;
  629. cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  630. cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  631. /* Match burst msize with external dma config */
  632. fifoth_val = mci_readl(host, FIFOTH);
  633. cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
  634. cfg.src_maxburst = cfg.dst_maxburst;
  635. if (host->data->flags & MMC_DATA_WRITE)
  636. cfg.direction = DMA_MEM_TO_DEV;
  637. else
  638. cfg.direction = DMA_DEV_TO_MEM;
  639. ret = dmaengine_slave_config(host->dms->ch, &cfg);
  640. if (ret) {
  641. dev_err(host->dev, "Failed to config edmac.\n");
  642. return -EBUSY;
  643. }
  644. desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
  645. sg_len, cfg.direction,
  646. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  647. if (!desc) {
  648. dev_err(host->dev, "Can't prepare slave sg.\n");
  649. return -EBUSY;
  650. }
  651. /* Set dw_mci_dmac_complete_dma as callback */
  652. desc->callback = dw_mci_dmac_complete_dma;
  653. desc->callback_param = (void *)host;
  654. dmaengine_submit(desc);
  655. /* Flush cache before write */
  656. if (host->data->flags & MMC_DATA_WRITE)
  657. dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
  658. sg_elems, DMA_TO_DEVICE);
  659. dma_async_issue_pending(host->dms->ch);
  660. return 0;
  661. }
  662. static int dw_mci_edmac_init(struct dw_mci *host)
  663. {
  664. /* Request external dma channel */
  665. host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
  666. if (!host->dms)
  667. return -ENOMEM;
  668. host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
  669. if (!host->dms->ch) {
  670. dev_err(host->dev, "Failed to get external DMA channel.\n");
  671. kfree(host->dms);
  672. host->dms = NULL;
  673. return -ENXIO;
  674. }
  675. return 0;
  676. }
  677. static void dw_mci_edmac_exit(struct dw_mci *host)
  678. {
  679. if (host->dms) {
  680. if (host->dms->ch) {
  681. dma_release_channel(host->dms->ch);
  682. host->dms->ch = NULL;
  683. }
  684. kfree(host->dms);
  685. host->dms = NULL;
  686. }
  687. }
/* DMA ops for an external dmaengine-provided DMA controller. */
static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
  696. static int dw_mci_pre_dma_transfer(struct dw_mci *host,
  697. struct mmc_data *data,
  698. int cookie)
  699. {
  700. struct scatterlist *sg;
  701. unsigned int i, sg_len;
  702. if (data->host_cookie == COOKIE_PRE_MAPPED)
  703. return data->sg_len;
  704. /*
  705. * We don't do DMA on "complex" transfers, i.e. with
  706. * non-word-aligned buffers or lengths. Also, we don't bother
  707. * with all the DMA setup overhead for short transfers.
  708. */
  709. if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
  710. return -EINVAL;
  711. if (data->blksz & 3)
  712. return -EINVAL;
  713. for_each_sg(data->sg, sg, data->sg_len, i) {
  714. if (sg->offset & 3 || sg->length & 3)
  715. return -EINVAL;
  716. }
  717. sg_len = dma_map_sg(host->dev,
  718. data->sg,
  719. data->sg_len,
  720. dw_mci_get_dma_dir(data));
  721. if (sg_len == 0)
  722. return -EINVAL;
  723. data->host_cookie = cookie;
  724. return sg_len;
  725. }
  726. static void dw_mci_pre_req(struct mmc_host *mmc,
  727. struct mmc_request *mrq)
  728. {
  729. struct dw_mci_slot *slot = mmc_priv(mmc);
  730. struct mmc_data *data = mrq->data;
  731. if (!slot->host->use_dma || !data)
  732. return;
  733. /* This data might be unmapped at this time */
  734. data->host_cookie = COOKIE_UNMAPPED;
  735. if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
  736. COOKIE_PRE_MAPPED) < 0)
  737. data->host_cookie = COOKIE_UNMAPPED;
  738. }
  739. static void dw_mci_post_req(struct mmc_host *mmc,
  740. struct mmc_request *mrq,
  741. int err)
  742. {
  743. struct dw_mci_slot *slot = mmc_priv(mmc);
  744. struct mmc_data *data = mrq->data;
  745. if (!slot->host->use_dma || !data)
  746. return;
  747. if (data->host_cookie != COOKIE_UNMAPPED)
  748. dma_unmap_sg(slot->host->dev,
  749. data->sg,
  750. data->sg_len,
  751. dw_mci_get_dma_dir(data));
  752. data->host_cookie = COOKIE_UNMAPPED;
  753. }
/*
 * Pick DMA burst size (MSIZE) and RX/TX FIFO watermarks for this block
 * size and program them into FIFOTH. Picks the largest burst that
 * divides both the block depth and the TX watermark complement.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* pio should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	/* Search from the largest burst size downwards. */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}
  789. static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
  790. {
  791. unsigned int blksz = data->blksz;
  792. u32 blksz_depth, fifo_depth;
  793. u16 thld_size;
  794. u8 enable;
  795. /*
  796. * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
  797. * in the FIFO region, so we really shouldn't access it).
  798. */
  799. if (host->verid < DW_MMC_240A ||
  800. (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
  801. return;
  802. /*
  803. * Card write Threshold is introduced since 2.80a
  804. * It's used when HS400 mode is enabled.
  805. */
  806. if (data->flags & MMC_DATA_WRITE &&
  807. !(host->timing != MMC_TIMING_MMC_HS400))
  808. return;
  809. if (data->flags & MMC_DATA_WRITE)
  810. enable = SDMMC_CARD_WR_THR_EN;
  811. else
  812. enable = SDMMC_CARD_RD_THR_EN;
  813. if (host->timing != MMC_TIMING_MMC_HS200 &&
  814. host->timing != MMC_TIMING_UHS_SDR104)
  815. goto disable;
  816. blksz_depth = blksz / (1 << host->data_shift);
  817. fifo_depth = host->fifo_depth;
  818. if (blksz_depth > fifo_depth)
  819. goto disable;
  820. /*
  821. * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
  822. * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
  823. * Currently just choose blksz.
  824. */
  825. thld_size = blksz;
  826. mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
  827. return;
  828. disable:
  829. mci_writel(host, CDTHRCTL, 0);
  830. }
/*
 * Try to set up the data phase of a request over DMA.
 *
 * Returns 0 when DMA was started; a negative error when the caller must
 * fall back to PIO. host->using_dma reflects the outcome.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		/* Transfer unsuitable for DMA; make sure DMA is idle. */
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}
/*
 * Prepare the data phase of a request: set direction, program the card
 * threshold, then try DMA and fall back to interrupt-driven PIO (via
 * sg_miter) if DMA setup fails.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		/* DMA unavailable or unsuitable: fall back to PIO. */
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		/* Clear stale FIFO events, then unmask RX/TX IRQs for PIO. */
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
/*
 * Issue a synchronous "internal" command (e.g. clock update) to the CIU
 * and poll up to 500ms for the controller to clear the START bit.
 * Only logs on timeout; callers have no error path for this.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	/* Hardware clears SDMMC_CMD_START once the command is taken. */
	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
/*
 * Program the card clock and bus width for @slot. Every CLKENA/CLKDIV/
 * CLKSRC change must be latched into the card interface unit (CIU) with
 * an update-clock command, hence the repeated mci_send_cmd() calls.
 *
 * @force_clkinit: reprogram (and log) even when the rate is unchanged.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		/* Requested rate 0: gate the card clock entirely. */
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		/* CLKDIV divides by 2*div; 0 means bypass (bus_hz == clock). */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock != slot->__clk_old &&
			!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
			force_clkinit) {
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
					slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
/*
 * Actually start @cmd (and its data phase, if any) for @slot. Resets
 * per-request host bookkeeping, programs byte/block counts, submits the
 * data and issues the command. Also arms a software timeout for the
 * voltage-switch command (CMD11), whose completion IRQ can be late.
 *
 * Called with host->lock held.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	/* Precompute the stop/abort command in case the transfer errors out. */
	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
  1056. static void dw_mci_start_request(struct dw_mci *host,
  1057. struct dw_mci_slot *slot)
  1058. {
  1059. struct mmc_request *mrq = slot->mrq;
  1060. struct mmc_command *cmd;
  1061. cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
  1062. __dw_mci_start_request(host, slot, cmd);
  1063. }
  1064. /* must be called with host->lock held */
  1065. static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
  1066. struct mmc_request *mrq)
  1067. {
  1068. dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
  1069. host->state);
  1070. slot->mrq = mrq;
  1071. if (host->state == STATE_WAITING_CMD11_DONE) {
  1072. dev_warn(&slot->mmc->class_dev,
  1073. "Voltage change didn't complete\n");
  1074. /*
  1075. * this case isn't expected to happen, so we can
  1076. * either crash here or just try to continue on
  1077. * in the closest possible state
  1078. */
  1079. host->state = STATE_IDLE;
  1080. }
  1081. if (host->state == STATE_IDLE) {
  1082. host->state = STATE_SENDING_CMD;
  1083. dw_mci_start_request(host, slot);
  1084. } else {
  1085. list_add_tail(&slot->queue_node, &host->queue);
  1086. }
  1087. }
/*
 * mmc_host_ops.request hook: entry point for every request from the
 * MMC core. Fails fast with -ENOMEDIUM when no card is present,
 * otherwise queues the request under host->lock.
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}
/*
 * mmc_host_ops.set_ios hook: apply bus width, timing (DDR bit in
 * UHS_REG), clock and power-state changes requested by the MMC core,
 * delegating SoC-specific tweaks to drv_data->set_ios.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		/* First command after power-up must carry the init clock. */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	/* A non-zero clock while waiting for CMD11 means the switch is over. */
	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}
  1196. static int dw_mci_card_busy(struct mmc_host *mmc)
  1197. {
  1198. struct dw_mci_slot *slot = mmc_priv(mmc);
  1199. u32 status;
  1200. /*
  1201. * Check the busy bit which is low when DAT[3:0]
  1202. * (the data lines) are 0000
  1203. */
  1204. status = mci_readl(slot->host, STATUS);
  1205. return !!(status & SDMMC_STATUS_BUSY);
  1206. }
  1207. static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
  1208. {
  1209. struct dw_mci_slot *slot = mmc_priv(mmc);
  1210. struct dw_mci *host = slot->host;
  1211. const struct dw_mci_drv_data *drv_data = host->drv_data;
  1212. u32 uhs;
  1213. u32 v18 = SDMMC_UHS_18V << slot->id;
  1214. int ret;
  1215. if (drv_data && drv_data->switch_voltage)
  1216. return drv_data->switch_voltage(mmc, ios);
  1217. /*
  1218. * Program the voltage. Note that some instances of dw_mmc may use
  1219. * the UHS_REG for this. For other instances (like exynos) the UHS_REG
  1220. * does no harm but you need to set the regulator directly. Try both.
  1221. */
  1222. uhs = mci_readl(host, UHS_REG);
  1223. if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
  1224. uhs &= ~v18;
  1225. else
  1226. uhs |= v18;
  1227. if (!IS_ERR(mmc->supply.vqmmc)) {
  1228. ret = mmc_regulator_set_vqmmc(mmc, ios);
  1229. if (ret) {
  1230. dev_dbg(&mmc->class_dev,
  1231. "Regulator set error %d - %s V\n",
  1232. ret, uhs & v18 ? "1.8" : "3.3");
  1233. return ret;
  1234. }
  1235. }
  1236. mci_writel(host, UHS_REG, uhs);
  1237. return 0;
  1238. }
  1239. static int dw_mci_get_ro(struct mmc_host *mmc)
  1240. {
  1241. int read_only;
  1242. struct dw_mci_slot *slot = mmc_priv(mmc);
  1243. int gpio_ro = mmc_gpio_get_ro(mmc);
  1244. /* Use platform get_ro function, else try on board write protect */
  1245. if (gpio_ro >= 0)
  1246. read_only = gpio_ro;
  1247. else
  1248. read_only =
  1249. mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
  1250. dev_dbg(&mmc->class_dev, "card is %s\n",
  1251. read_only ? "read-only" : "read-write");
  1252. return read_only;
  1253. }
/*
 * mmc_host_ops.get_cd hook: report card presence. Polled/non-removable
 * hosts are always "present"; otherwise prefer the CD GPIO, falling
 * back to the CDETECT register (active low). Keeps the per-slot
 * DW_MMC_CARD_PRESENT flag in sync under host->lock.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
				|| !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					"card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					"card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
			!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}
/*
 * mmc_host_ops.hw_reset hook: pulse the card's RST_n line (eMMC
 * hardware reset) after resetting the controller's DMA and FIFO.
 */
static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}
/*
 * mmc_host_ops.init_card hook: for SDIO(-combo) cards turn off the
 * card-clock low-power gating, since a stopped clock would prevent
 * SDIO interrupts from being delivered.
 */
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			/* Latch the CLKENA change into the CIU. */
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}
  1342. static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
  1343. {
  1344. struct dw_mci_slot *slot = mmc_priv(mmc);
  1345. struct dw_mci *host = slot->host;
  1346. unsigned long irqflags;
  1347. u32 int_mask;
  1348. spin_lock_irqsave(&host->irq_lock, irqflags);
  1349. /* Enable/disable Slot Specific SDIO interrupt */
  1350. int_mask = mci_readl(host, INTMASK);
  1351. if (enb)
  1352. int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
  1353. else
  1354. int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
  1355. mci_writel(host, INTMASK, int_mask);
  1356. spin_unlock_irqrestore(&host->irq_lock, irqflags);
  1357. }
  1358. static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
  1359. {
  1360. struct dw_mci_slot *slot = mmc_priv(mmc);
  1361. struct dw_mci *host = slot->host;
  1362. const struct dw_mci_drv_data *drv_data = host->drv_data;
  1363. int err = -EINVAL;
  1364. if (drv_data && drv_data->execute_tuning)
  1365. err = drv_data->execute_tuning(slot, opcode);
  1366. return err;
  1367. }
  1368. static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
  1369. struct mmc_ios *ios)
  1370. {
  1371. struct dw_mci_slot *slot = mmc_priv(mmc);
  1372. struct dw_mci *host = slot->host;
  1373. const struct dw_mci_drv_data *drv_data = host->drv_data;
  1374. if (drv_data && drv_data->prepare_hs400_tuning)
  1375. return drv_data->prepare_hs400_tuning(host, ios);
  1376. return 0;
  1377. }
/* Host operations handed to the MMC core for every dw_mmc slot. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset               = dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};
/*
 * Finish @mrq: advance the host state machine to the next queued slot
 * (or back to idle) and report completion to the MMC core.
 *
 * Called with host->lock held; the lock is dropped around
 * mmc_request_done() since the core may re-enter the driver.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
/*
 * Latch the card's response registers into @cmd and translate the
 * command-phase interrupt status (saved in host->cmd_status by the ISR)
 * into cmd->error.  Returns the resulting error code (0 on success).
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit response: RESP0 is the least-significant word */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Priority order: response timeout, then CRC, then generic error */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}
/*
 * Translate the data-phase interrupt status (host->data_status) into
 * data->error and account the bytes transferred.  On any data error the
 * controller is reset, since the FIFO may still hold stale data.
 * Returns the resulting error code (0 on success).
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		/* No error: the whole transfer completed */
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
  1487. static void dw_mci_set_drto(struct dw_mci *host)
  1488. {
  1489. unsigned int drto_clks;
  1490. unsigned int drto_ms;
  1491. drto_clks = mci_readl(host, TMOUT) >> 8;
  1492. drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);
  1493. /* add a bit spare time */
  1494. drto_ms += 10;
  1495. mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
  1496. }
/*
 * Bottom half of the driver: a state machine that advances the current
 * request through its command / data / busy / stop phases.  Scheduled by
 * the interrupt handler, it consumes the EVENT_* bits the ISR set in
 * host->pending_events and loops until the state stops changing.
 * Runs entirely under host->lock (dropped only inside
 * dw_mci_request_end()).
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now send the actual data command */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * During UHS tuning sequence, sending the stop
				 * command after the response CRC error would
				 * throw the system into a confused state
				 * causing all future tuning phases to report
				 * failure.
				 *
				 * In such case controller will move into a data
				 * transfer state after a response error or
				 * response CRC error. Let's let that finish
				 * before trying to send a stop, so we'll go to
				 * STATE_SENDING_DATA.
				 *
				 * Although letting the data transfer take place
				 * will waste a bit of time (we already know
				 * the command was bad), it can't cause any
				 * errors since it's possible it would have
				 * taken place anyway if this tasklet got
				 * delayed. Allowing the transfer to take place
				 * avoids races and keeps things simple.
				 */
				if ((err != -ETIMEDOUT) &&
				    (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
					state = STATE_SENDING_DATA;
					continue;
				}

				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				/* DRTO/EBE: controller sends no CRC, skip the stop */
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If all data-related interrupts don't come
				 * within the given time in reading data state.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events)) {
				/*
				 * If data error interrupt comes but data over
				 * interrupt doesn't come within the given time.
				 * in reading data state.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					/* CMD23 case: pre-set stop was never sent */
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (!mrq->sbc && mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the aborted transfer to drain before resuming */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}
  1697. /* push final bytes to part_buf, only use during push */
  1698. static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
  1699. {
  1700. memcpy((void *)&host->part_buf, buf, cnt);
  1701. host->part_buf_count = cnt;
  1702. }
  1703. /* append bytes to part_buf, only use during push */
  1704. static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
  1705. {
  1706. cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
  1707. memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
  1708. host->part_buf_count += cnt;
  1709. return cnt;
  1710. }
  1711. /* pull first bytes from part_buf, only use during pull */
  1712. static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
  1713. {
  1714. cnt = min_t(int, cnt, host->part_buf_count);
  1715. if (cnt) {
  1716. memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
  1717. cnt);
  1718. host->part_buf_count -= cnt;
  1719. host->part_buf_start += cnt;
  1720. }
  1721. return cnt;
  1722. }
  1723. /* pull final bytes from the part_buf, assuming it's just been filled */
  1724. static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
  1725. {
  1726. memcpy(buf, &host->part_buf, cnt);
  1727. host->part_buf_start = cnt;
  1728. host->part_buf_count = (1 << host->data_shift) - cnt;
  1729. }
/*
 * PIO push for a 16-bit-wide FIFO: write @cnt bytes from @buf to the
 * data FIFO in u16 units, buffering any odd trailing byte in part_buf.
 * A partial word is flushed early only when it completes the transfer.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* bounce through an aligned buffer if @buf is misaligned for u16 */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}
/*
 * PIO pull for a 16-bit-wide FIFO: read @cnt bytes from the data FIFO
 * into @buf in u16 units; an odd trailing byte is read into part_buf
 * and handed out via dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* bounce through an aligned buffer if @buf is misaligned for u16 */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
/*
 * PIO push for a 32-bit-wide FIFO: write @cnt bytes from @buf to the
 * data FIFO in u32 units, buffering 1-3 trailing bytes in part_buf.
 * A partial word is flushed early only when it completes the transfer.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* bounce through an aligned buffer if @buf is misaligned for u32 */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}
/*
 * PIO pull for a 32-bit-wide FIFO: read @cnt bytes from the data FIFO
 * into @buf in u32 units; 1-3 trailing bytes are read into part_buf
 * and handed out via dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* bounce through an aligned buffer if @buf is misaligned for u32 */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
/*
 * PIO push for a 64-bit-wide FIFO: write @cnt bytes from @buf to the
 * data FIFO in u64 units, buffering 1-7 trailing bytes in part_buf.
 * A partial word is flushed early only when it completes the transfer.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* bounce through an aligned buffer if @buf is misaligned for u64 */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}
/*
 * PIO pull for a 64-bit-wide FIFO: read @cnt bytes from the data FIFO
 * into @buf in u64 units; 1-7 trailing bytes are read into part_buf
 * and handed out via dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* bounce through an aligned buffer if @buf is misaligned for u64 */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
  1958. static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
  1959. {
  1960. int len;
  1961. /* get remaining partial bytes */
  1962. len = dw_mci_pull_part_bytes(host, buf, cnt);
  1963. if (unlikely(len == cnt))
  1964. return;
  1965. buf += len;
  1966. cnt -= len;
  1967. /* get the rest of the data */
  1968. host->pull_data(host, buf, cnt);
  1969. }
/*
 * PIO read path: drain the controller FIFO into the request's
 * scatterlist via the sg_miter iterator.  @dto tells us a data-over
 * interrupt fired, in which case we keep draining until the FIFO is
 * empty.  When the scatterlist is exhausted, flag XFER_COMPLETE for
 * the tasklet.  Called from interrupt context.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes available = FIFO fill level + buffered partial word */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* current segment fully consumed; peek at the next one */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
/*
 * PIO write path: feed the controller FIFO from the request's
 * scatterlist via the sg_miter iterator, repeating while the TXDR
 * (transmit data request) interrupt stays asserted.  When the
 * scatterlist is exhausted, flag XFER_COMPLETE for the tasklet.
 * Called from interrupt context.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free space = empty FIFO slots minus buffered partial word */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* current segment fully consumed; peek at the next one */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
/*
 * Record the command-phase interrupt status and wake the tasklet.
 * The first recorded status wins (an earlier error is not overwritten
 * by a later CMD_DONE).  smp_wmb() orders the cmd_status store before
 * the pending_events bit that the tasklet tests.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
  2072. static void dw_mci_handle_cd(struct dw_mci *host)
  2073. {
  2074. int i;
  2075. for (i = 0; i < host->num_slots; i++) {
  2076. struct dw_mci_slot *slot = host->slot[i];
  2077. if (!slot)
  2078. continue;
  2079. if (slot->mmc->ops->card_event)
  2080. slot->mmc->ops->card_event(slot->mmc);
  2081. mmc_detect_change(slot->mmc,
  2082. msecs_to_jiffies(host->pdata->detect_delay_ms));
  2083. }
  2084. }
/*
 * Top-half interrupt handler.  Decodes MINTSTS, acks each handled bit
 * in RINTSTS, stashes status in host->cmd_status/data_status and sets
 * EVENT_* bits for the tasklet; PIO data is moved here directly.
 * IDMAC descriptor interrupts (IDSTS/IDSTS64) are handled at the end.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			unsigned long irqflags;

			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* drain whatever is left in the FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			/* skip DMA completion if a data error is pending */
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			/* skip DMA completion if a data error is pending */
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}
/*
 * Allocate and register one mmc_host for slot @id: apply clock limits
 * from DT, pick up regulators and capability flags (platform data,
 * variant driver, DT), size the request limits per transfer mode, and
 * add the host to the MMC core.  Returns 0 or a negative errno; on
 * failure the allocated mmc_host is freed.
 */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		dev_info(host->dev,
			"'clock-freq-min-max' property was deprecated.\n");
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/*if there are external regulators, get them*/
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	/*
	 * Support MMC_CAP_ERASE by default.
	 * It needs to use trim/discard/erase commands.
	 */
	mmc->caps |= MMC_CAP_ERASE;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	/* variant-specific caps are indexed by controller instance */
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	/* prime the card-detect state before registering */
	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
/*
 * Tear down slot @id: unregister from the MMC core first so no new
 * requests arrive, clear the host's slot pointer, then free the host.
 */
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
/*
 * Probe-time DMA setup: read the transfer mode from HCON, pick internal
 * IDMAC (allocating the descriptor ring and selecting 32/64-bit
 * addressing) or an external DMA engine (checked against DT bindings),
 * and initialize the chosen dma_ops.  Any failure falls back to PIO.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;

	/*
	 * Check tansfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((of_property_count_strings(np, "dma-names") < 0) ||
		    (!of_find_property(np, "dmas", NULL))) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	/* all four callbacks are mandatory for a usable dma_ops */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}
  2369. static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
  2370. {
  2371. unsigned long timeout = jiffies + msecs_to_jiffies(500);
  2372. u32 ctrl;
  2373. ctrl = mci_readl(host, CTRL);
  2374. ctrl |= reset;
  2375. mci_writel(host, CTRL, ctrl);
  2376. /* wait till resets clear */
  2377. do {
  2378. ctrl = mci_readl(host, CTRL);
  2379. if (!(ctrl & reset))
  2380. return true;
  2381. } while (time_before(jiffies, timeout));
  2382. dev_err(host->dev,
  2383. "Timeout resetting block (ctrl reset %#x)\n",
  2384. ctrl & reset);
  2385. return false;
  2386. }
/*
 * Full controller reset: CIU + FIFO (plus the DMA interface when DMA
 * is in use), then re-sync the CIU clock registers.
 *
 * Returns true if every reset step completed, false if any step timed
 * out. The clock-update command at ciu_out is issued in either case.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;

			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also recommended that we reset and reprogram idmac */
		dw_mci_idmac_reset(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
  2445. static void dw_mci_cmd11_timer(unsigned long arg)
  2446. {
  2447. struct dw_mci *host = (struct dw_mci *)arg;
  2448. if (host->state != STATE_SENDING_CMD11) {
  2449. dev_warn(host->dev, "Unexpected CMD11 timeout\n");
  2450. return;
  2451. }
  2452. host->cmd_status = SDMMC_INT_RTO;
  2453. set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
  2454. tasklet_schedule(&host->tasklet);
  2455. }
  2456. static void dw_mci_dto_timer(unsigned long arg)
  2457. {
  2458. struct dw_mci *host = (struct dw_mci *)arg;
  2459. switch (host->state) {
  2460. case STATE_SENDING_DATA:
  2461. case STATE_DATA_BUSY:
  2462. /*
  2463. * If DTO interrupt does NOT come in sending data state,
  2464. * we should notify the driver to terminate current transfer
  2465. * and report a data timeout to the core.
  2466. */
  2467. host->data_status = SDMMC_INT_DRTO;
  2468. set_bit(EVENT_DATA_ERROR, &host->pending_events);
  2469. set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
  2470. tasklet_schedule(&host->tasklet);
  2471. break;
  2472. default:
  2473. break;
  2474. }
  2475. }
#ifdef CONFIG_OF
/*
 * Build a dw_mci_board from device-tree properties.
 *
 * Returns devm-allocated pdata on success, or an ERR_PTR on allocation
 * failure, reset-controller probe deferral, or a variant parse_dt()
 * error. Missing optional properties simply leave pdata fields zero.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find reset controller when exist */
	pdata->rstc = devm_reset_control_get_optional(dev, "reset");
	if (IS_ERR(pdata->rstc)) {
		/*
		 * Only deferral aborts the parse; any other error is
		 * left in pdata->rstc and checked with IS_ERR() by users.
		 */
		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
	}

	/* find out number of slots supported */
	of_property_read_u32(np, "num-slots", &pdata->num_slots);

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* Let the controller variant parse its own properties last */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
/* Without OF support there is no DT to parse; always fail. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
  2515. static void dw_mci_enable_cd(struct dw_mci *host)
  2516. {
  2517. unsigned long irqflags;
  2518. u32 temp;
  2519. int i;
  2520. struct dw_mci_slot *slot;
  2521. /*
  2522. * No need for CD if all slots have a non-error GPIO
  2523. * as well as broken card detection is found.
  2524. */
  2525. for (i = 0; i < host->num_slots; i++) {
  2526. slot = host->slot[i];
  2527. if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
  2528. return;
  2529. if (mmc_gpio_get_cd(slot->mmc) < 0)
  2530. break;
  2531. }
  2532. if (i == host->num_slots)
  2533. return;
  2534. spin_lock_irqsave(&host->irq_lock, irqflags);
  2535. temp = mci_readl(host, INTMASK);
  2536. temp |= SDMMC_INT_CD;
  2537. mci_writel(host, INTMASK, temp);
  2538. spin_unlock_irqrestore(&host->irq_lock, irqflags);
  2539. }
  2540. int dw_mci_probe(struct dw_mci *host)
  2541. {
  2542. const struct dw_mci_drv_data *drv_data = host->drv_data;
  2543. int width, i, ret = 0;
  2544. u32 fifo_size;
  2545. int init_slots = 0;
  2546. if (!host->pdata) {
  2547. host->pdata = dw_mci_parse_dt(host);
  2548. if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
  2549. return -EPROBE_DEFER;
  2550. } else if (IS_ERR(host->pdata)) {
  2551. dev_err(host->dev, "platform data not available\n");
  2552. return -EINVAL;
  2553. }
  2554. }
  2555. host->biu_clk = devm_clk_get(host->dev, "biu");
  2556. if (IS_ERR(host->biu_clk)) {
  2557. dev_dbg(host->dev, "biu clock not available\n");
  2558. } else {
  2559. ret = clk_prepare_enable(host->biu_clk);
  2560. if (ret) {
  2561. dev_err(host->dev, "failed to enable biu clock\n");
  2562. return ret;
  2563. }
  2564. }
  2565. host->ciu_clk = devm_clk_get(host->dev, "ciu");
  2566. if (IS_ERR(host->ciu_clk)) {
  2567. dev_dbg(host->dev, "ciu clock not available\n");
  2568. host->bus_hz = host->pdata->bus_hz;
  2569. } else {
  2570. ret = clk_prepare_enable(host->ciu_clk);
  2571. if (ret) {
  2572. dev_err(host->dev, "failed to enable ciu clock\n");
  2573. goto err_clk_biu;
  2574. }
  2575. if (host->pdata->bus_hz) {
  2576. ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
  2577. if (ret)
  2578. dev_warn(host->dev,
  2579. "Unable to set bus rate to %uHz\n",
  2580. host->pdata->bus_hz);
  2581. }
  2582. host->bus_hz = clk_get_rate(host->ciu_clk);
  2583. }
  2584. if (!host->bus_hz) {
  2585. dev_err(host->dev,
  2586. "Platform data must supply bus speed\n");
  2587. ret = -ENODEV;
  2588. goto err_clk_ciu;
  2589. }
  2590. if (drv_data && drv_data->init) {
  2591. ret = drv_data->init(host);
  2592. if (ret) {
  2593. dev_err(host->dev,
  2594. "implementation specific init failed\n");
  2595. goto err_clk_ciu;
  2596. }
  2597. }
  2598. if (!IS_ERR(host->pdata->rstc)) {
  2599. reset_control_assert(host->pdata->rstc);
  2600. usleep_range(10, 50);
  2601. reset_control_deassert(host->pdata->rstc);
  2602. }
  2603. setup_timer(&host->cmd11_timer,
  2604. dw_mci_cmd11_timer, (unsigned long)host);
  2605. setup_timer(&host->dto_timer,
  2606. dw_mci_dto_timer, (unsigned long)host);
  2607. spin_lock_init(&host->lock);
  2608. spin_lock_init(&host->irq_lock);
  2609. INIT_LIST_HEAD(&host->queue);
  2610. /*
  2611. * Get the host data width - this assumes that HCON has been set with
  2612. * the correct values.
  2613. */
  2614. i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
  2615. if (!i) {
  2616. host->push_data = dw_mci_push_data16;
  2617. host->pull_data = dw_mci_pull_data16;
  2618. width = 16;
  2619. host->data_shift = 1;
  2620. } else if (i == 2) {
  2621. host->push_data = dw_mci_push_data64;
  2622. host->pull_data = dw_mci_pull_data64;
  2623. width = 64;
  2624. host->data_shift = 3;
  2625. } else {
  2626. /* Check for a reserved value, and warn if it is */
  2627. WARN((i != 1),
  2628. "HCON reports a reserved host data width!\n"
  2629. "Defaulting to 32-bit access.\n");
  2630. host->push_data = dw_mci_push_data32;
  2631. host->pull_data = dw_mci_pull_data32;
  2632. width = 32;
  2633. host->data_shift = 2;
  2634. }
  2635. /* Reset all blocks */
  2636. if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
  2637. ret = -ENODEV;
  2638. goto err_clk_ciu;
  2639. }
  2640. host->dma_ops = host->pdata->dma_ops;
  2641. dw_mci_init_dma(host);
  2642. /* Clear the interrupts for the host controller */
  2643. mci_writel(host, RINTSTS, 0xFFFFFFFF);
  2644. mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
  2645. /* Put in max timeout */
  2646. mci_writel(host, TMOUT, 0xFFFFFFFF);
  2647. /*
  2648. * FIFO threshold settings RxMark = fifo_size / 2 - 1,
  2649. * Tx Mark = fifo_size / 2 DMA Size = 8
  2650. */
  2651. if (!host->pdata->fifo_depth) {
  2652. /*
  2653. * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
  2654. * have been overwritten by the bootloader, just like we're
  2655. * about to do, so if you know the value for your hardware, you
  2656. * should put it in the platform data.
  2657. */
  2658. fifo_size = mci_readl(host, FIFOTH);
  2659. fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
  2660. } else {
  2661. fifo_size = host->pdata->fifo_depth;
  2662. }
  2663. host->fifo_depth = fifo_size;
  2664. host->fifoth_val =
  2665. SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
  2666. mci_writel(host, FIFOTH, host->fifoth_val);
  2667. /* disable clock to CIU */
  2668. mci_writel(host, CLKENA, 0);
  2669. mci_writel(host, CLKSRC, 0);
  2670. /*
  2671. * In 2.40a spec, Data offset is changed.
  2672. * Need to check the version-id and set data-offset for DATA register.
  2673. */
  2674. host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
  2675. dev_info(host->dev, "Version ID is %04x\n", host->verid);
  2676. if (host->verid < DW_MMC_240A)
  2677. host->fifo_reg = host->regs + DATA_OFFSET;
  2678. else
  2679. host->fifo_reg = host->regs + DATA_240A_OFFSET;
  2680. tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
  2681. ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
  2682. host->irq_flags, "dw-mci", host);
  2683. if (ret)
  2684. goto err_dmaunmap;
  2685. if (host->pdata->num_slots)
  2686. host->num_slots = host->pdata->num_slots;
  2687. else
  2688. host->num_slots = 1;
  2689. if (host->num_slots < 1 ||
  2690. host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
  2691. dev_err(host->dev,
  2692. "Platform data must supply correct num_slots.\n");
  2693. ret = -ENODEV;
  2694. goto err_clk_ciu;
  2695. }
  2696. /*
  2697. * Enable interrupts for command done, data over, data empty,
  2698. * receive ready and error such as transmit, receive timeout, crc error
  2699. */
  2700. mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
  2701. SDMMC_INT_TXDR | SDMMC_INT_RXDR |
  2702. DW_MCI_ERROR_FLAGS);
  2703. /* Enable mci interrupt */
  2704. mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
  2705. dev_info(host->dev,
  2706. "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
  2707. host->irq, width, fifo_size);
  2708. /* We need at least one slot to succeed */
  2709. for (i = 0; i < host->num_slots; i++) {
  2710. ret = dw_mci_init_slot(host, i);
  2711. if (ret)
  2712. dev_dbg(host->dev, "slot %d init failed\n", i);
  2713. else
  2714. init_slots++;
  2715. }
  2716. if (init_slots) {
  2717. dev_info(host->dev, "%d slots initialized\n", init_slots);
  2718. } else {
  2719. dev_dbg(host->dev,
  2720. "attempted to initialize %d slots, but failed on all\n",
  2721. host->num_slots);
  2722. goto err_dmaunmap;
  2723. }
  2724. /* Now that slots are all setup, we can enable card detect */
  2725. dw_mci_enable_cd(host);
  2726. return 0;
  2727. err_dmaunmap:
  2728. if (host->use_dma && host->dma_ops->exit)
  2729. host->dma_ops->exit(host);
  2730. if (!IS_ERR(host->pdata->rstc))
  2731. reset_control_assert(host->pdata->rstc);
  2732. err_clk_ciu:
  2733. clk_disable_unprepare(host->ciu_clk);
  2734. err_clk_biu:
  2735. clk_disable_unprepare(host->biu_clk);
  2736. return ret;
  2737. }
  2738. EXPORT_SYMBOL(dw_mci_probe);
/*
 * Tear down a controller set up by dw_mci_probe(): remove all slots,
 * mask and clear interrupts, gate the card clock, exit DMA, assert
 * the optional reset line and disable the bus/card clocks.
 */
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		/* Slots that failed to initialise are NULL */
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
  2760. #ifdef CONFIG_PM
/*
 * Runtime-suspend callback: tear down DMA and gate the card interface
 * clock. The bus interface clock is additionally gated only when card
 * detect does not rely on the controller (GPIO CD or non-removable
 * card) — mirrored by the enable logic in dw_mci_runtime_resume().
 */
int dw_mci_runtime_suspend(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	clk_disable_unprepare(host->ciu_clk);

	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);
/*
 * Runtime-resume callback: re-enable clocks, reset the controller and
 * restore the register state programmed at probe time (FIFO
 * thresholds, timeout, interrupt mask), re-init DMA and re-apply bus
 * settings for slots that kept power across suspend.
 */
int dw_mci_runtime_resume(struct device *dev)
{
	int i, ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);

	/* biu_clk was gated in suspend under this same condition */
	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	/* Re-apply ios/bus setup for slots that kept power */
	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
  2828. #endif /* CONFIG_PM */
/*
 * Module init: nothing to register here — controller instances are
 * probed through their platform/bus glue drivers; just announce the
 * core driver.
 */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}
/* Module exit: intentionally empty — init registered nothing. */
static void __exit dw_mci_exit(void)
{
}
/* Module entry points and metadata */
module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");