amd_iommu.c
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
#include "pci.h"
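
/*
 * CMD_SET_TYPE() places the command opcode into bits 31:28 of the second
 * command word.  LOOP_TIMEOUT bounds the udelay(1) polling loops used by
 * the completion-wait and log-polling code below, i.e. roughly 100 ms.
 */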
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define LOOP_TIMEOUT	100000

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * 512GB Pages are not supported due to a hardware bug
 */
#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
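
/*
 * Decoding the mask above: bit n set means a page size of 2^n bytes is
 * advertised.  ~0xFFFUL sets every bit from 12 upwards (4kb and all larger
 * power-of-two sizes); ~(2ULL << 38) then clears bit 39, the 512GB entry.
 */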
static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/* List of all available dev_data structures */
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);

LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
static struct protection_domain *pt_domain;

static struct iommu_ops amd_iommu_ops;

static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;

static struct dma_map_ops amd_iommu_dma_ops;

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

struct kmem_cache *amd_iommu_irq_cache;

static void update_domain(struct protection_domain *domain);
static int __init alloc_passthrough_domain(void);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	dev_data->devid = devid;
	atomic_set(&dev_data->bind, 0);

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}

static void free_dev_data(struct iommu_dev_data *dev_data)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_del(&dev_data->dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	if (dev_data->group)
		iommu_group_put(dev_data->group);

	kfree(dev_data);
}

static struct iommu_dev_data *search_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
		if (dev_data->devid == devid)
			goto out_unlock;
	}

	dev_data = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}

static struct iommu_dev_data *find_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = search_dev_data(devid);

	if (dev_data == NULL)
		dev_data = alloc_dev_data(devid);

	return dev_data;
}
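
/*
 * PCI_DEVID() packs bus number and devfn into the 16-bit requester ID that
 * is used as the index into the device, alias and rlookup tables.
 */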
static inline u16 get_device_id(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return PCI_DEVID(pdev->bus->number, pdev->devfn);
}

static struct iommu_dev_data *get_dev_data(struct device *dev)
{
	return dev->archdata.iommu;
}
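
/*
 * IOMMUv2 (PRI/PASID) handling requires the device to expose the ATS, PRI
 * and PASID PCIe extended capabilities; if any one of them is missing the
 * device cannot be used with the IOMMUv2 path.
 */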
static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
	static const int caps[] = {
		PCI_EXT_CAP_ID_ATS,
		PCI_EXT_CAP_ID_PRI,
		PCI_EXT_CAP_ID_PASID,
	};
	int i, pos;

	for (i = 0; i < 3; ++i) {
		pos = pci_find_ext_capability(pdev, caps[i]);
		if (pos == 0)
			return false;
	}

	return true;
}

static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
	struct iommu_dev_data *dev_data;

	dev_data = get_dev_data(&pdev->dev);

	return dev_data->errata & (1 << erratum) ? true : false;
}

/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;
	u16 alias = amd_iommu_alias_table[devid];

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid ||
		    entry->target_dev == alias) {
			ret = entry;
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	u16 devid;

	if (!dev || !dev->dma_mask)
		return false;

	/* No PCI device */
	if (!dev_is_pci(dev))
		return false;

	devid = get_device_id(dev);

	/* Out of our scope? */
	if (devid > amd_iommu_last_bdf)
		return false;

	if (amd_iommu_rlookup_table[devid] == NULL)
		return false;

	return true;
}

static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
{
	while (!bus->self) {
		if (!pci_is_root_bus(bus))
			bus = bus->parent;
		else
			return ERR_PTR(-ENODEV);
	}

	return bus;
}
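
/*
 * ACS features a device (and every bridge above it) must provide before it
 * is considered isolated for grouping purposes: Source Validation, Request
 * Redirect, Completion Redirect and Upstream Forwarding.
 */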
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
{
	struct pci_dev *dma_pdev = pdev;

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add to the same group as the lowest numbered
	 * function that also does not support the required ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}
			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
		if (IS_ERR(bus))
			break;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

	return dma_pdev;
}

static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
{
	struct iommu_group *group = iommu_group_get(&pdev->dev);
	int ret;

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);

		WARN_ON(&pdev->dev != dev);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);
	return ret;
}

static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
				    struct device *dev)
{
	if (!dev_data->group) {
		struct iommu_group *group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);

		dev_data->group = group;
	}

	return iommu_group_add_device(dev_data->group, dev);
}

static int init_iommu_group(struct device *dev)
{
	struct iommu_dev_data *dev_data;
	struct iommu_group *group;
	struct pci_dev *dma_pdev;
	int ret;

	group = iommu_group_get(dev);
	if (group) {
		iommu_group_put(group);
		return 0;
	}

	dev_data = find_dev_data(get_device_id(dev));
	if (!dev_data)
		return -ENOMEM;

	if (dev_data->alias_data) {
		u16 alias;
		struct pci_bus *bus;

		if (dev_data->alias_data->group)
			goto use_group;

		/*
		 * If the alias device exists, it's effectively just a first
		 * level quirk for finding the DMA source.
		 */
		alias = amd_iommu_alias_table[dev_data->devid];
		dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
		if (dma_pdev) {
			dma_pdev = get_isolation_root(dma_pdev);
			goto use_pdev;
		}

		/*
		 * If the alias is virtual, try to find a parent device
		 * and test whether the IOMMU group is actually rooted above
		 * the alias.  Be careful to also test the parent device if
		 * we think the alias is the root of the group.
		 */
		bus = pci_find_bus(0, alias >> 8);
		if (!bus)
			goto use_group;

		bus = find_hosted_bus(bus);
		if (IS_ERR(bus) || !bus->self)
			goto use_group;

		dma_pdev = get_isolation_root(pci_dev_get(bus->self));
		if (dma_pdev != bus->self || (dma_pdev->multifunction &&
		    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
			goto use_pdev;

		pci_dev_put(dma_pdev);
		goto use_group;
	}

	dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
use_pdev:
	ret = use_pdev_iommu_group(dma_pdev, dev);
	pci_dev_put(dma_pdev);
	return ret;
use_group:
	return use_dev_data_iommu_group(dev_data->alias_data, dev);
}
static int iommu_init_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iommu_dev_data *dev_data;
	u16 alias;
	int ret;

	if (dev->archdata.iommu)
		return 0;

	dev_data = find_dev_data(get_device_id(dev));
	if (!dev_data)
		return -ENOMEM;

	alias = amd_iommu_alias_table[dev_data->devid];
	if (alias != dev_data->devid) {
		struct iommu_dev_data *alias_data;

		alias_data = find_dev_data(alias);
		if (alias_data == NULL) {
			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
					dev_name(dev));
			free_dev_data(dev_data);
			return -ENOTSUPP;
		}
		dev_data->alias_data = alias_data;
	}

	ret = init_iommu_group(dev);
	if (ret) {
		free_dev_data(dev_data);
		return ret;
	}

	if (pci_iommuv2_capable(pdev)) {
		struct amd_iommu *iommu;

		iommu              = amd_iommu_rlookup_table[dev_data->devid];
		dev_data->iommu_v2 = iommu->is_iommu_v2;
	}

	dev->archdata.iommu = dev_data;

	return 0;
}

static void iommu_ignore_device(struct device *dev)
{
	u16 devid, alias;

	devid = get_device_id(dev);
	alias = amd_iommu_alias_table[devid];

	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));

	amd_iommu_rlookup_table[devid] = NULL;
	amd_iommu_rlookup_table[alias] = NULL;
}

static void iommu_uninit_device(struct device *dev)
{
	iommu_group_remove_device(dev);

	/*
	 * Nothing to do here - we keep dev_data around for unplugged devices
	 * and reuse it when the device is re-plugged - not doing so would
	 * introduce a ton of races.
	 */
}

void __init amd_iommu_uninit_devices(void)
{
	struct iommu_dev_data *dev_data, *n;
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {

		if (!check_device(&pdev->dev))
			continue;

		iommu_uninit_device(&pdev->dev);
	}

	/* Free all of our dev_data structures */
	list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
		free_dev_data(dev_data);
}

int __init amd_iommu_init_devices(void)
{
	struct pci_dev *pdev = NULL;
	int ret = 0;

	for_each_pci_dev(pdev) {

		if (!check_device(&pdev->dev))
			continue;

		ret = iommu_init_device(&pdev->dev);
		if (ret == -ENOTSUPP)
			iommu_ignore_device(&pdev->dev);
		else if (ret)
			goto out_free;
	}

	return 0;

out_free:

	amd_iommu_uninit_devices();

	return ret;
}

#ifdef CONFIG_AMD_IOMMU_STATS

/*
 * Initialization code for statistics collection
 */

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);
DECLARE_STATS_COUNTER(complete_ppr);
DECLARE_STATS_COUNTER(invalidate_iotlb);
DECLARE_STATS_COUNTER(invalidate_iotlb_all);
DECLARE_STATS_COUNTER(pri_requests);

static struct dentry *stats_dir;
static struct dentry *de_fflush;

static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
	if (stats_dir == NULL)
		return;

	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
				       &cnt->value);
}

static void amd_iommu_stats_init(void)
{
	stats_dir = debugfs_create_dir("amd-iommu", NULL);
	if (stats_dir == NULL)
		return;

	de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
					&amd_iommu_unmap_flush);

	amd_iommu_stats_add(&compl_wait);
	amd_iommu_stats_add(&cnt_map_single);
	amd_iommu_stats_add(&cnt_unmap_single);
	amd_iommu_stats_add(&cnt_map_sg);
	amd_iommu_stats_add(&cnt_unmap_sg);
	amd_iommu_stats_add(&cnt_alloc_coherent);
	amd_iommu_stats_add(&cnt_free_coherent);
	amd_iommu_stats_add(&cross_page);
	amd_iommu_stats_add(&domain_flush_single);
	amd_iommu_stats_add(&domain_flush_all);
	amd_iommu_stats_add(&alloced_io_mem);
	amd_iommu_stats_add(&total_map_requests);
	amd_iommu_stats_add(&complete_ppr);
	amd_iommu_stats_add(&invalidate_iotlb);
	amd_iommu_stats_add(&invalidate_iotlb_all);
	amd_iommu_stats_add(&pri_requests);
}

#endif

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(u16 devid)
{
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
			amd_iommu_dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	int type, devid, domid, flags;
	volatile u32 *event = __evt;
	int count = 0;
	u64 address;

retry:
	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	address = (u64)(((u64)event[3]) << 32) | event[2];

	if (type == 0) {
		/* Did we hit the erratum? */
		if (++count == LOOP_TIMEOUT) {
			pr_err("AMD-Vi: No event written to event log\n");
			return;
		}

		udelay(1);
		goto retry;
	}

	printk(KERN_ERR "AMD-Vi: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		dump_dte_entry(devid);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}

	memset(__evt, 0, 4 * sizeof(u32));
}
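
/*
 * Drain the event log: print every entry between the hardware head and tail
 * pointers and write the new head back so the slots can be reused.
 */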
static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}

static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
	struct amd_iommu_fault fault;

	INC_STATS_COUNTER(pri_requests);

	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
		return;
	}

	fault.address   = raw[1];
	fault.pasid     = PPR_PASID(raw[0]);
	fault.device_id = PPR_DEVID(raw[0]);
	fault.tag       = PPR_TAG(raw[0]);
	fault.flags     = PPR_FLAGS(raw[0]);

	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}

static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	if (iommu->ppr_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 entry[2];
		int i;

		raw = (u64 *)(iommu->ppr_log + head);

		/*
		 * Hardware bug: Interrupt may arrive before the entry is
		 * written to memory. If this happens we need to wait for the
		 * entry to arrive.
		 */
		for (i = 0; i < LOOP_TIMEOUT; ++i) {
			if (PPR_REQ_TYPE(raw[0]) != 0)
				break;
			udelay(1);
		}

		/* Avoid memcpy function-call overhead */
		entry[0] = raw[0];
		entry[1] = raw[1];

		/*
		 * To detect the hardware bug we need to clear the entry
		 * back to zero.
		 */
		raw[0] = raw[1] = 0UL;

		/* Update head pointer of hardware ring-buffer */
		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

		/* Handle PPR entry */
		iommu_handle_ppr_entry(iommu, entry);

		/* Refresh ring-buffer information */
		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
	}
}
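
/*
 * The IOMMU interrupt is handled as a threaded IRQ: amd_iommu_int_handler()
 * only wakes the thread, and amd_iommu_int_thread() below does the actual
 * event and PPR log processing in process context.
 */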
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu = (struct amd_iommu *) data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
		/* Enable EVT and PPR interrupts again */
		writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
			iommu->mmio_base + MMIO_STATUS_OFFSET);

		if (status & MMIO_STATUS_EVT_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
			iommu_poll_events(iommu);
		}

		if (status & MMIO_STATUS_PPR_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
			iommu_poll_ppr_log(iommu);
		}

		/*
		 * Hardware bug: ERBT1312
		 * When re-enabling the interrupt (by writing 1 to clear the
		 * bit), the hardware might also try to set the interrupt bit
		 * in the event status register.  In this scenario, the bit
		 * will be set again, disabling subsequent interrupts.
		 *
		 * Workaround: The IOMMU driver should read back the
		 * status register and check if the interrupt bits are cleared.
		 * If not, the driver will need to go through the interrupt
		 * handler again and re-clear the bits.
		 */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	}
	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/
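
/*
 * Spin on the completion-wait semaphore until the IOMMU writes a non-zero
 * value to it (see build_completion_wait() below), giving up after
 * LOOP_TIMEOUT microseconds.
 */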
static int wait_on_sem(volatile u64 *sem)
{
	int i = 0;

	while (*sem == 0 && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT) {
		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
		return -EIO;
	}

	return 0;
}
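
/*
 * Copy a command into the ring buffer at 'tail' and tell the hardware about
 * it by writing the advanced tail pointer to the MMIO tail register.
 */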
static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd,
			       u32 tail)
{
	u8 *target;

	target = iommu->cmd_buf + tail;
	tail   = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;

	/* Copy command to buffer */
	memcpy(target, cmd, sizeof(*cmd));

	/* Tell the IOMMU about it */
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}
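
/*
 * Build a COMPLETION_WAIT command with the store flag set: once all previous
 * commands have finished, the IOMMU writes the value 1 (data[2]) to the
 * physical address of 'sem', which therefore must be 8-byte aligned.
 */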
static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
{
	WARN_ON(address & 0x7ULL);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = upper_32_bits(__pa(address));
	cmd->data[2] = 1;
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}

static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid, int pde)
{
	u64 pages;
	int s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s     = 0;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[1] |= domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size)
{
	u64 pages;
	int s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s     = 0;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0]  = devid;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
	if (s)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}

static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
				  u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = pasid;
	cmd->data[1]  = domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
				  int qdep, u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = devid;
	cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[1] |= (pasid & 0xff) << 16;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	cmd->data[3]  = upper_32_bits(address);
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}

static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
			       int status, int tag, bool gn)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0]  = devid;
	if (gn) {
		cmd->data[1] = pasid;
		cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	cmd->data[3]  = tag & 0x1ff;
	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;

	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
}

static void build_inv_all(struct iommu_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	CMD_SET_TYPE(cmd, CMD_INV_ALL);
}

static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_IRT);
}

/*
 * Writes the command to the IOMMUs command buffer and informs the
 * hardware about the new command.
 */
static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
{
	u32 left, tail, head, next_tail;
	unsigned long flags;

	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);

again:
	spin_lock_irqsave(&iommu->lock, flags);

	head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	left      = (head - next_tail) % iommu->cmd_buf_size;

	if (left <= 2) {
		struct iommu_cmd sync_cmd;
		volatile u64 sem = 0;
		int ret;

		build_completion_wait(&sync_cmd, (u64)&sem);
		copy_cmd_to_buffer(iommu, &sync_cmd, tail);

		spin_unlock_irqrestore(&iommu->lock, flags);

		if ((ret = wait_on_sem(&sem)) != 0)
			return ret;

		goto again;
	}

	copy_cmd_to_buffer(iommu, cmd, tail);

	/* We need to sync now to make sure all commands are processed */
	iommu->need_sync = sync;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	return iommu_queue_command_sync(iommu, cmd, true);
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;
	volatile u64 sem = 0;
	int ret;

	if (!iommu->need_sync)
		return 0;

	build_completion_wait(&cmd, (u64)&sem);

	ret = iommu_queue_command_sync(iommu, &cmd, false);
	if (ret)
		return ret;

	return wait_on_sem(&sem);
}

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_dte(&cmd, devid);

	return iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_dte_all(struct amd_iommu *iommu)
{
	u32 devid;

	for (devid = 0; devid <= 0xffff; ++devid)
		iommu_flush_dte(iommu, devid);

	iommu_completion_wait(iommu);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
static void iommu_flush_tlb_all(struct amd_iommu *iommu)
{
	u32 dom_id;

	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
		struct iommu_cmd cmd;
		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      dom_id, 1);
		iommu_queue_command(iommu, &cmd);
	}

	iommu_completion_wait(iommu);
}

static void iommu_flush_all(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	build_inv_all(&cmd);

	iommu_queue_command(iommu, &cmd);
	iommu_completion_wait(iommu);
}

static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_irt(&cmd, devid);

	iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_irt_all(struct amd_iommu *iommu)
{
	u32 devid;

	for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
		iommu_flush_irt(iommu, devid);

	iommu_completion_wait(iommu);
}

void iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (iommu_feature(iommu, FEATURE_IA)) {
		iommu_flush_all(iommu);
	} else {
		iommu_flush_dte_all(iommu);
		iommu_flush_irt_all(iommu);
		iommu_flush_tlb_all(iommu);
	}
}

/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data,
			      u64 address, size_t size)
{
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;
	int qdep;

	qdep  = dev_data->ats.qdep;
	iommu = amd_iommu_rlookup_table[dev_data->devid];

	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

	return iommu_queue_command(iommu, &cmd);
}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu;
	int ret;

	iommu = amd_iommu_rlookup_table[dev_data->devid];

	ret = iommu_flush_dte(iommu, dev_data->devid);
	if (ret)
		return ret;

	if (dev_data->ats.enabled)
		ret = device_flush_iotlb(dev_data, 0, ~0UL);

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size, int pde)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int ret = 0, i;

	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
		ret |= iommu_queue_command(amd_iommus[i], &cmd);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list) {

		if (!dev_data->ats.enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size);
	}

	WARN_ON(ret);
}

static void domain_flush_pages(struct protection_domain *domain,
			       u64 address, size_t size)
{
	__domain_flush_pages(domain, address, size, 0);
}

/* Flush the whole IO/TLB for a given protection domain */
static void domain_flush_tlb(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}
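
/*
 * Wait on every IOMMU that has devices belonging to this domain until its
 * command queue has been drained (a completion-wait per IOMMU).
 */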
static void domain_flush_complete(struct protection_domain *domain)
{
	int i;

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need to wait for completion of all commands.
		 */
		iommu_completion_wait(amd_iommus[i]);
	}
}

/*
 * This function flushes the DTEs for all devices in domain
 */
static void domain_flush_devices(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   gfp_t gfp)
{
	u64 *pte;

	if (domain->mode == PAGE_MODE_6_LEVEL)
		/* address space already 64 bit large */
		return false;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		return false;

	*pte             = PM_LEVEL_PDE(domain->mode,
					virt_to_phys(domain->pt_root));
	domain->pt_root  = pte;
	domain->mode    += 1;
	domain->updated  = true;

	return true;
}
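
/*
 * Walk the page table from pt_root down to the level implied by page_size,
 * allocating missing page-directory pages on the way, and return a pointer
 * to the PTE for 'address' (or NULL if an existing mapping at a different
 * level is in the way or an allocation fails).
 */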
  1064. static u64 *alloc_pte(struct protection_domain *domain,
  1065. unsigned long address,
  1066. unsigned long page_size,
  1067. u64 **pte_page,
  1068. gfp_t gfp)
  1069. {
  1070. int level, end_lvl;
  1071. u64 *pte, *page;
  1072. BUG_ON(!is_power_of_2(page_size));
  1073. while (address > PM_LEVEL_SIZE(domain->mode))
  1074. increase_address_space(domain, gfp);
  1075. level = domain->mode - 1;
  1076. pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
  1077. address = PAGE_SIZE_ALIGN(address, page_size);
  1078. end_lvl = PAGE_SIZE_LEVEL(page_size);
  1079. while (level > end_lvl) {
  1080. if (!IOMMU_PTE_PRESENT(*pte)) {
  1081. page = (u64 *)get_zeroed_page(gfp);
  1082. if (!page)
  1083. return NULL;
  1084. *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
  1085. }
  1086. /* No level skipping support yet */
  1087. if (PM_PTE_LEVEL(*pte) != level)
  1088. return NULL;
  1089. level -= 1;
  1090. pte = IOMMU_PTE_PAGE(*pte);
  1091. if (pte_page && level == end_lvl)
  1092. *pte_page = pte;
  1093. pte = &pte[PM_LEVEL_INDEX(level, address)];
  1094. }
  1095. return pte;
  1096. }
  1097. /*
  1098. * This function checks if there is a PTE for a given dma address. If
  1099. * there is one, it returns the pointer to it.
  1100. */
  1101. static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
  1102. {
  1103. int level;
  1104. u64 *pte;
  1105. if (address > PM_LEVEL_SIZE(domain->mode))
  1106. return NULL;
  1107. level = domain->mode - 1;
  1108. pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
  1109. while (level > 0) {
  1110. /* Not Present */
  1111. if (!IOMMU_PTE_PRESENT(*pte))
  1112. return NULL;
  1113. /* Large PTE */
  1114. if (PM_PTE_LEVEL(*pte) == 0x07) {
  1115. unsigned long pte_mask, __pte;
  1116. /*
  1117. * If we have a series of large PTEs, make
  1118. * sure to return a pointer to the first one.
  1119. */
  1120. pte_mask = PTE_PAGE_SIZE(*pte);
  1121. pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
  1122. __pte = ((unsigned long)pte) & pte_mask;
  1123. return (u64 *)__pte;
  1124. }
  1125. /* No level skipping support yet */
  1126. if (PM_PTE_LEVEL(*pte) != level)
  1127. return NULL;
  1128. level -= 1;
  1129. /* Walk to the next level */
  1130. pte = IOMMU_PTE_PAGE(*pte);
  1131. pte = &pte[PM_LEVEL_INDEX(level, address)];
  1132. }
  1133. return pte;
  1134. }
  1135. /*
  1136. * Generic mapping functions. It maps a physical address into a DMA
  1137. * address space. It allocates the page table pages if necessary.
  1138. * In the future it can be extended to a generic mapping function
  1139. * supporting all features of AMD IOMMU page tables like level skipping
  1140. * and full 64 bit address spaces.
  1141. */
  1142. static int iommu_map_page(struct protection_domain *dom,
  1143. unsigned long bus_addr,
  1144. unsigned long phys_addr,
  1145. int prot,
  1146. unsigned long page_size)
  1147. {
  1148. u64 __pte, *pte;
  1149. int i, count;
  1150. if (!(prot & IOMMU_PROT_MASK))
  1151. return -EINVAL;
  1152. bus_addr = PAGE_ALIGN(bus_addr);
  1153. phys_addr = PAGE_ALIGN(phys_addr);
  1154. count = PAGE_SIZE_PTE_COUNT(page_size);
1155. pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
/* alloc_pte() returns NULL if a page-table page could not be allocated */
if (!pte)
return -ENOMEM;
1156. for (i = 0; i < count; ++i)
  1157. if (IOMMU_PTE_PRESENT(pte[i]))
  1158. return -EBUSY;
  1159. if (page_size > PAGE_SIZE) {
  1160. __pte = PAGE_SIZE_PTE(phys_addr, page_size);
  1161. __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
  1162. } else
  1163. __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
  1164. if (prot & IOMMU_PROT_IR)
  1165. __pte |= IOMMU_PTE_IR;
  1166. if (prot & IOMMU_PROT_IW)
  1167. __pte |= IOMMU_PTE_IW;
  1168. for (i = 0; i < count; ++i)
  1169. pte[i] = __pte;
  1170. update_domain(dom);
  1171. return 0;
  1172. }
  1173. static unsigned long iommu_unmap_page(struct protection_domain *dom,
  1174. unsigned long bus_addr,
  1175. unsigned long page_size)
  1176. {
  1177. unsigned long long unmap_size, unmapped;
  1178. u64 *pte;
  1179. BUG_ON(!is_power_of_2(page_size));
  1180. unmapped = 0;
  1181. while (unmapped < page_size) {
  1182. pte = fetch_pte(dom, bus_addr);
  1183. if (!pte) {
  1184. /*
  1185. * No PTE for this address
  1186. * move forward in 4kb steps
  1187. */
  1188. unmap_size = PAGE_SIZE;
  1189. } else if (PM_PTE_LEVEL(*pte) == 0) {
  1190. /* 4kb PTE found for this address */
  1191. unmap_size = PAGE_SIZE;
  1192. *pte = 0ULL;
  1193. } else {
  1194. int count, i;
  1195. /* Large PTE found which maps this address */
  1196. unmap_size = PTE_PAGE_SIZE(*pte);
  1197. /* Only unmap from the first pte in the page */
  1198. if ((unmap_size - 1) & bus_addr)
  1199. break;
  1200. count = PAGE_SIZE_PTE_COUNT(unmap_size);
  1201. for (i = 0; i < count; i++)
  1202. pte[i] = 0ULL;
  1203. }
  1204. bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
  1205. unmapped += unmap_size;
  1206. }
  1207. BUG_ON(unmapped && !is_power_of_2(unmapped));
  1208. return unmapped;
  1209. }
  1210. /*
  1211. * This function checks if a specific unity mapping entry is needed for
  1212. * this specific IOMMU.
  1213. */
  1214. static int iommu_for_unity_map(struct amd_iommu *iommu,
  1215. struct unity_map_entry *entry)
  1216. {
  1217. u16 bdf, i;
  1218. for (i = entry->devid_start; i <= entry->devid_end; ++i) {
  1219. bdf = amd_iommu_alias_table[i];
  1220. if (amd_iommu_rlookup_table[bdf] == iommu)
  1221. return 1;
  1222. }
  1223. return 0;
  1224. }
  1225. /*
  1226. * This function actually applies the mapping to the page table of the
  1227. * dma_ops domain.
  1228. */
  1229. static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
  1230. struct unity_map_entry *e)
  1231. {
  1232. u64 addr;
  1233. int ret;
  1234. for (addr = e->address_start; addr < e->address_end;
  1235. addr += PAGE_SIZE) {
  1236. ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
  1237. PAGE_SIZE);
  1238. if (ret)
  1239. return ret;
  1240. /*
1241. * If the unity mapping is in the aperture range, mark the page
1242. * as allocated in the aperture
  1243. */
  1244. if (addr < dma_dom->aperture_size)
  1245. __set_bit(addr >> PAGE_SHIFT,
  1246. dma_dom->aperture[0]->bitmap);
  1247. }
  1248. return 0;
  1249. }
  1250. /*
  1251. * Init the unity mappings for a specific IOMMU in the system
  1252. *
  1253. * Basically iterates over all unity mapping entries and applies them to
1254. * the default DMA domain of that IOMMU if necessary.
  1255. */
  1256. static int iommu_init_unity_mappings(struct amd_iommu *iommu)
  1257. {
  1258. struct unity_map_entry *entry;
  1259. int ret;
  1260. list_for_each_entry(entry, &amd_iommu_unity_map, list) {
  1261. if (!iommu_for_unity_map(iommu, entry))
  1262. continue;
  1263. ret = dma_ops_unity_map(iommu->default_dom, entry);
  1264. if (ret)
  1265. return ret;
  1266. }
  1267. return 0;
  1268. }
  1269. /*
  1270. * Inits the unity mappings required for a specific device
  1271. */
  1272. static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  1273. u16 devid)
  1274. {
  1275. struct unity_map_entry *e;
  1276. int ret;
  1277. list_for_each_entry(e, &amd_iommu_unity_map, list) {
  1278. if (!(devid >= e->devid_start && devid <= e->devid_end))
  1279. continue;
  1280. ret = dma_ops_unity_map(dma_dom, e);
  1281. if (ret)
  1282. return ret;
  1283. }
  1284. return 0;
  1285. }
  1286. /****************************************************************************
  1287. *
  1288. * The next functions belong to the address allocator for the dma_ops
  1289. * interface functions. They work like the allocators in the other IOMMU
1290. * drivers. It's basically a bitmap which marks the allocated pages in
  1291. * the aperture. Maybe it could be enhanced in the future to a more
  1292. * efficient allocator.
  1293. *
  1294. ****************************************************************************/
  1295. /*
  1296. * The address allocator core functions.
  1297. *
1298. * Called with domain->lock held.
  1299. */
  1300. /*
  1301. * Used to reserve address ranges in the aperture (e.g. for exclusion
1302. * ranges).
  1303. */
  1304. static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
  1305. unsigned long start_page,
  1306. unsigned int pages)
  1307. {
  1308. unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
  1309. if (start_page + pages > last_page)
  1310. pages = last_page - start_page;
  1311. for (i = start_page; i < start_page + pages; ++i) {
  1312. int index = i / APERTURE_RANGE_PAGES;
  1313. int page = i % APERTURE_RANGE_PAGES;
  1314. __set_bit(page, dom->aperture[index]->bitmap);
  1315. }
  1316. }
  1317. /*
  1318. * This function is used to add a new aperture range to an existing
  1319. * aperture in case of dma_ops domain allocation or address allocation
  1320. * failure.
  1321. */
  1322. static int alloc_new_range(struct dma_ops_domain *dma_dom,
  1323. bool populate, gfp_t gfp)
  1324. {
  1325. int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
  1326. struct amd_iommu *iommu;
  1327. unsigned long i, old_size;
  1328. #ifdef CONFIG_IOMMU_STRESS
  1329. populate = false;
  1330. #endif
  1331. if (index >= APERTURE_MAX_RANGES)
  1332. return -ENOMEM;
  1333. dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
  1334. if (!dma_dom->aperture[index])
  1335. return -ENOMEM;
  1336. dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
  1337. if (!dma_dom->aperture[index]->bitmap)
  1338. goto out_free;
  1339. dma_dom->aperture[index]->offset = dma_dom->aperture_size;
  1340. if (populate) {
  1341. unsigned long address = dma_dom->aperture_size;
  1342. int i, num_ptes = APERTURE_RANGE_PAGES / 512;
  1343. u64 *pte, *pte_page;
  1344. for (i = 0; i < num_ptes; ++i) {
  1345. pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
  1346. &pte_page, gfp);
  1347. if (!pte)
  1348. goto out_free;
  1349. dma_dom->aperture[index]->pte_pages[i] = pte_page;
  1350. address += APERTURE_RANGE_SIZE / 64;
  1351. }
  1352. }
  1353. old_size = dma_dom->aperture_size;
  1354. dma_dom->aperture_size += APERTURE_RANGE_SIZE;
  1355. /* Reserve address range used for MSI messages */
  1356. if (old_size < MSI_ADDR_BASE_LO &&
  1357. dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
  1358. unsigned long spage;
  1359. int pages;
  1360. pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
  1361. spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
  1362. dma_ops_reserve_addresses(dma_dom, spage, pages);
  1363. }
  1364. /* Initialize the exclusion range if necessary */
  1365. for_each_iommu(iommu) {
  1366. if (iommu->exclusion_start &&
  1367. iommu->exclusion_start >= dma_dom->aperture[index]->offset
  1368. && iommu->exclusion_start < dma_dom->aperture_size) {
  1369. unsigned long startpage;
  1370. int pages = iommu_num_pages(iommu->exclusion_start,
  1371. iommu->exclusion_length,
  1372. PAGE_SIZE);
  1373. startpage = iommu->exclusion_start >> PAGE_SHIFT;
  1374. dma_ops_reserve_addresses(dma_dom, startpage, pages);
  1375. }
  1376. }
  1377. /*
  1378. * Check for areas already mapped as present in the new aperture
  1379. * range and mark those pages as reserved in the allocator. Such
  1380. * mappings may already exist as a result of requested unity
  1381. * mappings for devices.
  1382. */
  1383. for (i = dma_dom->aperture[index]->offset;
  1384. i < dma_dom->aperture_size;
  1385. i += PAGE_SIZE) {
  1386. u64 *pte = fetch_pte(&dma_dom->domain, i);
  1387. if (!pte || !IOMMU_PTE_PRESENT(*pte))
  1388. continue;
  1389. dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
  1390. }
  1391. update_domain(&dma_dom->domain);
  1392. return 0;
  1393. out_free:
  1394. update_domain(&dma_dom->domain);
  1395. free_page((unsigned long)dma_dom->aperture[index]->bitmap);
  1396. kfree(dma_dom->aperture[index]);
  1397. dma_dom->aperture[index] = NULL;
  1398. return -ENOMEM;
  1399. }
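/*
 * Scan the aperture ranges starting at 'start' for a free area of
 * 'pages' pages which respects the alignment and segment-boundary
 * constraints of the device and lies below its DMA mask.
 */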
  1400. static unsigned long dma_ops_area_alloc(struct device *dev,
  1401. struct dma_ops_domain *dom,
  1402. unsigned int pages,
  1403. unsigned long align_mask,
  1404. u64 dma_mask,
  1405. unsigned long start)
  1406. {
  1407. unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
  1408. int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
  1409. int i = start >> APERTURE_RANGE_SHIFT;
  1410. unsigned long boundary_size;
  1411. unsigned long address = -1;
  1412. unsigned long limit;
  1413. next_bit >>= PAGE_SHIFT;
  1414. boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
  1415. PAGE_SIZE) >> PAGE_SHIFT;
1416. for (; i < max_index; ++i) {
  1417. unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
  1418. if (dom->aperture[i]->offset >= dma_mask)
  1419. break;
  1420. limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
  1421. dma_mask >> PAGE_SHIFT);
  1422. address = iommu_area_alloc(dom->aperture[i]->bitmap,
  1423. limit, next_bit, pages, 0,
  1424. boundary_size, align_mask);
  1425. if (address != -1) {
  1426. address = dom->aperture[i]->offset +
  1427. (address << PAGE_SHIFT);
  1428. dom->next_address = address + (pages << PAGE_SHIFT);
  1429. break;
  1430. }
  1431. next_bit = 0;
  1432. }
  1433. return address;
  1434. }
  1435. static unsigned long dma_ops_alloc_addresses(struct device *dev,
  1436. struct dma_ops_domain *dom,
  1437. unsigned int pages,
  1438. unsigned long align_mask,
  1439. u64 dma_mask)
  1440. {
  1441. unsigned long address;
  1442. #ifdef CONFIG_IOMMU_STRESS
  1443. dom->next_address = 0;
  1444. dom->need_flush = true;
  1445. #endif
  1446. address = dma_ops_area_alloc(dev, dom, pages, align_mask,
  1447. dma_mask, dom->next_address);
  1448. if (address == -1) {
  1449. dom->next_address = 0;
  1450. address = dma_ops_area_alloc(dev, dom, pages, align_mask,
  1451. dma_mask, 0);
  1452. dom->need_flush = true;
  1453. }
  1454. if (unlikely(address == -1))
  1455. address = DMA_ERROR_CODE;
  1456. WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
  1457. return address;
  1458. }
  1459. /*
  1460. * The address free function.
  1461. *
1462. * Called with domain->lock held.
  1463. */
  1464. static void dma_ops_free_addresses(struct dma_ops_domain *dom,
  1465. unsigned long address,
  1466. unsigned int pages)
  1467. {
  1468. unsigned i = address >> APERTURE_RANGE_SHIFT;
  1469. struct aperture_range *range = dom->aperture[i];
  1470. BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
  1471. #ifdef CONFIG_IOMMU_STRESS
  1472. if (i < 4)
  1473. return;
  1474. #endif
  1475. if (address >= dom->next_address)
  1476. dom->need_flush = true;
  1477. address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
  1478. bitmap_clear(range->bitmap, address, pages);
  1479. }
  1480. /****************************************************************************
  1481. *
  1482. * The next functions belong to the domain allocation. A domain is
  1483. * allocated for every IOMMU as the default domain. If device isolation
1484. * is enabled, every device gets its own domain. The most important thing
  1485. * about domains is the page table mapping the DMA address space they
  1486. * contain.
  1487. *
  1488. ****************************************************************************/
  1489. /*
  1490. * This function adds a protection domain to the global protection domain list
  1491. */
  1492. static void add_domain_to_list(struct protection_domain *domain)
  1493. {
  1494. unsigned long flags;
  1495. spin_lock_irqsave(&amd_iommu_pd_lock, flags);
  1496. list_add(&domain->list, &amd_iommu_pd_list);
  1497. spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
  1498. }
  1499. /*
1500. * This function removes a protection domain from the global
  1501. * protection domain list
  1502. */
  1503. static void del_domain_from_list(struct protection_domain *domain)
  1504. {
  1505. unsigned long flags;
  1506. spin_lock_irqsave(&amd_iommu_pd_lock, flags);
  1507. list_del(&domain->list);
  1508. spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
  1509. }
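/*
 * Allocate a free protection domain id from the global allocation
 * bitmap. Returns 0 if no id is available.
 */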
  1510. static u16 domain_id_alloc(void)
  1511. {
  1512. unsigned long flags;
  1513. int id;
  1514. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  1515. id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
  1516. BUG_ON(id == 0);
  1517. if (id > 0 && id < MAX_DOMAIN_ID)
  1518. __set_bit(id, amd_iommu_pd_alloc_bitmap);
  1519. else
  1520. id = 0;
  1521. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  1522. return id;
  1523. }
  1524. static void domain_id_free(int id)
  1525. {
  1526. unsigned long flags;
  1527. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  1528. if (id > 0 && id < MAX_DOMAIN_ID)
  1529. __clear_bit(id, amd_iommu_pd_alloc_bitmap);
  1530. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  1531. }
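/*
 * Helper macro to generate the free_pt_l2 ... free_pt_l6 functions,
 * which recursively free all present lower-level page-table pages
 * before freeing the page-table page itself.
 */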
  1532. #define DEFINE_FREE_PT_FN(LVL, FN) \
  1533. static void free_pt_##LVL (unsigned long __pt) \
  1534. { \
  1535. unsigned long p; \
  1536. u64 *pt; \
  1537. int i; \
  1538. \
  1539. pt = (u64 *)__pt; \
  1540. \
  1541. for (i = 0; i < 512; ++i) { \
  1542. if (!IOMMU_PTE_PRESENT(pt[i])) \
  1543. continue; \
  1544. \
  1545. p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
  1546. FN(p); \
  1547. } \
  1548. free_page((unsigned long)pt); \
  1549. }
  1550. DEFINE_FREE_PT_FN(l2, free_page)
  1551. DEFINE_FREE_PT_FN(l3, free_pt_l2)
  1552. DEFINE_FREE_PT_FN(l4, free_pt_l3)
  1553. DEFINE_FREE_PT_FN(l5, free_pt_l4)
  1554. DEFINE_FREE_PT_FN(l6, free_pt_l5)
  1555. static void free_pagetable(struct protection_domain *domain)
  1556. {
  1557. unsigned long root = (unsigned long)domain->pt_root;
  1558. switch (domain->mode) {
  1559. case PAGE_MODE_NONE:
  1560. break;
  1561. case PAGE_MODE_1_LEVEL:
  1562. free_page(root);
  1563. break;
  1564. case PAGE_MODE_2_LEVEL:
  1565. free_pt_l2(root);
  1566. break;
  1567. case PAGE_MODE_3_LEVEL:
  1568. free_pt_l3(root);
  1569. break;
  1570. case PAGE_MODE_4_LEVEL:
  1571. free_pt_l4(root);
  1572. break;
  1573. case PAGE_MODE_5_LEVEL:
  1574. free_pt_l5(root);
  1575. break;
  1576. case PAGE_MODE_6_LEVEL:
  1577. free_pt_l6(root);
  1578. break;
  1579. default:
  1580. BUG();
  1581. }
  1582. }
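/*
 * The following helpers free the GCR3 table used by IOMMUv2 domains,
 * walking one or two table levels depending on the domain's glx value.
 */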
  1583. static void free_gcr3_tbl_level1(u64 *tbl)
  1584. {
  1585. u64 *ptr;
  1586. int i;
  1587. for (i = 0; i < 512; ++i) {
  1588. if (!(tbl[i] & GCR3_VALID))
  1589. continue;
  1590. ptr = __va(tbl[i] & PAGE_MASK);
  1591. free_page((unsigned long)ptr);
  1592. }
  1593. }
  1594. static void free_gcr3_tbl_level2(u64 *tbl)
  1595. {
  1596. u64 *ptr;
  1597. int i;
  1598. for (i = 0; i < 512; ++i) {
  1599. if (!(tbl[i] & GCR3_VALID))
  1600. continue;
  1601. ptr = __va(tbl[i] & PAGE_MASK);
  1602. free_gcr3_tbl_level1(ptr);
  1603. }
  1604. }
  1605. static void free_gcr3_table(struct protection_domain *domain)
  1606. {
  1607. if (domain->glx == 2)
  1608. free_gcr3_tbl_level2(domain->gcr3_tbl);
  1609. else if (domain->glx == 1)
  1610. free_gcr3_tbl_level1(domain->gcr3_tbl);
  1611. else if (domain->glx != 0)
  1612. BUG();
  1613. free_page((unsigned long)domain->gcr3_tbl);
  1614. }
  1615. /*
  1616. * Free a domain, only used if something went wrong in the
  1617. * allocation path and we need to free an already allocated page table
  1618. */
  1619. static void dma_ops_domain_free(struct dma_ops_domain *dom)
  1620. {
  1621. int i;
  1622. if (!dom)
  1623. return;
  1624. del_domain_from_list(&dom->domain);
  1625. free_pagetable(&dom->domain);
  1626. for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
  1627. if (!dom->aperture[i])
  1628. continue;
  1629. free_page((unsigned long)dom->aperture[i]->bitmap);
  1630. kfree(dom->aperture[i]);
  1631. }
  1632. kfree(dom);
  1633. }
  1634. /*
  1635. * Allocates a new protection domain usable for the dma_ops functions.
  1636. * It also initializes the page table and the address allocator data
  1637. * structures required for the dma_ops interface
  1638. */
  1639. static struct dma_ops_domain *dma_ops_domain_alloc(void)
  1640. {
  1641. struct dma_ops_domain *dma_dom;
  1642. dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
  1643. if (!dma_dom)
  1644. return NULL;
  1645. spin_lock_init(&dma_dom->domain.lock);
  1646. dma_dom->domain.id = domain_id_alloc();
  1647. if (dma_dom->domain.id == 0)
  1648. goto free_dma_dom;
  1649. INIT_LIST_HEAD(&dma_dom->domain.dev_list);
  1650. dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
  1651. dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
  1652. dma_dom->domain.flags = PD_DMA_OPS_MASK;
  1653. dma_dom->domain.priv = dma_dom;
  1654. if (!dma_dom->domain.pt_root)
  1655. goto free_dma_dom;
  1656. dma_dom->need_flush = false;
  1657. dma_dom->target_dev = 0xffff;
  1658. add_domain_to_list(&dma_dom->domain);
  1659. if (alloc_new_range(dma_dom, true, GFP_KERNEL))
  1660. goto free_dma_dom;
  1661. /*
1662. * Mark the first page as allocated so we never return 0 as a valid
1663. * dma-address and can use 0 as the error value.
  1664. */
  1665. dma_dom->aperture[0]->bitmap[0] = 1;
  1666. dma_dom->next_address = 0;
  1667. return dma_dom;
  1668. free_dma_dom:
  1669. dma_ops_domain_free(dma_dom);
  1670. return NULL;
  1671. }
  1672. /*
  1673. * little helper function to check whether a given protection domain is a
  1674. * dma_ops domain
  1675. */
  1676. static bool dma_ops_domain(struct protection_domain *domain)
  1677. {
  1678. return domain->flags & PD_DMA_OPS_MASK;
  1679. }
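/*
 * Write the device table entry for devid so that it points to the
 * page table of the given domain. For IOMMUv2 domains the GCR3 table
 * location and the number of GCR3 levels are encoded as well.
 */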
  1680. static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
  1681. {
  1682. u64 pte_root = 0;
  1683. u64 flags = 0;
  1684. if (domain->mode != PAGE_MODE_NONE)
  1685. pte_root = virt_to_phys(domain->pt_root);
  1686. pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
  1687. << DEV_ENTRY_MODE_SHIFT;
  1688. pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
  1689. flags = amd_iommu_dev_table[devid].data[1];
  1690. if (ats)
  1691. flags |= DTE_FLAG_IOTLB;
  1692. if (domain->flags & PD_IOMMUV2_MASK) {
  1693. u64 gcr3 = __pa(domain->gcr3_tbl);
  1694. u64 glx = domain->glx;
  1695. u64 tmp;
  1696. pte_root |= DTE_FLAG_GV;
  1697. pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
  1698. /* First mask out possible old values for GCR3 table */
  1699. tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
  1700. flags &= ~tmp;
  1701. tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
  1702. flags &= ~tmp;
  1703. /* Encode GCR3 table into DTE */
  1704. tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
  1705. pte_root |= tmp;
  1706. tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
  1707. flags |= tmp;
  1708. tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
  1709. flags |= tmp;
  1710. }
  1711. flags &= ~(0xffffUL);
  1712. flags |= domain->id;
  1713. amd_iommu_dev_table[devid].data[1] = flags;
  1714. amd_iommu_dev_table[devid].data[0] = pte_root;
  1715. }
  1716. static void clear_dte_entry(u16 devid)
  1717. {
  1718. /* remove entry from the device table seen by the hardware */
  1719. amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
  1720. amd_iommu_dev_table[devid].data[1] = 0;
  1721. amd_iommu_apply_erratum_63(devid);
  1722. }
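/*
 * do_attach()/do_detach() update the driver data structures, the
 * device table entry and the per-IOMMU reference counts. They are
 * called from __attach_device()/__detach_device() with the domain
 * lock held.
 */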
  1723. static void do_attach(struct iommu_dev_data *dev_data,
  1724. struct protection_domain *domain)
  1725. {
  1726. struct amd_iommu *iommu;
  1727. bool ats;
  1728. iommu = amd_iommu_rlookup_table[dev_data->devid];
  1729. ats = dev_data->ats.enabled;
  1730. /* Update data structures */
  1731. dev_data->domain = domain;
  1732. list_add(&dev_data->list, &domain->dev_list);
  1733. set_dte_entry(dev_data->devid, domain, ats);
  1734. /* Do reference counting */
  1735. domain->dev_iommu[iommu->index] += 1;
  1736. domain->dev_cnt += 1;
  1737. /* Flush the DTE entry */
  1738. device_flush_dte(dev_data);
  1739. }
  1740. static void do_detach(struct iommu_dev_data *dev_data)
  1741. {
  1742. struct amd_iommu *iommu;
  1743. iommu = amd_iommu_rlookup_table[dev_data->devid];
  1744. /* decrease reference counters */
  1745. dev_data->domain->dev_iommu[iommu->index] -= 1;
  1746. dev_data->domain->dev_cnt -= 1;
  1747. /* Update data structures */
  1748. dev_data->domain = NULL;
  1749. list_del(&dev_data->list);
  1750. clear_dte_entry(dev_data->devid);
  1751. /* Flush the DTE entry */
  1752. device_flush_dte(dev_data);
  1753. }
  1754. /*
1755. * If a device is not yet associated with a domain, this function
1756. * attaches it to the domain and makes the change visible to the hardware
  1757. */
  1758. static int __attach_device(struct iommu_dev_data *dev_data,
  1759. struct protection_domain *domain)
  1760. {
  1761. int ret;
  1762. /* lock domain */
  1763. spin_lock(&domain->lock);
  1764. if (dev_data->alias_data != NULL) {
  1765. struct iommu_dev_data *alias_data = dev_data->alias_data;
  1766. /* Some sanity checks */
  1767. ret = -EBUSY;
  1768. if (alias_data->domain != NULL &&
  1769. alias_data->domain != domain)
  1770. goto out_unlock;
  1771. if (dev_data->domain != NULL &&
  1772. dev_data->domain != domain)
  1773. goto out_unlock;
  1774. /* Do real assignment */
  1775. if (alias_data->domain == NULL)
  1776. do_attach(alias_data, domain);
  1777. atomic_inc(&alias_data->bind);
  1778. }
  1779. if (dev_data->domain == NULL)
  1780. do_attach(dev_data, domain);
  1781. atomic_inc(&dev_data->bind);
  1782. ret = 0;
  1783. out_unlock:
  1784. /* ready */
  1785. spin_unlock(&domain->lock);
  1786. return ret;
  1787. }
  1788. static void pdev_iommuv2_disable(struct pci_dev *pdev)
  1789. {
  1790. pci_disable_ats(pdev);
  1791. pci_disable_pri(pdev);
  1792. pci_disable_pasid(pdev);
  1793. }
  1794. /* FIXME: Change generic reset-function to do the same */
  1795. static int pri_reset_while_enabled(struct pci_dev *pdev)
  1796. {
  1797. u16 control;
  1798. int pos;
  1799. pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
  1800. if (!pos)
  1801. return -EINVAL;
  1802. pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
  1803. control |= PCI_PRI_CTRL_RESET;
  1804. pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
  1805. return 0;
  1806. }
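/*
 * Enable the IOMMUv2-related PCI features on a device: PASID, PRI
 * (with an optional reset while enabled for devices that need it) and
 * ATS. PRI and PASID are disabled again if one of the steps fails.
 */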
  1807. static int pdev_iommuv2_enable(struct pci_dev *pdev)
  1808. {
  1809. bool reset_enable;
  1810. int reqs, ret;
  1811. /* FIXME: Hardcode number of outstanding requests for now */
  1812. reqs = 32;
  1813. if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
  1814. reqs = 1;
  1815. reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
  1816. /* Only allow access to user-accessible pages */
  1817. ret = pci_enable_pasid(pdev, 0);
  1818. if (ret)
  1819. goto out_err;
  1820. /* First reset the PRI state of the device */
  1821. ret = pci_reset_pri(pdev);
  1822. if (ret)
  1823. goto out_err;
  1824. /* Enable PRI */
  1825. ret = pci_enable_pri(pdev, reqs);
  1826. if (ret)
  1827. goto out_err;
  1828. if (reset_enable) {
  1829. ret = pri_reset_while_enabled(pdev);
  1830. if (ret)
  1831. goto out_err;
  1832. }
  1833. ret = pci_enable_ats(pdev, PAGE_SHIFT);
  1834. if (ret)
  1835. goto out_err;
  1836. return 0;
  1837. out_err:
  1838. pci_disable_pri(pdev);
  1839. pci_disable_pasid(pdev);
  1840. return ret;
  1841. }
  1842. /* FIXME: Move this to PCI code */
  1843. #define PCI_PRI_TLP_OFF (1 << 15)
  1844. static bool pci_pri_tlp_required(struct pci_dev *pdev)
  1845. {
  1846. u16 status;
  1847. int pos;
  1848. pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
  1849. if (!pos)
  1850. return false;
  1851. pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
  1852. return (status & PCI_PRI_TLP_OFF) ? true : false;
  1853. }
  1854. /*
  1855. * If a device is not yet associated with a domain, this function
1856. * attaches it to the domain and makes the change visible to the hardware
  1857. */
  1858. static int attach_device(struct device *dev,
  1859. struct protection_domain *domain)
  1860. {
  1861. struct pci_dev *pdev = to_pci_dev(dev);
  1862. struct iommu_dev_data *dev_data;
  1863. unsigned long flags;
  1864. int ret;
  1865. dev_data = get_dev_data(dev);
  1866. if (domain->flags & PD_IOMMUV2_MASK) {
  1867. if (!dev_data->iommu_v2 || !dev_data->passthrough)
  1868. return -EINVAL;
  1869. if (pdev_iommuv2_enable(pdev) != 0)
  1870. return -EINVAL;
  1871. dev_data->ats.enabled = true;
  1872. dev_data->ats.qdep = pci_ats_queue_depth(pdev);
  1873. dev_data->pri_tlp = pci_pri_tlp_required(pdev);
  1874. } else if (amd_iommu_iotlb_sup &&
  1875. pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
  1876. dev_data->ats.enabled = true;
  1877. dev_data->ats.qdep = pci_ats_queue_depth(pdev);
  1878. }
  1879. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  1880. ret = __attach_device(dev_data, domain);
  1881. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  1882. /*
  1883. * We might boot into a crash-kernel here. The crashed kernel
  1884. * left the caches in the IOMMU dirty. So we have to flush
  1885. * here to evict all dirty stuff.
  1886. */
  1887. domain_flush_tlb_pde(domain);
  1888. return ret;
  1889. }
  1890. /*
  1891. * Removes a device from a protection domain (unlocked)
  1892. */
  1893. static void __detach_device(struct iommu_dev_data *dev_data)
  1894. {
  1895. struct protection_domain *domain;
  1896. unsigned long flags;
  1897. BUG_ON(!dev_data->domain);
  1898. domain = dev_data->domain;
  1899. spin_lock_irqsave(&domain->lock, flags);
  1900. if (dev_data->alias_data != NULL) {
  1901. struct iommu_dev_data *alias_data = dev_data->alias_data;
  1902. if (atomic_dec_and_test(&alias_data->bind))
  1903. do_detach(alias_data);
  1904. }
  1905. if (atomic_dec_and_test(&dev_data->bind))
  1906. do_detach(dev_data);
  1907. spin_unlock_irqrestore(&domain->lock, flags);
  1908. /*
  1909. * If we run in passthrough mode the device must be assigned to the
  1910. * passthrough domain if it is detached from any other domain.
  1911. * Make sure we can deassign from the pt_domain itself.
  1912. */
  1913. if (dev_data->passthrough &&
  1914. (dev_data->domain == NULL && domain != pt_domain))
  1915. __attach_device(dev_data, pt_domain);
  1916. }
  1917. /*
  1918. * Removes a device from a protection domain (with devtable_lock held)
  1919. */
  1920. static void detach_device(struct device *dev)
  1921. {
  1922. struct protection_domain *domain;
  1923. struct iommu_dev_data *dev_data;
  1924. unsigned long flags;
  1925. dev_data = get_dev_data(dev);
  1926. domain = dev_data->domain;
  1927. /* lock device table */
  1928. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  1929. __detach_device(dev_data);
  1930. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  1931. if (domain->flags & PD_IOMMUV2_MASK)
  1932. pdev_iommuv2_disable(to_pci_dev(dev));
  1933. else if (dev_data->ats.enabled)
  1934. pci_disable_ats(to_pci_dev(dev));
  1935. dev_data->ats.enabled = false;
  1936. }
  1937. /*
  1938. * Find out the protection domain structure for a given PCI device. This
  1939. * will give us the pointer to the page table root for example.
  1940. */
  1941. static struct protection_domain *domain_for_device(struct device *dev)
  1942. {
  1943. struct iommu_dev_data *dev_data;
  1944. struct protection_domain *dom = NULL;
  1945. unsigned long flags;
  1946. dev_data = get_dev_data(dev);
  1947. if (dev_data->domain)
  1948. return dev_data->domain;
  1949. if (dev_data->alias_data != NULL) {
  1950. struct iommu_dev_data *alias_data = dev_data->alias_data;
  1951. read_lock_irqsave(&amd_iommu_devtable_lock, flags);
  1952. if (alias_data->domain != NULL) {
  1953. __attach_device(dev_data, alias_data->domain);
  1954. dom = alias_data->domain;
  1955. }
  1956. read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  1957. }
  1958. return dom;
  1959. }
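/*
 * Bus notifier callback. It sets up or tears down the IOMMU state for
 * devices which are added to or removed from the system and for
 * devices whose driver was unbound.
 */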
  1960. static int device_change_notifier(struct notifier_block *nb,
  1961. unsigned long action, void *data)
  1962. {
  1963. struct dma_ops_domain *dma_domain;
  1964. struct protection_domain *domain;
  1965. struct iommu_dev_data *dev_data;
  1966. struct device *dev = data;
  1967. struct amd_iommu *iommu;
  1968. unsigned long flags;
  1969. u16 devid;
  1970. if (!check_device(dev))
  1971. return 0;
  1972. devid = get_device_id(dev);
  1973. iommu = amd_iommu_rlookup_table[devid];
  1974. dev_data = get_dev_data(dev);
  1975. switch (action) {
  1976. case BUS_NOTIFY_UNBOUND_DRIVER:
  1977. domain = domain_for_device(dev);
  1978. if (!domain)
  1979. goto out;
  1980. if (dev_data->passthrough)
  1981. break;
  1982. detach_device(dev);
  1983. break;
  1984. case BUS_NOTIFY_ADD_DEVICE:
  1985. iommu_init_device(dev);
  1986. /*
1987. * dev_data was still NULL above and has just been
1988. * initialized by iommu_init_device, so fetch it again
  1989. */
  1990. dev_data = get_dev_data(dev);
  1991. if (iommu_pass_through || dev_data->iommu_v2) {
  1992. dev_data->passthrough = true;
  1993. attach_device(dev, pt_domain);
  1994. break;
  1995. }
  1996. domain = domain_for_device(dev);
  1997. /* allocate a protection domain if a device is added */
  1998. dma_domain = find_protection_domain(devid);
  1999. if (!dma_domain) {
  2000. dma_domain = dma_ops_domain_alloc();
  2001. if (!dma_domain)
  2002. goto out;
  2003. dma_domain->target_dev = devid;
  2004. spin_lock_irqsave(&iommu_pd_list_lock, flags);
  2005. list_add_tail(&dma_domain->list, &iommu_pd_list);
  2006. spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
  2007. }
  2008. dev->archdata.dma_ops = &amd_iommu_dma_ops;
  2009. break;
  2010. case BUS_NOTIFY_DEL_DEVICE:
  2011. iommu_uninit_device(dev);
  2012. default:
  2013. goto out;
  2014. }
  2015. iommu_completion_wait(iommu);
  2016. out:
  2017. return 0;
  2018. }
  2019. static struct notifier_block device_nb = {
  2020. .notifier_call = device_change_notifier,
  2021. };
  2022. void amd_iommu_init_notifier(void)
  2023. {
  2024. bus_register_notifier(&pci_bus_type, &device_nb);
  2025. }
  2026. /*****************************************************************************
  2027. *
  2028. * The next functions belong to the dma_ops mapping/unmapping code.
  2029. *
  2030. *****************************************************************************/
  2031. /*
  2032. * In the dma_ops path we only have the struct device. This function
  2033. * finds the corresponding IOMMU, the protection domain and the
  2034. * requestor id for a given device.
  2035. * If the device is not yet associated with a domain this is also done
  2036. * in this function.
  2037. */
  2038. static struct protection_domain *get_domain(struct device *dev)
  2039. {
  2040. struct protection_domain *domain;
  2041. struct dma_ops_domain *dma_dom;
  2042. u16 devid = get_device_id(dev);
  2043. if (!check_device(dev))
  2044. return ERR_PTR(-EINVAL);
  2045. domain = domain_for_device(dev);
  2046. if (domain != NULL && !dma_ops_domain(domain))
  2047. return ERR_PTR(-EBUSY);
  2048. if (domain != NULL)
  2049. return domain;
  2050. /* Device not bound yet - bind it */
  2051. dma_dom = find_protection_domain(devid);
  2052. if (!dma_dom)
  2053. dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
  2054. attach_device(dev, &dma_dom->domain);
  2055. DUMP_printk("Using protection domain %d for device %s\n",
  2056. dma_dom->domain.id, dev_name(dev));
  2057. return &dma_dom->domain;
  2058. }
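/*
 * Re-write the device table entries of all devices attached to the
 * domain. update_domain() uses this to propagate page-table changes
 * (e.g. a new page-table root) to the hardware.
 */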
  2059. static void update_device_table(struct protection_domain *domain)
  2060. {
  2061. struct iommu_dev_data *dev_data;
  2062. list_for_each_entry(dev_data, &domain->dev_list, list)
  2063. set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
  2064. }
  2065. static void update_domain(struct protection_domain *domain)
  2066. {
  2067. if (!domain->updated)
  2068. return;
  2069. update_device_table(domain);
  2070. domain_flush_devices(domain);
  2071. domain_flush_tlb_pde(domain);
  2072. domain->updated = false;
  2073. }
  2074. /*
  2075. * This function fetches the PTE for a given address in the aperture
  2076. */
  2077. static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
  2078. unsigned long address)
  2079. {
  2080. struct aperture_range *aperture;
  2081. u64 *pte, *pte_page;
  2082. aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
  2083. if (!aperture)
  2084. return NULL;
  2085. pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
  2086. if (!pte) {
  2087. pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
  2088. GFP_ATOMIC);
  2089. aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
  2090. } else
  2091. pte += PM_LEVEL_INDEX(0, address);
  2092. update_domain(&dom->domain);
  2093. return pte;
  2094. }
  2095. /*
  2096. * This is the generic map function. It maps one 4kb page at paddr to
  2097. * the given address in the DMA address space for the domain.
  2098. */
  2099. static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
  2100. unsigned long address,
  2101. phys_addr_t paddr,
  2102. int direction)
  2103. {
  2104. u64 *pte, __pte;
  2105. WARN_ON(address > dom->aperture_size);
  2106. paddr &= PAGE_MASK;
  2107. pte = dma_ops_get_pte(dom, address);
  2108. if (!pte)
  2109. return DMA_ERROR_CODE;
  2110. __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
  2111. if (direction == DMA_TO_DEVICE)
  2112. __pte |= IOMMU_PTE_IR;
  2113. else if (direction == DMA_FROM_DEVICE)
  2114. __pte |= IOMMU_PTE_IW;
  2115. else if (direction == DMA_BIDIRECTIONAL)
  2116. __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
  2117. WARN_ON(*pte);
  2118. *pte = __pte;
  2119. return (dma_addr_t)address;
  2120. }
  2121. /*
2122. * The generic unmapping function for one page in the DMA address space.
  2123. */
  2124. static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
  2125. unsigned long address)
  2126. {
  2127. struct aperture_range *aperture;
  2128. u64 *pte;
  2129. if (address >= dom->aperture_size)
  2130. return;
  2131. aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
  2132. if (!aperture)
  2133. return;
  2134. pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
  2135. if (!pte)
  2136. return;
  2137. pte += PM_LEVEL_INDEX(0, address);
  2138. WARN_ON(!*pte);
  2139. *pte = 0ULL;
  2140. }
  2141. /*
  2142. * This function contains common code for mapping of a physically
  2143. * contiguous memory region into DMA address space. It is used by all
  2144. * mapping functions provided with this IOMMU driver.
  2145. * Must be called with the domain lock held.
  2146. */
  2147. static dma_addr_t __map_single(struct device *dev,
  2148. struct dma_ops_domain *dma_dom,
  2149. phys_addr_t paddr,
  2150. size_t size,
  2151. int dir,
  2152. bool align,
  2153. u64 dma_mask)
  2154. {
  2155. dma_addr_t offset = paddr & ~PAGE_MASK;
  2156. dma_addr_t address, start, ret;
  2157. unsigned int pages;
  2158. unsigned long align_mask = 0;
  2159. int i;
  2160. pages = iommu_num_pages(paddr, size, PAGE_SIZE);
  2161. paddr &= PAGE_MASK;
  2162. INC_STATS_COUNTER(total_map_requests);
  2163. if (pages > 1)
  2164. INC_STATS_COUNTER(cross_page);
  2165. if (align)
  2166. align_mask = (1UL << get_order(size)) - 1;
  2167. retry:
  2168. address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
  2169. dma_mask);
  2170. if (unlikely(address == DMA_ERROR_CODE)) {
  2171. /*
  2172. * setting next_address here will let the address
2173. * allocator only scan the newly allocated range in the
  2174. * first run. This is a small optimization.
  2175. */
  2176. dma_dom->next_address = dma_dom->aperture_size;
  2177. if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
  2178. goto out;
  2179. /*
  2180. * aperture was successfully enlarged by 128 MB, try
  2181. * allocation again
  2182. */
  2183. goto retry;
  2184. }
  2185. start = address;
  2186. for (i = 0; i < pages; ++i) {
  2187. ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
  2188. if (ret == DMA_ERROR_CODE)
  2189. goto out_unmap;
  2190. paddr += PAGE_SIZE;
  2191. start += PAGE_SIZE;
  2192. }
  2193. address += offset;
  2194. ADD_STATS_COUNTER(alloced_io_mem, size);
  2195. if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
  2196. domain_flush_tlb(&dma_dom->domain);
  2197. dma_dom->need_flush = false;
  2198. } else if (unlikely(amd_iommu_np_cache))
  2199. domain_flush_pages(&dma_dom->domain, address, size);
  2200. out:
  2201. return address;
  2202. out_unmap:
  2203. for (--i; i >= 0; --i) {
  2204. start -= PAGE_SIZE;
  2205. dma_ops_domain_unmap(dma_dom, start);
  2206. }
  2207. dma_ops_free_addresses(dma_dom, address, pages);
  2208. return DMA_ERROR_CODE;
  2209. }
  2210. /*
  2211. * Does the reverse of the __map_single function. Must be called with
  2212. * the domain lock held too
  2213. */
  2214. static void __unmap_single(struct dma_ops_domain *dma_dom,
  2215. dma_addr_t dma_addr,
  2216. size_t size,
  2217. int dir)
  2218. {
  2219. dma_addr_t flush_addr;
  2220. dma_addr_t i, start;
  2221. unsigned int pages;
  2222. if ((dma_addr == DMA_ERROR_CODE) ||
  2223. (dma_addr + size > dma_dom->aperture_size))
  2224. return;
  2225. flush_addr = dma_addr;
  2226. pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
  2227. dma_addr &= PAGE_MASK;
  2228. start = dma_addr;
  2229. for (i = 0; i < pages; ++i) {
  2230. dma_ops_domain_unmap(dma_dom, start);
  2231. start += PAGE_SIZE;
  2232. }
  2233. SUB_STATS_COUNTER(alloced_io_mem, size);
  2234. dma_ops_free_addresses(dma_dom, dma_addr, pages);
  2235. if (amd_iommu_unmap_flush || dma_dom->need_flush) {
  2236. domain_flush_pages(&dma_dom->domain, flush_addr, size);
  2237. dma_dom->need_flush = false;
  2238. }
  2239. }
  2240. /*
  2241. * The exported map_single function for dma_ops.
  2242. */
  2243. static dma_addr_t map_page(struct device *dev, struct page *page,
  2244. unsigned long offset, size_t size,
  2245. enum dma_data_direction dir,
  2246. struct dma_attrs *attrs)
  2247. {
  2248. unsigned long flags;
  2249. struct protection_domain *domain;
  2250. dma_addr_t addr;
  2251. u64 dma_mask;
  2252. phys_addr_t paddr = page_to_phys(page) + offset;
  2253. INC_STATS_COUNTER(cnt_map_single);
  2254. domain = get_domain(dev);
  2255. if (PTR_ERR(domain) == -EINVAL)
  2256. return (dma_addr_t)paddr;
  2257. else if (IS_ERR(domain))
  2258. return DMA_ERROR_CODE;
  2259. dma_mask = *dev->dma_mask;
  2260. spin_lock_irqsave(&domain->lock, flags);
  2261. addr = __map_single(dev, domain->priv, paddr, size, dir, false,
  2262. dma_mask);
  2263. if (addr == DMA_ERROR_CODE)
  2264. goto out;
  2265. domain_flush_complete(domain);
  2266. out:
  2267. spin_unlock_irqrestore(&domain->lock, flags);
  2268. return addr;
  2269. }
  2270. /*
  2271. * The exported unmap_single function for dma_ops.
  2272. */
  2273. static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
  2274. enum dma_data_direction dir, struct dma_attrs *attrs)
  2275. {
  2276. unsigned long flags;
  2277. struct protection_domain *domain;
  2278. INC_STATS_COUNTER(cnt_unmap_single);
  2279. domain = get_domain(dev);
  2280. if (IS_ERR(domain))
  2281. return;
  2282. spin_lock_irqsave(&domain->lock, flags);
  2283. __unmap_single(domain->priv, dma_addr, size, dir);
  2284. domain_flush_complete(domain);
  2285. spin_unlock_irqrestore(&domain->lock, flags);
  2286. }
  2287. /*
  2288. * The exported map_sg function for dma_ops (handles scatter-gather
  2289. * lists).
  2290. */
  2291. static int map_sg(struct device *dev, struct scatterlist *sglist,
  2292. int nelems, enum dma_data_direction dir,
  2293. struct dma_attrs *attrs)
  2294. {
  2295. unsigned long flags;
  2296. struct protection_domain *domain;
  2297. int i;
  2298. struct scatterlist *s;
  2299. phys_addr_t paddr;
  2300. int mapped_elems = 0;
  2301. u64 dma_mask;
  2302. INC_STATS_COUNTER(cnt_map_sg);
  2303. domain = get_domain(dev);
  2304. if (IS_ERR(domain))
  2305. return 0;
  2306. dma_mask = *dev->dma_mask;
  2307. spin_lock_irqsave(&domain->lock, flags);
  2308. for_each_sg(sglist, s, nelems, i) {
  2309. paddr = sg_phys(s);
  2310. s->dma_address = __map_single(dev, domain->priv,
  2311. paddr, s->length, dir, false,
  2312. dma_mask);
  2313. if (s->dma_address) {
  2314. s->dma_length = s->length;
  2315. mapped_elems++;
  2316. } else
  2317. goto unmap;
  2318. }
  2319. domain_flush_complete(domain);
  2320. out:
  2321. spin_unlock_irqrestore(&domain->lock, flags);
  2322. return mapped_elems;
  2323. unmap:
  2324. for_each_sg(sglist, s, mapped_elems, i) {
  2325. if (s->dma_address)
  2326. __unmap_single(domain->priv, s->dma_address,
  2327. s->dma_length, dir);
  2328. s->dma_address = s->dma_length = 0;
  2329. }
  2330. mapped_elems = 0;
  2331. goto out;
  2332. }
  2333. /*
2334. * The exported unmap_sg function for dma_ops (handles scatter-gather
  2335. * lists).
  2336. */
  2337. static void unmap_sg(struct device *dev, struct scatterlist *sglist,
  2338. int nelems, enum dma_data_direction dir,
  2339. struct dma_attrs *attrs)
  2340. {
  2341. unsigned long flags;
  2342. struct protection_domain *domain;
  2343. struct scatterlist *s;
  2344. int i;
  2345. INC_STATS_COUNTER(cnt_unmap_sg);
  2346. domain = get_domain(dev);
  2347. if (IS_ERR(domain))
  2348. return;
  2349. spin_lock_irqsave(&domain->lock, flags);
  2350. for_each_sg(sglist, s, nelems, i) {
  2351. __unmap_single(domain->priv, s->dma_address,
  2352. s->dma_length, dir);
  2353. s->dma_address = s->dma_length = 0;
  2354. }
  2355. domain_flush_complete(domain);
  2356. spin_unlock_irqrestore(&domain->lock, flags);
  2357. }
  2358. /*
  2359. * The exported alloc_coherent function for dma_ops.
  2360. */
  2361. static void *alloc_coherent(struct device *dev, size_t size,
  2362. dma_addr_t *dma_addr, gfp_t flag,
  2363. struct dma_attrs *attrs)
  2364. {
  2365. unsigned long flags;
  2366. void *virt_addr;
  2367. struct protection_domain *domain;
  2368. phys_addr_t paddr;
  2369. u64 dma_mask = dev->coherent_dma_mask;
  2370. INC_STATS_COUNTER(cnt_alloc_coherent);
  2371. domain = get_domain(dev);
  2372. if (PTR_ERR(domain) == -EINVAL) {
  2373. virt_addr = (void *)__get_free_pages(flag, get_order(size));
  2374. *dma_addr = __pa(virt_addr);
  2375. return virt_addr;
  2376. } else if (IS_ERR(domain))
  2377. return NULL;
  2378. dma_mask = dev->coherent_dma_mask;
  2379. flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
  2380. flag |= __GFP_ZERO;
  2381. virt_addr = (void *)__get_free_pages(flag, get_order(size));
  2382. if (!virt_addr)
  2383. return NULL;
  2384. paddr = virt_to_phys(virt_addr);
  2385. if (!dma_mask)
  2386. dma_mask = *dev->dma_mask;
  2387. spin_lock_irqsave(&domain->lock, flags);
  2388. *dma_addr = __map_single(dev, domain->priv, paddr,
  2389. size, DMA_BIDIRECTIONAL, true, dma_mask);
  2390. if (*dma_addr == DMA_ERROR_CODE) {
  2391. spin_unlock_irqrestore(&domain->lock, flags);
  2392. goto out_free;
  2393. }
  2394. domain_flush_complete(domain);
  2395. spin_unlock_irqrestore(&domain->lock, flags);
  2396. return virt_addr;
  2397. out_free:
  2398. free_pages((unsigned long)virt_addr, get_order(size));
  2399. return NULL;
  2400. }
  2401. /*
  2402. * The exported free_coherent function for dma_ops.
  2403. */
  2404. static void free_coherent(struct device *dev, size_t size,
  2405. void *virt_addr, dma_addr_t dma_addr,
  2406. struct dma_attrs *attrs)
  2407. {
  2408. unsigned long flags;
  2409. struct protection_domain *domain;
  2410. INC_STATS_COUNTER(cnt_free_coherent);
  2411. domain = get_domain(dev);
  2412. if (IS_ERR(domain))
  2413. goto free_mem;
  2414. spin_lock_irqsave(&domain->lock, flags);
  2415. __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
  2416. domain_flush_complete(domain);
  2417. spin_unlock_irqrestore(&domain->lock, flags);
  2418. free_mem:
  2419. free_pages((unsigned long)virt_addr, get_order(size));
  2420. }
  2421. /*
  2422. * This function is called by the DMA layer to find out if we can handle a
  2423. * particular device. It is part of the dma_ops.
  2424. */
  2425. static int amd_iommu_dma_supported(struct device *dev, u64 mask)
  2426. {
  2427. return check_device(dev);
  2428. }
  2429. /*
  2430. * The function for pre-allocating protection domains.
  2431. *
2432. * Once the driver core informs the DMA layer when a driver grabs a
2433. * device, we won't need to preallocate the protection domains anymore.
  2434. * For now we have to.
  2435. */
  2436. static void __init prealloc_protection_domains(void)
  2437. {
  2438. struct iommu_dev_data *dev_data;
  2439. struct dma_ops_domain *dma_dom;
  2440. struct pci_dev *dev = NULL;
  2441. u16 devid;
  2442. for_each_pci_dev(dev) {
  2443. /* Do we handle this device? */
  2444. if (!check_device(&dev->dev))
  2445. continue;
  2446. dev_data = get_dev_data(&dev->dev);
  2447. if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
  2448. /* Make sure passthrough domain is allocated */
  2449. alloc_passthrough_domain();
  2450. dev_data->passthrough = true;
  2451. attach_device(&dev->dev, pt_domain);
  2452. pr_info("AMD-Vi: Using passthrough domain for device %s\n",
  2453. dev_name(&dev->dev));
  2454. }
  2455. /* Is there already any domain for it? */
  2456. if (domain_for_device(&dev->dev))
  2457. continue;
  2458. devid = get_device_id(&dev->dev);
  2459. dma_dom = dma_ops_domain_alloc();
  2460. if (!dma_dom)
  2461. continue;
  2462. init_unity_mappings_for_device(dma_dom, devid);
  2463. dma_dom->target_dev = devid;
  2464. attach_device(&dev->dev, &dma_dom->domain);
  2465. list_add_tail(&dma_dom->list, &iommu_pd_list);
  2466. }
  2467. }
  2468. static struct dma_map_ops amd_iommu_dma_ops = {
  2469. .alloc = alloc_coherent,
  2470. .free = free_coherent,
  2471. .map_page = map_page,
  2472. .unmap_page = unmap_page,
  2473. .map_sg = map_sg,
  2474. .unmap_sg = unmap_sg,
  2475. .dma_supported = amd_iommu_dma_supported,
  2476. };
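/*
 * Set the per-device dma_ops for all PCI devices: devices handled by
 * the IOMMU get amd_iommu_dma_ops, passthrough devices get
 * nommu_dma_ops. Returns the number of devices not handled at all.
 */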
  2477. static unsigned device_dma_ops_init(void)
  2478. {
  2479. struct iommu_dev_data *dev_data;
  2480. struct pci_dev *pdev = NULL;
  2481. unsigned unhandled = 0;
  2482. for_each_pci_dev(pdev) {
  2483. if (!check_device(&pdev->dev)) {
  2484. iommu_ignore_device(&pdev->dev);
  2485. unhandled += 1;
  2486. continue;
  2487. }
  2488. dev_data = get_dev_data(&pdev->dev);
  2489. if (!dev_data->passthrough)
  2490. pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
  2491. else
  2492. pdev->dev.archdata.dma_ops = &nommu_dma_ops;
  2493. }
  2494. return unhandled;
  2495. }
  2496. /*
2497. * The function which glues the AMD IOMMU driver into dma_ops.
  2498. */
  2499. void __init amd_iommu_init_api(void)
  2500. {
  2501. bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
  2502. }
  2503. int __init amd_iommu_init_dma_ops(void)
  2504. {
  2505. struct amd_iommu *iommu;
  2506. int ret, unhandled;
  2507. /*
  2508. * first allocate a default protection domain for every IOMMU we
  2509. * found in the system. Devices not assigned to any other
  2510. * protection domain will be assigned to the default one.
  2511. */
  2512. for_each_iommu(iommu) {
  2513. iommu->default_dom = dma_ops_domain_alloc();
  2514. if (iommu->default_dom == NULL)
  2515. return -ENOMEM;
  2516. iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
  2517. ret = iommu_init_unity_mappings(iommu);
  2518. if (ret)
  2519. goto free_domains;
  2520. }
  2521. /*
  2522. * Pre-allocate the protection domains for each device.
  2523. */
  2524. prealloc_protection_domains();
  2525. iommu_detected = 1;
  2526. swiotlb = 0;
2527. /* Make the dma_ops finally visible to the drivers */
  2528. unhandled = device_dma_ops_init();
  2529. if (unhandled && max_pfn > MAX_DMA32_PFN) {
  2530. /* There are unhandled devices - initialize swiotlb for them */
  2531. swiotlb = 1;
  2532. }
  2533. amd_iommu_stats_init();
  2534. if (amd_iommu_unmap_flush)
  2535. pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
  2536. else
  2537. pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
  2538. return 0;
  2539. free_domains:
  2540. for_each_iommu(iommu) {
  2541. dma_ops_domain_free(iommu->default_dom);
  2542. }
  2543. return ret;
  2544. }
  2545. /*****************************************************************************
  2546. *
  2547. * The following functions belong to the exported interface of AMD IOMMU
  2548. *
  2549. * This interface allows access to lower level functions of the IOMMU
2550. * like protection domain handling and assignment of devices to domains
  2551. * which is not possible with the dma_ops interface.
  2552. *
  2553. *****************************************************************************/
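/*
 * Detach all devices from a protection domain. Used before the domain
 * is destroyed through the IOMMU-API.
 */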
  2554. static void cleanup_domain(struct protection_domain *domain)
  2555. {
  2556. struct iommu_dev_data *dev_data, *next;
  2557. unsigned long flags;
  2558. write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  2559. list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
  2560. __detach_device(dev_data);
  2561. atomic_set(&dev_data->bind, 0);
  2562. }
  2563. write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  2564. }
  2565. static void protection_domain_free(struct protection_domain *domain)
  2566. {
  2567. if (!domain)
  2568. return;
  2569. del_domain_from_list(domain);
  2570. if (domain->id)
  2571. domain_id_free(domain->id);
  2572. kfree(domain);
  2573. }
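/*
 * Allocate and initialize an empty protection domain for use by the
 * IOMMU-API and the passthrough code. The page-table root is set up
 * by the callers.
 */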
  2574. static struct protection_domain *protection_domain_alloc(void)
  2575. {
  2576. struct protection_domain *domain;
  2577. domain = kzalloc(sizeof(*domain), GFP_KERNEL);
  2578. if (!domain)
  2579. return NULL;
  2580. spin_lock_init(&domain->lock);
  2581. mutex_init(&domain->api_lock);
  2582. domain->id = domain_id_alloc();
  2583. if (!domain->id)
  2584. goto out_err;
  2585. INIT_LIST_HEAD(&domain->dev_list);
  2586. add_domain_to_list(domain);
  2587. return domain;
  2588. out_err:
  2589. kfree(domain);
  2590. return NULL;
  2591. }
  2592. static int __init alloc_passthrough_domain(void)
  2593. {
  2594. if (pt_domain != NULL)
  2595. return 0;
  2596. /* allocate passthrough domain */
  2597. pt_domain = protection_domain_alloc();
  2598. if (!pt_domain)
  2599. return -ENOMEM;
  2600. pt_domain->mode = PAGE_MODE_NONE;
  2601. return 0;
  2602. }
  2603. static int amd_iommu_domain_init(struct iommu_domain *dom)
  2604. {
  2605. struct protection_domain *domain;
  2606. domain = protection_domain_alloc();
  2607. if (!domain)
  2608. goto out_free;
  2609. domain->mode = PAGE_MODE_3_LEVEL;
  2610. domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
  2611. if (!domain->pt_root)
  2612. goto out_free;
  2613. domain->iommu_domain = dom;
  2614. dom->priv = domain;
  2615. dom->geometry.aperture_start = 0;
  2616. dom->geometry.aperture_end = ~0ULL;
  2617. dom->geometry.force_aperture = true;
  2618. return 0;
  2619. out_free:
  2620. protection_domain_free(domain);
  2621. return -ENOMEM;
  2622. }
  2623. static void amd_iommu_domain_destroy(struct iommu_domain *dom)
  2624. {
  2625. struct protection_domain *domain = dom->priv;
  2626. if (!domain)
  2627. return;
  2628. if (domain->dev_cnt > 0)
  2629. cleanup_domain(domain);
  2630. BUG_ON(domain->dev_cnt != 0);
  2631. if (domain->mode != PAGE_MODE_NONE)
  2632. free_pagetable(domain);
  2633. if (domain->flags & PD_IOMMUV2_MASK)
  2634. free_gcr3_table(domain);
  2635. protection_domain_free(domain);
  2636. dom->priv = NULL;
  2637. }
  2638. static void amd_iommu_detach_device(struct iommu_domain *dom,
  2639. struct device *dev)
  2640. {
  2641. struct iommu_dev_data *dev_data = dev->archdata.iommu;
  2642. struct amd_iommu *iommu;
  2643. u16 devid;
  2644. if (!check_device(dev))
  2645. return;
  2646. devid = get_device_id(dev);
  2647. if (dev_data->domain != NULL)
  2648. detach_device(dev);
  2649. iommu = amd_iommu_rlookup_table[devid];
  2650. if (!iommu)
  2651. return;
  2652. iommu_completion_wait(iommu);
  2653. }
  2654. static int amd_iommu_attach_device(struct iommu_domain *dom,
  2655. struct device *dev)
  2656. {
  2657. struct protection_domain *domain = dom->priv;
  2658. struct iommu_dev_data *dev_data;
  2659. struct amd_iommu *iommu;
  2660. int ret;
  2661. if (!check_device(dev))
  2662. return -EINVAL;
  2663. dev_data = dev->archdata.iommu;
  2664. iommu = amd_iommu_rlookup_table[dev_data->devid];
  2665. if (!iommu)
  2666. return -EINVAL;
  2667. if (dev_data->domain)
  2668. detach_device(dev);
  2669. ret = attach_device(dev, domain);
  2670. iommu_completion_wait(iommu);
  2671. return ret;
  2672. }
  2673. static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
  2674. phys_addr_t paddr, size_t page_size, int iommu_prot)
  2675. {
  2676. struct protection_domain *domain = dom->priv;
  2677. int prot = 0;
  2678. int ret;
  2679. if (domain->mode == PAGE_MODE_NONE)
  2680. return -EINVAL;
  2681. if (iommu_prot & IOMMU_READ)
  2682. prot |= IOMMU_PROT_IR;
  2683. if (iommu_prot & IOMMU_WRITE)
  2684. prot |= IOMMU_PROT_IW;
  2685. mutex_lock(&domain->api_lock);
  2686. ret = iommu_map_page(domain, iova, paddr, prot, page_size);
  2687. mutex_unlock(&domain->api_lock);
  2688. return ret;
  2689. }
  2690. static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
  2691. size_t page_size)
  2692. {
  2693. struct protection_domain *domain = dom->priv;
  2694. size_t unmap_size;
  2695. if (domain->mode == PAGE_MODE_NONE)
  2696. return -EINVAL;
  2697. mutex_lock(&domain->api_lock);
  2698. unmap_size = iommu_unmap_page(domain, iova, page_size);
  2699. mutex_unlock(&domain->api_lock);
  2700. domain_flush_tlb_pde(domain);
  2701. return unmap_size;
  2702. }
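/*
 * Translate an IO virtual address back to the physical address by
 * walking the domain page table. Large pages are handled by masking
 * with the page size found in the PTE.
 */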
  2703. static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
  2704. dma_addr_t iova)
  2705. {
  2706. struct protection_domain *domain = dom->priv;
  2707. unsigned long offset_mask;
  2708. phys_addr_t paddr;
  2709. u64 *pte, __pte;
  2710. if (domain->mode == PAGE_MODE_NONE)
  2711. return iova;
  2712. pte = fetch_pte(domain, iova);
  2713. if (!pte || !IOMMU_PTE_PRESENT(*pte))
  2714. return 0;
  2715. if (PM_PTE_LEVEL(*pte) == 0)
  2716. offset_mask = PAGE_SIZE - 1;
  2717. else
  2718. offset_mask = PTE_PAGE_SIZE(*pte) - 1;
  2719. __pte = *pte & PM_ADDR_MASK;
  2720. paddr = (__pte & ~offset_mask) | (iova & offset_mask);
  2721. return paddr;
  2722. }
  2723. static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
  2724. unsigned long cap)
  2725. {
  2726. switch (cap) {
  2727. case IOMMU_CAP_CACHE_COHERENCY:
  2728. return 1;
  2729. case IOMMU_CAP_INTR_REMAP:
  2730. return irq_remapping_enabled;
  2731. }
  2732. return 0;
  2733. }
  2734. static struct iommu_ops amd_iommu_ops = {
  2735. .domain_init = amd_iommu_domain_init,
  2736. .domain_destroy = amd_iommu_domain_destroy,
  2737. .attach_dev = amd_iommu_attach_device,
  2738. .detach_dev = amd_iommu_detach_device,
  2739. .map = amd_iommu_map,
  2740. .unmap = amd_iommu_unmap,
  2741. .iova_to_phys = amd_iommu_iova_to_phys,
  2742. .domain_has_cap = amd_iommu_domain_has_cap,
  2743. .pgsize_bitmap = AMD_IOMMU_PGSIZES,
  2744. };
  2745. /*****************************************************************************
  2746. *
  2747. * The next functions do a basic initialization of IOMMU for pass through
  2748. * mode
  2749. *
  2750. * In passthrough mode the IOMMU is initialized and enabled but not used for
  2751. * DMA-API translation.
  2752. *
  2753. *****************************************************************************/

int __init amd_iommu_init_passthrough(void)
{
	struct iommu_dev_data *dev_data;
	struct pci_dev *dev = NULL;
	int ret;

	ret = alloc_passthrough_domain();
	if (ret)
		return ret;

	for_each_pci_dev(dev) {
		if (!check_device(&dev->dev))
			continue;

		dev_data = get_dev_data(&dev->dev);
		dev_data->passthrough = true;

		attach_device(&dev->dev, pt_domain);
	}

	amd_iommu_stats_init();

	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");

	return 0;
}

/* IOMMUv2 specific functions */
int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&ppr_notifier, nb);
}
EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);

int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
}
EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);

void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	/* Update data structure */
	domain->mode = PAGE_MODE_NONE;
	domain->updated = true;

	/* Make changes visible to IOMMUs */
	update_domain(domain);

	/* Page-table is not visible to IOMMU anymore, so free it */
	free_pagetable(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}
EXPORT_SYMBOL(amd_iommu_domain_direct_map);

int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;
	int levels, ret;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	/* Number of GCR3 table levels required */
	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
		levels += 1;

	if (levels > amd_iommu_max_glx_val)
		return -EINVAL;

	spin_lock_irqsave(&domain->lock, flags);

	/*
	 * Save us all sanity checks whether devices already in the
	 * domain support IOMMUv2. Just force that the domain has no
	 * devices attached when it is switched into IOMMUv2 mode.
	 */
	ret = -EBUSY;
	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
		goto out;

	ret = -ENOMEM;
	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
	if (domain->gcr3_tbl == NULL)
		goto out;

	domain->glx = levels;
	domain->flags |= PD_IOMMUV2_MASK;
	domain->updated = true;

	update_domain(domain);

	ret = 0;

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
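
/*
 * Illustrative sketch, not part of the original file: the level calculation
 * in amd_iommu_domain_enable_v2() works because each GCR3 table level
 * resolves 9 bits of the PASID.  A hypothetical stand-alone version of the
 * same loop, with a few worked values:
 */
static inline int example_gcr3_levels(int pasids)
{
	int levels = 0;

	/*
	 * pasids <= 512      -> 0 extra levels (single table)
	 * pasids <= 262144   -> 1 extra level  (e.g. 65536 PASIDs)
	 * pasids <= 1 << 20  -> 2 extra levels
	 */
	for (; (pasids - 1) & ~0x1ff; pasids >>= 9)
		levels += 1;

	return levels;
}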

static int __flush_pasid(struct protection_domain *domain, int pasid,
			 u64 address, bool size)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int i, ret;

	if (!(domain->flags & PD_IOMMUV2_MASK))
		return -EINVAL;

	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);

	/*
	 * IOMMU TLB needs to be flushed before Device TLB to
	 * prevent device TLB refill from IOMMU TLB
	 */
	for (i = 0; i < amd_iommus_present; ++i) {
		if (domain->dev_iommu[i] == 0)
			continue;

		ret = iommu_queue_command(amd_iommus[i], &cmd);
		if (ret != 0)
			goto out;
	}

	/* Wait until IOMMU TLB flushes are complete */
	domain_flush_complete(domain);

	/* Now flush device TLBs */
	list_for_each_entry(dev_data, &domain->dev_list, list) {
		struct amd_iommu *iommu;
		int qdep;

		BUG_ON(!dev_data->ats.enabled);

		qdep  = dev_data->ats.qdep;
		iommu = amd_iommu_rlookup_table[dev_data->devid];

		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
				      qdep, address, size);

		ret = iommu_queue_command(iommu, &cmd);
		if (ret != 0)
			goto out;
	}

	/* Wait until all device TLBs are flushed */
	domain_flush_complete(domain);

	ret = 0;

out:
	return ret;
}

static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
				  u64 address)
{
	INC_STATS_COUNTER(invalidate_iotlb);

	return __flush_pasid(domain, pasid, address, false);
}

int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
			 u64 address)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __amd_iommu_flush_page(domain, pasid, address);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_flush_page);

static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
{
	INC_STATS_COUNTER(invalidate_iotlb_all);

	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
			     true);
}

int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __amd_iommu_flush_tlb(domain, pasid);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_flush_tlb);

static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
{
	int index;
	u64 *pte;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		pte   = &root[index];

		if (level == 0)
			break;

		if (!(*pte & GCR3_VALID)) {
			if (!alloc)
				return NULL;

			root = (void *)get_zeroed_page(GFP_ATOMIC);
			if (root == NULL)
				return NULL;

			*pte = __pa(root) | GCR3_VALID;
		}

		root = __va(*pte & PAGE_MASK);

		level -= 1;
	}

	return pte;
}

static int __set_gcr3(struct protection_domain *domain, int pasid,
		      unsigned long cr3)
{
	u64 *pte;

	if (domain->mode != PAGE_MODE_NONE)
		return -EINVAL;

	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
	if (pte == NULL)
		return -ENOMEM;

	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;

	return __amd_iommu_flush_tlb(domain, pasid);
}

static int __clear_gcr3(struct protection_domain *domain, int pasid)
{
	u64 *pte;

	if (domain->mode != PAGE_MODE_NONE)
		return -EINVAL;

	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
	if (pte == NULL)
		return 0;

	*pte = 0;

	return __amd_iommu_flush_tlb(domain, pasid);
}

int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
			      unsigned long cr3)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __set_gcr3(domain, pasid, cr3);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);

int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __clear_gcr3(domain, pasid);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
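
/*
 * Illustrative sketch, not part of the original file: an IOMMUv2 consumer
 * (such as the amd_iommu_v2 driver) binds a PASID to a process address
 * space by handing the physical address of the process' top-level page
 * table to set_gcr3, and clears the entry again on unbind.  The function
 * names below are hypothetical.
 */
static int example_bind_pasid(struct iommu_domain *dom, int pasid,
			      struct mm_struct *mm)
{
	/* Point the PASID at the CPU page table of this mm */
	return amd_iommu_domain_set_gcr3(dom, pasid, __pa(mm->pgd));
}

static void example_unbind_pasid(struct iommu_domain *dom, int pasid)
{
	amd_iommu_domain_clear_gcr3(dom, pasid);
}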

int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
			   int status, int tag)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;

	INC_STATS_COUNTER(complete_ppr);

	dev_data = get_dev_data(&pdev->dev);
	iommu    = amd_iommu_rlookup_table[dev_data->devid];

	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
			   tag, dev_data->pri_tlp);

	return iommu_queue_command(iommu, &cmd);
}
EXPORT_SYMBOL(amd_iommu_complete_ppr);

struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
	struct protection_domain *domain;

	domain = get_domain(&pdev->dev);
	if (IS_ERR(domain))
		return NULL;

	/* Only return IOMMUv2 domains */
	if (!(domain->flags & PD_IOMMUV2_MASK))
		return NULL;

	return domain->iommu_domain;
}
EXPORT_SYMBOL(amd_iommu_get_v2_domain);

void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
{
	struct iommu_dev_data *dev_data;

	if (!amd_iommu_v2_supported())
		return;

	dev_data = get_dev_data(&pdev->dev);
	dev_data->errata |= (1 << erratum);
}
EXPORT_SYMBOL(amd_iommu_enable_device_erratum);

int amd_iommu_device_info(struct pci_dev *pdev,
			  struct amd_iommu_device_info *info)
{
	int max_pasids;
	int pos;

	if (pdev == NULL || info == NULL)
		return -EINVAL;

	if (!amd_iommu_v2_supported())
		return -EINVAL;

	memset(info, 0, sizeof(*info));

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
	if (pos)
		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
	if (pos)
		info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
	if (pos) {
		int features;

		max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
		max_pasids = min(max_pasids, (1 << 20));

		info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
		info->max_pasids = min(pci_max_pasids(pdev), max_pasids);

		features = pci_pasid_features(pdev);
		if (features & PCI_PASID_CAP_EXEC)
			info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
		if (features & PCI_PASID_CAP_PRIV)
			info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
	}

	return 0;
}
EXPORT_SYMBOL(amd_iommu_device_info);
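
/*
 * Illustrative sketch, not part of the original file: a device driver that
 * wants to use IOMMUv2 demand paging would typically query the capabilities
 * reported above before enabling ATS/PRI/PASID on its device.  The function
 * name is made up for the example.
 */
static int example_check_iommuv2_caps(struct pci_dev *pdev)
{
	struct amd_iommu_device_info info;
	int ret;

	ret = amd_iommu_device_info(pdev, &info);
	if (ret)
		return ret;

	/* ATS, PRI and PASID support are all required */
	if (!(info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) ||
	    !(info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) ||
	    !(info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP))
		return -ENODEV;

	dev_info(&pdev->dev, "supports up to %d PASIDs\n", info.max_pasids);

	return 0;
}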

#ifdef CONFIG_IRQ_REMAP

/*****************************************************************************
 *
 * Interrupt Remapping Implementation
 *
 *****************************************************************************/

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL	(2ULL << 60)
#define DTE_IRQ_TABLE_LEN	(8ULL << 1)
#define DTE_IRQ_REMAP_ENABLE	1ULL

static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
{
	u64 dte;

	dte	= amd_iommu_dev_table[devid].data[2];
	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
	dte	|= virt_to_phys(table->table);
	dte	|= DTE_IRQ_REMAP_INTCTL;
	dte	|= DTE_IRQ_TABLE_LEN;
	dte	|= DTE_IRQ_REMAP_ENABLE;

	amd_iommu_dev_table[devid].data[2] = dte;
}

#define IRTE_ALLOCATED (~1U)

static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
{
	struct irq_remap_table *table = NULL;
	struct amd_iommu *iommu;
	unsigned long flags;
	u16 alias;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		goto out_unlock;

	table = irq_lookup_table[devid];
	if (table)
		goto out;

	alias = amd_iommu_alias_table[devid];
	table = irq_lookup_table[alias];
	if (table) {
		irq_lookup_table[devid] = table;
		set_dte_irq_entry(devid, table);
		iommu_flush_dte(iommu, devid);
		goto out;
	}

	/* Nothing there yet, allocate new irq remapping table */
	table = kzalloc(sizeof(*table), GFP_ATOMIC);
	if (!table)
		goto out;

	/* Initialize table spin-lock */
	spin_lock_init(&table->lock);

	if (ioapic)
		/* Keep the first 32 indexes free for IOAPIC interrupts */
		table->min_index = 32;

	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
	if (!table->table) {
		kfree(table);
		table = NULL;
		goto out;
	}

	memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));

	if (ioapic) {
		int i;

		for (i = 0; i < 32; ++i)
			table->table[i] = IRTE_ALLOCATED;
	}

	irq_lookup_table[devid] = table;
	set_dte_irq_entry(devid, table);
	iommu_flush_dte(iommu, devid);
	if (devid != alias) {
		irq_lookup_table[alias] = table;
		set_dte_irq_entry(alias, table);
		iommu_flush_dte(iommu, alias);
	}

out:
	iommu_completion_wait(iommu);

out_unlock:
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return table;
}

static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
{
	struct irq_remap_table *table;
	unsigned long flags;
	int index, c;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENODEV;

	spin_lock_irqsave(&table->lock, flags);

	/* Scan table for free entries */
	for (c = 0, index = table->min_index;
	     index < MAX_IRQS_PER_TABLE;
	     ++index) {
		if (table->table[index] == 0)
			c += 1;
		else
			c = 0;

		if (c == count) {
			struct irq_2_irte *irte_info;

			for (; c != 0; --c)
				table->table[index - c + 1] = IRTE_ALLOCATED;

			index -= count - 1;

			cfg->remapped    = 1;
			irte_info        = &cfg->irq_2_irte;
			irte_info->devid = devid;
			irte_info->index = index;

			goto out;
		}
	}

	index = -ENOSPC;

out:
	spin_unlock_irqrestore(&table->lock, flags);

	return index;
}

static int get_irte(u16 devid, int index, union irte *irte)
{
	struct irq_remap_table *table;
	unsigned long flags;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENOMEM;

	spin_lock_irqsave(&table->lock, flags);
	irte->val = table->table[index];
	spin_unlock_irqrestore(&table->lock, flags);

	return 0;
}

static int modify_irte(u16 devid, int index, union irte irte)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		return -EINVAL;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENOMEM;

	spin_lock_irqsave(&table->lock, flags);
	table->table[index] = irte.val;
	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);

	return 0;
}

static void free_irte(u16 devid, int index)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		return;

	table = get_irq_table(devid, false);
	if (!table)
		return;

	spin_lock_irqsave(&table->lock, flags);
	table->table[index] = 0;
	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
}

static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
			      unsigned int destination, int vector,
			      struct io_apic_irq_attr *attr)
{
	struct irq_remap_table *table;
	struct irq_2_irte *irte_info;
	struct irq_cfg *cfg;
	union irte irte;
	int ioapic_id;
	int index;
	int devid;
	int ret;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	irte_info = &cfg->irq_2_irte;
	ioapic_id = mpc_ioapic_id(attr->ioapic);
	devid     = get_ioapic_devid(ioapic_id);

	if (devid < 0)
		return devid;

	table = get_irq_table(devid, true);
	if (table == NULL)
		return -ENOMEM;

	index = attr->ioapic_pin;

	/* Setup IRQ remapping info */
	cfg->remapped    = 1;
	irte_info->devid = devid;
	irte_info->index = index;

	/* Setup IRTE for IOMMU */
	irte.val                = 0;
	irte.fields.vector      = vector;
	irte.fields.int_type    = apic->irq_delivery_mode;
	irte.fields.destination = destination;
	irte.fields.dm          = apic->irq_dest_mode;
	irte.fields.valid       = 1;

	ret = modify_irte(devid, index, irte);
	if (ret)
		return ret;

	/* Setup IOAPIC entry */
	memset(entry, 0, sizeof(*entry));

	entry->vector   = index;
	entry->mask     = 0;
	entry->trigger  = attr->trigger;
	entry->polarity = attr->polarity;

	/*
	 * Mask level triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

static int set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_2_irte *irte_info;
	unsigned int dest, irq;
	struct irq_cfg *cfg;
	union irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -1;

	cfg       = data->chip_data;
	irq       = data->irq;
	irte_info = &cfg->irq_2_irte;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irte_info->devid, irte_info->index, &irte))
		return -EBUSY;

	if (assign_irq_vector(irq, cfg, mask))
		return -EBUSY;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.fields.vector      = cfg->vector;
	irte.fields.destination = dest;

	modify_irte(irte_info->devid, irte_info->index, irte);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);

	return 0;
}

static int free_irq(int irq)
{
	struct irq_2_irte *irte_info;
	struct irq_cfg *cfg;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	irte_info = &cfg->irq_2_irte;

	free_irte(irte_info->devid, irte_info->index);

	return 0;
}

static void compose_msi_msg(struct pci_dev *pdev,
			    unsigned int irq, unsigned int dest,
			    struct msi_msg *msg, u8 hpet_id)
{
	struct irq_2_irte *irte_info;
	struct irq_cfg *cfg;
	union irte irte;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return;

	irte_info = &cfg->irq_2_irte;

	irte.val                = 0;
	irte.fields.vector      = cfg->vector;
	irte.fields.int_type    = apic->irq_delivery_mode;
	irte.fields.destination = dest;
	irte.fields.dm          = apic->irq_dest_mode;
	irte.fields.valid       = 1;

	modify_irte(irte_info->devid, irte_info->index, irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO;
	msg->data       = irte_info->index;
}

static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
{
	struct irq_cfg *cfg;
	int index;
	u16 devid;

	if (!pdev)
		return -EINVAL;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	devid = get_device_id(&pdev->dev);
	index = alloc_irq_index(cfg, devid, nvec);

	return index < 0 ? MAX_IRQS_PER_TABLE : index;
}

static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			 int index, int offset)
{
	struct irq_2_irte *irte_info;
	struct irq_cfg *cfg;
	u16 devid;

	if (!pdev)
		return -EINVAL;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	if (index >= MAX_IRQS_PER_TABLE)
		return 0;

	devid     = get_device_id(&pdev->dev);
	irte_info = &cfg->irq_2_irte;

	cfg->remapped    = 1;
	irte_info->devid = devid;
	irte_info->index = index + offset;

	return 0;
}

static int setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct irq_2_irte *irte_info;
	struct irq_cfg *cfg;
	int index, devid;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	irte_info = &cfg->irq_2_irte;
	devid     = get_hpet_devid(id);
	if (devid < 0)
		return devid;

	index = alloc_irq_index(cfg, devid, 1);
	if (index < 0)
		return index;

	cfg->remapped    = 1;
	irte_info->devid = devid;
	irte_info->index = index;

	return 0;
}

struct irq_remap_ops amd_iommu_irq_ops = {
	.supported		= amd_iommu_supported,
	.prepare		= amd_iommu_prepare,
	.enable			= amd_iommu_enable,
	.disable		= amd_iommu_disable,
	.reenable		= amd_iommu_reenable,
	.enable_faulting	= amd_iommu_enable_faulting,
	.setup_ioapic_entry	= setup_ioapic_entry,
	.set_affinity		= set_affinity,
	.free_irq		= free_irq,
	.compose_msi_msg	= compose_msi_msg,
	.msi_alloc_irq		= msi_alloc_irq,
	.msi_setup_irq		= msi_setup_irq,
	.setup_hpet_msi		= setup_hpet_msi,
};
#endif