ntb_hw_intel.c

/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME	"ntb_hw_intel"
#define NTB_DESC	"Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
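
/*
 * bar0_off()/bar2_off() below compute the offset of a per-BAR register
 * within a register group: each BAR gets a 4-byte slot, and bar2_off()
 * re-bases the index at BAR 2, the first BAR usable as a memory window
 * (BAR 0/1 map the NTB configuration registers themselves).
 */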
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
static const struct intel_ntb_reg skx_reg;
static const struct intel_ntb_alt_reg skx_pri_reg;
static const struct intel_ntb_alt_reg skx_b2b_reg;
static const struct intel_ntb_xlat_reg skx_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;
static const struct ntb_dev_ops intel_ntb3_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
		 "value of zero or positive starts from first mw idx, and a "
		 "negative value starts from last mw idx. Both sides MUST "
		 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
		 "ntb so that the peer ntb only occupies the first half of "
		 "the mw, so the second half can still be used as a mw. Both "
		 "sides MUST set the same value here!");
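
/*
 * Example (hypothetical values): to dedicate the last memory window to
 * peer access while still sharing its second half as a usable window,
 * both hosts would load the driver with matching parameters, e.g.:
 *
 *   modprobe ntb_hw_intel b2b_mw_idx=-1 b2b_mw_share=1
 */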
module_param_named(xeon_b2b_usd_bar2_addr64,
		   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
		 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
		   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
		 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
		   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
		 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
		   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
		 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
		   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
		 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
		   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
		 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
		   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
		 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
		   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
		 "XEON B2B DSD split-BAR 5 32-bit address");

static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
static int xeon_init_isr(struct intel_ntb_dev *ndev);
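
/*
 * Some architectures do not provide native 64-bit MMIO accessors.  Where
 * readq/writeq are missing, fall back to a pair of 32-bit accesses (low
 * dword first).  The two halves are not atomic with respect to the device.
 */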
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
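
/*
 * Device identification is done purely by PCI device ID: the legacy Xeon
 * platforms (JSF, SNB, IVT, HSX, BDX) each expose separate IDs for their
 * SS, PS and B2B port configurations, while Skylake Xeon exposes a single
 * B2B device ID.
 */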
static inline int pdev_is_xeon(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		return 1;
	}
	return 0;
}

static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
		return 1;

	return 0;
}
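
/*
 * Hardware errata on some parts make certain registers unusable: with the
 * SDOORBELL_LOCKUP erratum the doorbell registers are only safe in B2B
 * topology (where the B2B window provides an alternate path), and with
 * SB01BASE_LOCKUP neither doorbells nor scratchpads are safe.  The "unsafe"
 * flags computed here make the accessors warn unless the client has
 * acknowledged the hazard through ntb_db_is_unsafe()/ntb_spad_is_unsafe().
 */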
static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
	ndev->unsafe_flags = 0;
	ndev->unsafe_flags_ignore = 0;

	/* Only B2B has a workaround to avoid SDOORBELL */
	if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
		if (!ntb_topo_is_b2b(ndev->ntb.topo))
			ndev->unsafe_flags |= NTB_UNSAFE_DB;

	/* No low level workaround to avoid SB01BASE */
	if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
		ndev->unsafe_flags |= NTB_UNSAFE_DB;
		ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
	}
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
				 unsigned long flag)
{
	return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
				     unsigned long flag)
{
	flag &= ndev->unsafe_flags;
	ndev->unsafe_flags_ignore |= flag;

	return !!flag;
}

static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx >= ndev->mw_count)
		return -EINVAL;
	return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
			       phys_addr_t *db_addr, resource_size_t *db_size,
			       phys_addr_t reg_addr, unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_addr) {
		*db_addr = reg_addr + reg;
		dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
	}

	if (db_size) {
		*db_size = ndev->reg->db_size;
		dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
	}

	return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
			       void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
				void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	ndev->reg->db_iowrite(db_bits, mmio);

	return 0;
}
static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				   void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask |= db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				     void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask &= ~db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
	u64 shift, mask;

	shift = ndev->db_vec_shift;
	mask = BIT_ULL(shift) - 1;

	return mask << (shift * db_vector);
}

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
				 phys_addr_t *spad_addr, phys_addr_t reg_addr,
				 unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	if (spad_addr) {
		*spad_addr = reg_addr + reg + (idx << 2);
		dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
			*spad_addr);
	}

	return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
				 void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
				  void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	iowrite32(val, mmio + (idx << 2));

	return 0;
}
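
/*
 * Each interrupt vector owns a contiguous slice of db_vec_shift doorbell
 * bits.  ndev_interrupt() turns the vector number back into that bit slice,
 * then dispatches a link event (for the link status bit) and/or a doorbell
 * event to the NTB core.
 */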
static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
	u64 vec_mask;

	vec_mask = ndev_vec_mask(ndev, vec);

	if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
		vec_mask |= ndev->db_link_mask;

	dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);

	ndev->last_ts = jiffies;

	if (vec_mask & ndev->db_link_mask) {
		if (ndev->reg->poll_link(ndev))
			ntb_link_event(&ndev->ntb);
	}

	if (vec_mask & ndev->db_valid_mask)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct intel_ntb_vec *nvec = dev;

	dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n",
		irq, nvec->num);

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct intel_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}
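
/*
 * Interrupt setup tries MSI-X first (one vector per doorbell slice), then
 * falls back to a single MSI vector, and finally to legacy shared INTx.  In
 * the single-vector fallbacks every doorbell bit is folded into vector 0 by
 * using total_shift as the per-vector shift.
 */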
static int ndev_init_isr(struct intel_ntb_dev *ndev,
			 int msix_min, int msix_max,
			 int msix_shift, int total_shift)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev->ntb.pdev;
	node = dev_to_node(&pdev->dev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	/* Try to set up msix irq */

	ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
	ndev->db_vec_count = msix_count;
	ndev->db_vec_shift = msix_shift;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */

	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(&pdev->dev, "Using msi interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */

	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(&pdev->dev, "Using intx interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int i;

	pdev = ndev->ntb.pdev;

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	if (ndev->msix) {
		i = ndev->db_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
	}
}
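
/*
 * The debugfs "info" file dumps the cached device state plus a raw register
 * snapshot, capped at 0x800 bytes.  ndev_ntb3_debugfs_read() handles the
 * Skylake (gen3) register layout; ndev_ntb_debugfs_read() handles the
 * legacy Xeon layout.
 */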
static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
				      size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev))
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);

	if (ntb_topo_is_b2b(ndev->ntb.topo)) {
		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Outgoing B2B XLAT:\n");

		u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);

		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Secondary BAR:\n");

		u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR0 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2 -\t\t%#018llx\n", u.v64);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Statistics:\n");

	u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Upstream Memory Miss -\t%u\n", u.v16);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Hardware Errors:\n");

	if (!pci_read_config_word(ndev->ntb.pdev,
				  SKX_DEVSTS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "DEVSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_word(ndev->ntb.pdev,
				  SKX_LINK_STATUS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "LNKSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   SKX_UNCERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "UNCERRSTS -\t\t%#06x\n", u.v32);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   SKX_CORERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "CORERRSTS -\t\t%#06x\n", u.v32);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
				     size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	struct pci_dev *pdev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; u8 v8; } u;

	ndev = filp->private_data;
	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	if (ndev->b2b_idx != UINT_MAX) {
		off += scnprintf(buf + off, buf_size - off,
				 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
		off += scnprintf(buf + off, buf_size - off,
				 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "BAR4 Split -\t\t%s\n",
			 ndev->bar4_split ? "yes" : "no");

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Window Size:\n");

	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
	off += scnprintf(buf + off, buf_size - off,
			 "PBAR23SZ %hhu\n", u.v8);
	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR45SZ %hhu\n", u.v8);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR4SZ %hhu\n", u.v8);
		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR5SZ %hhu\n", u.v8);
	}

	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
	off += scnprintf(buf + off, buf_size - off,
			 "SBAR23SZ %hhu\n", u.v8);
	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR45SZ %hhu\n", u.v8);
	} else {
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR4SZ %hhu\n", u.v8);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR5SZ %hhu\n", u.v8);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	if (ndev->bar4_split) {
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT4 -\t\t\t%#06x\n", u.v32);

		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT5 -\t\t\t%#06x\n", u.v32);
	} else {
		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT45 -\t\t%#018llx\n", u.v64);
	}

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	if (ndev->bar4_split) {
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT4 -\t\t\t%#06x\n", u.v32);
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT5 -\t\t\t%#06x\n", u.v32);
	} else {
		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT45 -\t\t\t%#018llx\n", u.v64);
	}

	if (pdev_is_xeon(pdev)) {
		if (ntb_topo_is_b2b(ndev->ntb.topo)) {
			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Outgoing B2B XLAT:\n");

			u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT4 -\t\t%#06x\n",
						 u.v32);
				u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT5 -\t\t%#06x\n",
						 u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT45 -\t\t%#018llx\n",
						 u.v64);
			}

			u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT4 -\t\t%#06x\n",
						 u.v32);
				u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT5 -\t\t%#06x\n",
						 u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT45 -\t\t%#018llx\n",
						 u.v64);
			}

			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Secondary BAR:\n");

			u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR01 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR4 -\t\t\t%#06x\n", u.v32);
				u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR5 -\t\t\t%#06x\n", u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR45 -\t\t%#018llx\n",
						 u.v64);
			}
		}

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Statistics:\n");

		u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "Upstream Memory Miss -\t%u\n", u.v16);

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Hardware Errors:\n");

		if (!pci_read_config_word(pdev,
					  XEON_DEVSTS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "DEVSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_word(pdev,
					  XEON_LINK_STATUS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "LNKSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_dword(pdev,
					   XEON_UNCERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "UNCERRSTS -\t\t%#06x\n", u.v32);

		if (!pci_read_config_dword(pdev,
					   XEON_CORERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "CORERRSTS -\t\t%#06x\n", u.v32);
	}

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev = filp->private_data;

	if (pdev_is_xeon(ndev->ntb.pdev))
		return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
	else if (pdev_is_skx_xeon(ndev->ntb.pdev))
		return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);

	return -ENXIO;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(pci_name(ndev->ntb.pdev),
					   debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &intel_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	return ntb_ndev(ntb)->mw_count;
}
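
/*
 * When a memory window is consumed by the B2B address workaround and not
 * shared (b2b_off == 0), that window is hidden from clients: client window
 * indexes at or above b2b_idx are shifted up by one before being mapped to
 * a BAR.
 */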
static int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
				  resource_size_t *addr_align,
				  resource_size_t *size_align,
				  resource_size_t *size_max)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	resource_size_t bar_size, mw_size;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	if (addr_align)
		*addr_align = pci_resource_len(ndev->ntb.pdev, bar);

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = mw_size;

	return 0;
}
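
/*
 * Program an inbound translation: the DMA address must be aligned to the
 * BAR size (a hardware requirement), the limit register clips the window
 * when the requested size is smaller than the BAR, and every write is read
 * back to verify that the device accepted it.
 */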
static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				  dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long base_reg, xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

	if (bar < 4 || !ndev->bar4_split) {
		base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite64(addr, mmio + xlat_reg);
		reg_val = ioread64(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite64(limit, mmio + limit_reg);
		reg_val = ioread64(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}
	} else {
		/* split bar addr range must all be 32 bit */
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite32(addr, mmio + xlat_reg);
		reg_val = ioread32(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite32(limit, mmio + limit_reg);
		reg_val = ioread32(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite32(base, mmio + limit_reg);
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}

static u64 intel_ntb_link_is_up(struct ntb_dev *ntb,
				enum ntb_speed *speed,
				enum ntb_width *width)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (ndev->reg->link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
		return 1;
	} else {
		/* TODO MAYBE: is it possible to observe the link speed and
		 * width while link is training? */
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;
		return 0;
	}
}
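
/*
 * Link enable/disable is refused on the secondary side (NTB_TOPO_SEC),
 * which cannot control the link.  Enabling clears the disable and
 * config-lock bits and turns on snooping for the memory window BARs;
 * disabling does the reverse.
 */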
static int intel_ntb_link_enable(struct ntb_dev *ntb,
				 enum ntb_speed max_speed,
				 enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(&ntb->pdev->dev,
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);

	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	if (ndev->bar4_split)
		ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(&ntb->pdev->dev, "Disabling link\n");

	/* Bring NTB link down */
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
	if (ndev->bar4_split)
		ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
	ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
{
	/* Numbers of inbound and outbound memory windows match */
	return ntb_ndev(ntb)->mw_count;
}

static int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
				      phys_addr_t *base, resource_size_t *size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar) +
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar) -
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->db_vec_count;
}

static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector > ndev->db_vec_count)
		return 0;

	return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_bell);
}

static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_set_mask(ndev, db_bits,
				ndev->self_mmio +
				ndev->self_reg->db_mask);
}

static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_clear_mask(ndev, db_bits,
				  ndev->self_mmio +
				  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
				  phys_addr_t *db_addr,
				  resource_size_t *db_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
			    ndev->peer_reg->db_bell);
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->peer_mmio +
			     ndev->peer_reg->db_bell);
}

static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

static int intel_ntb_spad_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->spad_count;
}

static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->self_mmio +
			      ndev->self_reg->spad);
}

static int intel_ntb_spad_write(struct ntb_dev *ntb,
				int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->self_mmio +
			       ndev->self_reg->spad);
}

static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
				    phys_addr_t *spad_addr)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
			      ndev->peer_reg->spad);
}

static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, sidx,
			      ndev->peer_mmio +
			      ndev->peer_reg->spad);
}

static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
				     int sidx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, sidx, val,
			       ndev->peer_mmio +
			       ndev->peer_reg->spad);
}
/* Skylake Xeon NTB */

static int skx_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_clear);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  SKX_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

static u64 skx_db_ioread(void __iomem *mmio)
{
	return ioread64(mmio);
}

static void skx_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite64(bits, mmio);
}

static int skx_init_isr(struct intel_ntb_dev *ndev)
{
	int i;

	/*
	 * The MSIX vectors and the interrupt status bits are not lined up
	 * on Skylake. By default the link status bit is bit 32, however it
	 * is by default MSIX vector0. We need to fixup to line them up.
	 * The vectors at reset are 1-32,0. We need to reprogram to 0-32.
	 */
	for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++)
		iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i);

	/* move link status down one as workaround */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
		iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2,
			 ndev->self_mmio + SKX_INTVEC_OFFSET +
			 (SKX_DB_MSIX_VECTOR_COUNT - 1));
	}

	return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT,
			     SKX_DB_MSIX_VECTOR_COUNT,
			     SKX_DB_MSIX_VECTOR_SHIFT,
			     SKX_DB_TOTAL_SHIFT);
}
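
/*
 * skx_setup_b2b_mw() initializes the incoming windows: the limit registers
 * are set equal to the base addresses so the windows start out zero-length,
 * and the incoming translation addresses are cleared until a client
 * programs a translation.  The peer's registers are reached through the
 * same primary MMIO space, so peer_mmio aliases self_mmio.
 */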
static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
			    const struct intel_b2b_addr *addr,
			    const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	phys_addr_t bar_addr;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	/* setup incoming bar limits == base addrs (zero length windows) */
	bar_addr = addr->bar2_addr64;
	iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
	bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);

	bar_addr = addr->bar4_addr64;
	iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
	bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);

	/* zero incoming translation addrs */
	iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
	iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET);

	ndev->peer_mmio = ndev->self_mmio;

	return 0;
}

static int skx_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;

	ndev->mw_count = XEON_MW_COUNT;
	ndev->spad_count = SKX_SPAD_COUNT;
	ndev->db_count = SKX_DB_COUNT;
	ndev->db_link_mask = SKX_DB_LINK_BIT;

	/* DB fixup: the link bit is moved down to bit 31, so mask it too */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
		ndev->db_link_mask |= BIT_ULL(31);

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &skx_pri_reg;
		ndev->peer_reg = &skx_b2b_reg;
		ndev->xlat_reg = &skx_sec_xlat;

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = skx_setup_b2b_mw(ndev,
					      &xeon_b2b_dsd_addr,
					      &xeon_b2b_usd_addr);
		} else {
			rc = skx_setup_b2b_mw(ndev,
					      &xeon_b2b_usd_addr,
					      &xeon_b2b_dsd_addr);
		}

		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + SKX_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}
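
/*
 * Read the PPD register to determine the topology, apply the Skylake
 * MSIX vector 32 erratum flag, then initialize the NTB registers and ISR.
 */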
static int skx_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc;

	pdev = ndev->ntb.pdev;

	ndev->reg = &skx_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (pdev_is_skx_xeon(pdev))
		ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;

	rc = skx_init_ntb(ndev);
	if (rc)
		return rc;

	return skx_init_isr(ndev);
}

static int intel_ntb3_link_enable(struct ntb_dev *ntb,
				  enum ntb_speed max_speed,
				  enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(&ntb->pdev->dev,
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);

	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}
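
/*
 * Set up an inbound memory window translation on Skylake: write and read
 * back the IMBARXBASE and IMBARXLMT registers, then program the matching
 * EMBARXLMT limit on the external endpoint side.
 */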
static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				   dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
	base = pci_resource_start(ndev->ntb.pdev, bar);

	/* Set the limit if supported and if size is not mw_size */
	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the translation address */
	iowrite64(addr, mmio + xlat_reg);
	reg_val = ioread64(mmio + xlat_reg);
	if (reg_val != addr) {
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);

	/* setup the EP */
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
	base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx));
	base &= ~0xf;

	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);

	return 0;
}
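
/*
 * Ring peer doorbells on Skylake by writing each requested bit's
 * doorbell register individually.
 */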
static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bit;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	while (db_bits) {
		bit = __ffs(db_bits);
		iowrite32(1, ndev->peer_mmio +
			  ndev->peer_reg->db_bell + (bit * 4));
		db_bits &= db_bits - 1;
	}

	return 0;
}

static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_clear);
}

static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_clear);
}

/* XEON */

static u64 xeon_db_ioread(void __iomem *mmio)
{
	return (u64)ioread16(mmio);
}

static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite16((u16)bits, mmio);
}

static int xeon_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_bell);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  XEON_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

static int xeon_link_is_up(struct intel_ntb_dev *ndev)
{
	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return 1;

	return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}
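
/*
 * Decode the NTB topology (B2B, primary, secondary) from the PPD
 * register value.
 */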
static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
	switch (ppd & XEON_PPD_TOPO_MASK) {
	case XEON_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;

	case XEON_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;

	case XEON_PPD_TOPO_PRI_USD:
	case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
		return NTB_TOPO_PRI;

	case XEON_PPD_TOPO_SEC_USD:
	case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		return NTB_TOPO_SEC;
	}

	return NTB_TOPO_NONE;
}

static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
	if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
		dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
		return 1;
	}

	return 0;
}

static int xeon_init_isr(struct intel_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_SHIFT,
			     XEON_DB_TOTAL_SHIFT);
}

static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
{
	ndev_deinit_isr(ndev);
}
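
/*
 * Configure the B2B memory window on classic Xeon: size the secondary
 * BARs to match the primary BARs (disabling or halving the B2B BAR),
 * program the secondary BAR bases and limits, and set the outgoing
 * translation offsets toward the peer.
 */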
static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx == UINT_MAX) {
		dev_dbg(&pdev->dev, "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
			dev_dbg(&pdev->dev, "b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (XEON_B2B_MIN_SIZE <= bar_size) {
			dev_dbg(&pdev->dev, "b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(&pdev->dev, "b2b bar size is too small\n");
			return -EIO;
		}
	}

	/* Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.
	 *
	 * Note: code for each specific bar size register, because the register
	 * offsets are not in a consistent order (bar5sz comes after ppd, odd).
	 */
	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}
	pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);

	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);

		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
		if (b2b_bar == 5) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
	}

	/* SBAR01 hit by first part of the b2b bar */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = addr->bar5_addr32;
	else
		return -EIO;

	dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
	iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);

	/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
	 * The b2b bar is either disabled above, or configured half-size, and
	 * it starts at the PBAR xlat + offset.
	 */
	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
	dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
	}

	/* setup incoming bar limits == base addrs (zero length windows) */
	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
	dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
	}

	/* zero incoming translation addrs */
	iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
	}

	/* zero outgoing translation limits (whole bar size windows) */
	iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
	}

	/* set outgoing translation offsets */
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
	dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
	}

	/* set the translation offset for b2b registers */
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

	/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
	dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
		/* map peer ntb mmio config space registers */
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    XEON_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;

		ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
	}

	return 0;
}
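
/*
 * Select the register maps and resource counts for the detected topology
 * and apply the SDOORBELL/B2BDOORBELL errata adjustments before the B2B
 * memory window is configured.
 */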
static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{
	struct device *dev = &ndev->ntb.pdev->dev;
	int rc;
	u32 ntb_ctl;

	if (ndev->bar4_split)
		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
	else
		ndev->mw_count = XEON_MW_COUNT;

	ndev->spad_count = XEON_SPAD_COUNT;
	ndev->db_count = XEON_DB_COUNT;
	ndev->db_link_mask = XEON_DB_LINK_BIT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(dev, "NTB Primary config disabled\n");
			return -EINVAL;
		}

		/* enable link to allow secondary side device to appear */
		ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
		ntb_ctl &= ~NTB_CTL_DISABLE;
		iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_sec_reg;
		ndev->xlat_reg = &xeon_sec_xlat;
		break;

	case NTB_TOPO_SEC:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(dev, "NTB Secondary config disabled\n");
			return -EINVAL;
		}
		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_sec_reg;
		ndev->peer_reg = &xeon_pri_reg;
		ndev->xlat_reg = &xeon_pri_xlat;
		break;

	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_b2b_reg;
		ndev->xlat_reg = &xeon_sec_xlat;

		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			ndev->peer_reg = &xeon_pri_reg;

			if (b2b_mw_idx < 0)
				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
			else
				ndev->b2b_idx = b2b_mw_idx;

			if (ndev->b2b_idx >= ndev->mw_count) {
				dev_dbg(dev,
					"b2b_mw_idx %d invalid for mw_count %u\n",
					b2b_mw_idx, ndev->mw_count);
				return -EINVAL;
			}

			dev_dbg(dev, "setting up b2b mw idx %d means %d\n",
				b2b_mw_idx, ndev->b2b_idx);

		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
			dev_warn(dev, "Reduce doorbell count by 1\n");
			ndev->db_count -= 1;
		}

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_dsd_addr,
					       &xeon_b2b_usd_addr);
		} else {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_usd_addr,
					       &xeon_b2b_dsd_addr);
		}
		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + XEON_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

static int xeon_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc, mem;

	pdev = ndev->ntb.pdev;

	switch (pdev->device) {
	/* There is a Xeon hardware errata related to writes to SDOORBELL or
	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
	 * which may hang the system. To work around this, use the second
	 * memory window to access the interrupt and scratch pad registers
	 * on the remote system.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* There is a hardware errata related to accessing any register in
	 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* HW Errata on bit 14 of b2bdoorbell register. Writes will not be
	 * mirrored to the remote system. Shrink the number of bits by one,
	 * since bit 14 is the last bit.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
		break;
	}

	ndev->reg = &xeon_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
		dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
			ppd, ndev->bar4_split);
	} else {
		/* This is a way for transparent BAR to figure out if we are
		 * doing split BAR or not. There is no way for the hw on the
		 * transparent side to know and set the PPD.
		 */
		mem = pci_select_bars(pdev, IORESOURCE_MEM);
		ndev->bar4_split = hweight32(mem) ==
			HSX_SPLIT_BAR_MW_COUNT + 1;
		dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
			mem, ndev->bar4_split);
	}

	rc = xeon_init_ntb(ndev);
	if (rc)
		return rc;

	return xeon_init_isr(ndev);
}

static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
{
	xeon_deinit_isr(ndev);
}
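
/*
 * Enable the PCI device, claim its regions, set the DMA masks (falling
 * back to 32-bit if 64-bit is not available), and map BAR 0 as the local
 * NTB register space.
 */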
static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
	}

	rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
					  dma_get_mask(&pdev->dev));
	if (rc)
		goto err_dma_mask;

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_mmio;
	}
	ndev->peer_mmio = ndev->self_mmio;
	ndev->peer_addr = pci_resource_start(pdev, 0);

	return 0;

err_mmio:
err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;

	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
		pci_iounmap(pdev, ndev->peer_mmio);
	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &intel_ntb_ops;

	ndev->b2b_off = 0;
	ndev->b2b_idx = UINT_MAX;

	ndev->bar4_split = 0;

	ndev->mw_count = 0;
	ndev->spad_count = 0;
	ndev->db_count = 0;
	ndev->db_vec_count = 0;
	ndev->db_vec_shift = 0;

	ndev->ntb_ctl = 0;
	ndev->lnk_sta = 0;

	ndev->db_valid_mask = 0;
	ndev->db_link_mask = 0;
	ndev->db_mask = 0;

	spin_lock_init(&ndev->db_mask_lock);
}
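
/*
 * Allocate the device structure on the local NUMA node, perform the
 * generation-specific initialization (classic Xeon or Skylake Xeon),
 * and register the device with the NTB core.
 */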
static int intel_ntb_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct intel_ntb_dev *ndev;
	int rc, node;

	node = dev_to_node(&pdev->dev);

	if (pdev_is_xeon(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = xeon_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_skx_xeon(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);
		ndev->ntb.ops = &intel_ntb3_ops;

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = skx_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else {
		rc = -EINVAL;
		goto err_ndev;
	}

	ndev_reset_unsafe_flags(ndev);

	ndev->reg->poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
		xeon_deinit_dev(ndev);
err_init_dev:
	intel_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
		xeon_deinit_dev(ndev);
	intel_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static const struct intel_ntb_reg xeon_reg = {
	.poll_link = xeon_poll_link,
	.link_is_up = xeon_link_is_up,
	.db_ioread = xeon_db_ioread,
	.db_iowrite = xeon_db_iowrite,
	.db_size = sizeof(u32),
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.mw_bar = {2, 4, 5},
};

static const struct intel_ntb_alt_reg xeon_pri_reg = {
	.db_bell = XEON_PDOORBELL_OFFSET,
	.db_mask = XEON_PDBMSK_OFFSET,
	.spad = XEON_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg xeon_sec_reg = {
	.db_bell = XEON_SDOORBELL_OFFSET,
	.db_mask = XEON_SDBMSK_OFFSET,
	/* second half of the scratchpads */
	.spad = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
};

static const struct intel_ntb_alt_reg xeon_b2b_reg = {
	.db_bell = XEON_B2B_DOORBELL_OFFSET,
	.spad = XEON_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
	/* Note: no primary .bar0_base visible to the secondary side.
	 *
	 * The secondary side cannot get the base address stored in primary
	 * bars. The base address is necessary to set the limit register to
	 * any value other than zero, or unlimited.
	 *
	 * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
	 * window by setting the limit equal to base, nor can it limit the size
	 * of the memory window by setting the limit to base + size.
	 */
	.bar2_limit = XEON_PBAR23LMT_OFFSET,
	.bar2_xlat = XEON_PBAR23XLAT_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base = XEON_SBAR0BASE_OFFSET,
	.bar2_limit = XEON_SBAR23LMT_OFFSET,
	.bar2_xlat = XEON_SBAR23XLAT_OFFSET,
};

static struct intel_b2b_addr xeon_b2b_usd_addr = {
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

static struct intel_b2b_addr xeon_b2b_dsd_addr = {
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

static const struct intel_ntb_reg skx_reg = {
	.poll_link = skx_poll_link,
	.link_is_up = xeon_link_is_up,
	.db_ioread = skx_db_ioread,
	.db_iowrite = skx_db_iowrite,
	.db_size = sizeof(u32),
	.ntb_ctl = SKX_NTBCNTL_OFFSET,
	.mw_bar = {2, 4},
};

static const struct intel_ntb_alt_reg skx_pri_reg = {
	.db_bell = SKX_EM_DOORBELL_OFFSET,
	.db_clear = SKX_IM_INT_STATUS_OFFSET,
	.db_mask = SKX_IM_INT_DISABLE_OFFSET,
	.spad = SKX_IM_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg skx_b2b_reg = {
	.db_bell = SKX_IM_DOORBELL_OFFSET,
	.db_clear = SKX_EM_INT_STATUS_OFFSET,
	.db_mask = SKX_EM_INT_DISABLE_OFFSET,
	.spad = SKX_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg skx_sec_xlat = {
	/* .bar0_base = SKX_EMBAR0_OFFSET, */
	.bar2_limit = SKX_IMBAR1XLMT_OFFSET,
	.bar2_xlat = SKX_IMBAR1XBASE_OFFSET,
};

/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
	.mw_count = intel_ntb_mw_count,
	.mw_get_align = intel_ntb_mw_get_align,
	.mw_set_trans = intel_ntb_mw_set_trans,
	.peer_mw_count = intel_ntb_peer_mw_count,
	.peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
	.link_is_up = intel_ntb_link_is_up,
	.link_enable = intel_ntb_link_enable,
	.link_disable = intel_ntb_link_disable,
	.db_is_unsafe = intel_ntb_db_is_unsafe,
	.db_valid_mask = intel_ntb_db_valid_mask,
	.db_vector_count = intel_ntb_db_vector_count,
	.db_vector_mask = intel_ntb_db_vector_mask,
	.db_read = intel_ntb_db_read,
	.db_clear = intel_ntb_db_clear,
	.db_set_mask = intel_ntb_db_set_mask,
	.db_clear_mask = intel_ntb_db_clear_mask,
	.peer_db_addr = intel_ntb_peer_db_addr,
	.peer_db_set = intel_ntb_peer_db_set,
	.spad_is_unsafe = intel_ntb_spad_is_unsafe,
	.spad_count = intel_ntb_spad_count,
	.spad_read = intel_ntb_spad_read,
	.spad_write = intel_ntb_spad_write,
	.peer_spad_addr = intel_ntb_peer_spad_addr,
	.peer_spad_read = intel_ntb_peer_spad_read,
	.peer_spad_write = intel_ntb_peer_spad_write,
};
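
/* operations for primary side of local ntb (Skylake Xeon, gen3 registers) */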
static const struct ntb_dev_ops intel_ntb3_ops = {
	.mw_count = intel_ntb_mw_count,
	.mw_get_align = intel_ntb_mw_get_align,
	.mw_set_trans = intel_ntb3_mw_set_trans,
	.peer_mw_count = intel_ntb_peer_mw_count,
	.peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
	.link_is_up = intel_ntb_link_is_up,
	.link_enable = intel_ntb3_link_enable,
	.link_disable = intel_ntb_link_disable,
	.db_valid_mask = intel_ntb_db_valid_mask,
	.db_vector_count = intel_ntb_db_vector_count,
	.db_vector_mask = intel_ntb_db_vector_mask,
	.db_read = intel_ntb3_db_read,
	.db_clear = intel_ntb3_db_clear,
	.db_set_mask = intel_ntb_db_set_mask,
	.db_clear_mask = intel_ntb_db_clear_mask,
	.peer_db_addr = intel_ntb_peer_db_addr,
	.peer_db_set = intel_ntb3_peer_db_set,
	.spad_is_unsafe = intel_ntb_spad_is_unsafe,
	.spad_count = intel_ntb_spad_count,
	.spad_read = intel_ntb_spad_read,
	.spad_write = intel_ntb_spad_write,
	.peer_spad_addr = intel_ntb_peer_spad_addr,
	.peer_spad_read = intel_ntb_peer_spad_read,
	.peer_spad_write = intel_ntb_peer_spad_write,
};

static const struct file_operations intel_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

static const struct pci_device_id intel_ntb_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);

static struct pci_driver intel_ntb_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = intel_ntb_pci_tbl,
	.probe = intel_ntb_pci_probe,
	.remove = intel_ntb_pci_remove,
};

static int __init intel_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);

static void __exit intel_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&intel_ntb_pci_driver);

	debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);