ntb_hw_intel.c 58 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213
  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2012 Intel Corporation. All rights reserved.
  8. * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of version 2 of the GNU General Public License as
  12. * published by the Free Software Foundation.
  13. *
  14. * BSD LICENSE
  15. *
  16. * Copyright(c) 2012 Intel Corporation. All rights reserved.
  17. * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
  18. *
  19. * Redistribution and use in source and binary forms, with or without
  20. * modification, are permitted provided that the following conditions
  21. * are met:
  22. *
  23. * * Redistributions of source code must retain the above copyright
  24. * notice, this list of conditions and the following disclaimer.
  25. * * Redistributions in binary form must reproduce the above copy
  26. * notice, this list of conditions and the following disclaimer in
  27. * the documentation and/or other materials provided with the
  28. * distribution.
  29. * * Neither the name of Intel Corporation nor the names of its
  30. * contributors may be used to endorse or promote products derived
  31. * from this software without specific prior written permission.
  32. *
  33. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  34. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  35. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  36. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  37. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  38. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  39. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  40. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  41. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  42. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  43. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  44. *
  45. * Intel PCIe NTB Linux driver
  46. *
  47. * Contact Information:
  48. * Jon Mason <jon.mason@intel.com>
  49. */
  50. #include <linux/debugfs.h>
  51. #include <linux/delay.h>
  52. #include <linux/init.h>
  53. #include <linux/interrupt.h>
  54. #include <linux/module.h>
  55. #include <linux/pci.h>
  56. #include <linux/random.h>
  57. #include <linux/slab.h>
  58. #include <linux/ntb.h>
  59. #include "ntb_hw_intel.h"
  60. #define NTB_NAME "ntb_hw_intel"
  61. #define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver"
  62. #define NTB_VER "2.0"
  63. MODULE_DESCRIPTION(NTB_DESC);
  64. MODULE_VERSION(NTB_VER);
  65. MODULE_LICENSE("Dual BSD/GPL");
  66. MODULE_AUTHOR("Intel Corporation");
  67. #define bar0_off(base, bar) ((base) + ((bar) << 2))
  68. #define bar2_off(base, bar) bar0_off(base, (bar) - 2)
  69. static const struct intel_ntb_reg atom_reg;
  70. static const struct intel_ntb_alt_reg atom_pri_reg;
  71. static const struct intel_ntb_alt_reg atom_sec_reg;
  72. static const struct intel_ntb_alt_reg atom_b2b_reg;
  73. static const struct intel_ntb_xlat_reg atom_pri_xlat;
  74. static const struct intel_ntb_xlat_reg atom_sec_xlat;
  75. static const struct intel_ntb_reg xeon_reg;
  76. static const struct intel_ntb_alt_reg xeon_pri_reg;
  77. static const struct intel_ntb_alt_reg xeon_sec_reg;
  78. static const struct intel_ntb_alt_reg xeon_b2b_reg;
  79. static const struct intel_ntb_xlat_reg xeon_pri_xlat;
  80. static const struct intel_ntb_xlat_reg xeon_sec_xlat;
  81. static struct intel_b2b_addr xeon_b2b_usd_addr;
  82. static struct intel_b2b_addr xeon_b2b_dsd_addr;
  83. static const struct ntb_dev_ops intel_ntb_ops;
  84. static const struct file_operations intel_ntb_debugfs_info;
  85. static struct dentry *debugfs_dir;
  86. static int b2b_mw_idx = -1;
  87. module_param(b2b_mw_idx, int, 0644);
  88. MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
  89. "value of zero or positive starts from first mw idx, and a "
  90. "negative value starts from last mw idx. Both sides MUST "
  91. "set the same value here!");
  92. static unsigned int b2b_mw_share;
  93. module_param(b2b_mw_share, uint, 0644);
  94. MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
  95. "ntb so that the peer ntb only occupies the first half of "
  96. "the mw, so the second half can still be used as a mw. Both "
  97. "sides MUST set the same value here!");
  98. module_param_named(xeon_b2b_usd_bar2_addr64,
  99. xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
  100. MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
  101. "XEON B2B USD BAR 2 64-bit address");
  102. module_param_named(xeon_b2b_usd_bar4_addr64,
  103. xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
  104. MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
  105. "XEON B2B USD BAR 4 64-bit address");
  106. module_param_named(xeon_b2b_usd_bar4_addr32,
  107. xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
  108. MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
  109. "XEON B2B USD split-BAR 4 32-bit address");
  110. module_param_named(xeon_b2b_usd_bar5_addr32,
  111. xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
  112. MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
  113. "XEON B2B USD split-BAR 5 32-bit address");
  114. module_param_named(xeon_b2b_dsd_bar2_addr64,
  115. xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
  116. MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
  117. "XEON B2B DSD BAR 2 64-bit address");
  118. module_param_named(xeon_b2b_dsd_bar4_addr64,
  119. xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
  120. MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
  121. "XEON B2B DSD BAR 4 64-bit address");
  122. module_param_named(xeon_b2b_dsd_bar4_addr32,
  123. xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
  124. MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
  125. "XEON B2B DSD split-BAR 4 32-bit address");
  126. module_param_named(xeon_b2b_dsd_bar5_addr32,
  127. xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
  128. MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
  129. "XEON B2B DSD split-BAR 5 32-bit address");
  130. #ifndef ioread64
  131. #ifdef readq
  132. #define ioread64 readq
  133. #else
  134. #define ioread64 _ioread64
  135. static inline u64 _ioread64(void __iomem *mmio)
  136. {
  137. u64 low, high;
  138. low = ioread32(mmio);
  139. high = ioread32(mmio + sizeof(u32));
  140. return low | (high << 32);
  141. }
  142. #endif
  143. #endif
  144. #ifndef iowrite64
  145. #ifdef writeq
  146. #define iowrite64 writeq
  147. #else
  148. #define iowrite64 _iowrite64
  149. static inline void _iowrite64(u64 val, void __iomem *mmio)
  150. {
  151. iowrite32(val, mmio);
  152. iowrite32(val >> 32, mmio + sizeof(u32));
  153. }
  154. #endif
  155. #endif
  156. static inline int pdev_is_atom(struct pci_dev *pdev)
  157. {
  158. switch (pdev->device) {
  159. case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
  160. return 1;
  161. }
  162. return 0;
  163. }
  164. static inline int pdev_is_xeon(struct pci_dev *pdev)
  165. {
  166. switch (pdev->device) {
  167. case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
  168. case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
  169. case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
  170. case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
  171. case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
  172. case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
  173. case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
  174. case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
  175. case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
  176. case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
  177. case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
  178. case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
  179. return 1;
  180. }
  181. return 0;
  182. }
  183. static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
  184. {
  185. ndev->unsafe_flags = 0;
  186. ndev->unsafe_flags_ignore = 0;
  187. /* Only B2B has a workaround to avoid SDOORBELL */
  188. if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
  189. if (!ntb_topo_is_b2b(ndev->ntb.topo))
  190. ndev->unsafe_flags |= NTB_UNSAFE_DB;
  191. /* No low level workaround to avoid SB01BASE */
  192. if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
  193. ndev->unsafe_flags |= NTB_UNSAFE_DB;
  194. ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
  195. }
  196. }
  197. static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
  198. unsigned long flag)
  199. {
  200. return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
  201. }
  202. static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
  203. unsigned long flag)
  204. {
  205. flag &= ndev->unsafe_flags;
  206. ndev->unsafe_flags_ignore |= flag;
  207. return !!flag;
  208. }
  209. static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
  210. {
  211. if (idx < 0 || idx > ndev->mw_count)
  212. return -EINVAL;
  213. return ndev->reg->mw_bar[idx];
  214. }
  215. static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
  216. phys_addr_t *db_addr, resource_size_t *db_size,
  217. phys_addr_t reg_addr, unsigned long reg)
  218. {
  219. WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
  220. if (db_addr) {
  221. *db_addr = reg_addr + reg;
  222. dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
  223. }
  224. if (db_size) {
  225. *db_size = ndev->reg->db_size;
  226. dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
  227. }
  228. return 0;
  229. }
  230. static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
  231. void __iomem *mmio)
  232. {
  233. WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
  234. return ndev->reg->db_ioread(mmio);
  235. }
  236. static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
  237. void __iomem *mmio)
  238. {
  239. WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
  240. if (db_bits & ~ndev->db_valid_mask)
  241. return -EINVAL;
  242. ndev->reg->db_iowrite(db_bits, mmio);
  243. return 0;
  244. }
  245. static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
  246. void __iomem *mmio)
  247. {
  248. unsigned long irqflags;
  249. WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
  250. if (db_bits & ~ndev->db_valid_mask)
  251. return -EINVAL;
  252. spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
  253. {
  254. ndev->db_mask |= db_bits;
  255. ndev->reg->db_iowrite(ndev->db_mask, mmio);
  256. }
  257. spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
  258. return 0;
  259. }
  260. static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
  261. void __iomem *mmio)
  262. {
  263. unsigned long irqflags;
  264. WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
  265. if (db_bits & ~ndev->db_valid_mask)
  266. return -EINVAL;
  267. spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
  268. {
  269. ndev->db_mask &= ~db_bits;
  270. ndev->reg->db_iowrite(ndev->db_mask, mmio);
  271. }
  272. spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
  273. return 0;
  274. }
  275. static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
  276. {
  277. u64 shift, mask;
  278. shift = ndev->db_vec_shift;
  279. mask = BIT_ULL(shift) - 1;
  280. return mask << (shift * db_vector);
  281. }
  282. static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
  283. phys_addr_t *spad_addr, phys_addr_t reg_addr,
  284. unsigned long reg)
  285. {
  286. WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));
  287. if (idx < 0 || idx >= ndev->spad_count)
  288. return -EINVAL;
  289. if (spad_addr) {
  290. *spad_addr = reg_addr + reg + (idx << 2);
  291. dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
  292. }
  293. return 0;
  294. }
  295. static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
  296. void __iomem *mmio)
  297. {
  298. WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));
  299. if (idx < 0 || idx >= ndev->spad_count)
  300. return 0;
  301. return ioread32(mmio + (idx << 2));
  302. }
  303. static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
  304. void __iomem *mmio)
  305. {
  306. WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));
  307. if (idx < 0 || idx >= ndev->spad_count)
  308. return -EINVAL;
  309. iowrite32(val, mmio + (idx << 2));
  310. return 0;
  311. }
  312. static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
  313. {
  314. u64 vec_mask;
  315. vec_mask = ndev_vec_mask(ndev, vec);
  316. dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);
  317. ndev->last_ts = jiffies;
  318. if (vec_mask & ndev->db_link_mask) {
  319. if (ndev->reg->poll_link(ndev))
  320. ntb_link_event(&ndev->ntb);
  321. }
  322. if (vec_mask & ndev->db_valid_mask)
  323. ntb_db_event(&ndev->ntb, vec);
  324. return IRQ_HANDLED;
  325. }
  326. static irqreturn_t ndev_vec_isr(int irq, void *dev)
  327. {
  328. struct intel_ntb_vec *nvec = dev;
  329. return ndev_interrupt(nvec->ndev, nvec->num);
  330. }
  331. static irqreturn_t ndev_irq_isr(int irq, void *dev)
  332. {
  333. struct intel_ntb_dev *ndev = dev;
  334. return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
  335. }
  336. static int ndev_init_isr(struct intel_ntb_dev *ndev,
  337. int msix_min, int msix_max,
  338. int msix_shift, int total_shift)
  339. {
  340. struct pci_dev *pdev;
  341. int rc, i, msix_count, node;
  342. pdev = ndev_pdev(ndev);
  343. node = dev_to_node(&pdev->dev);
  344. /* Mask all doorbell interrupts */
  345. ndev->db_mask = ndev->db_valid_mask;
  346. ndev->reg->db_iowrite(ndev->db_mask,
  347. ndev->self_mmio +
  348. ndev->self_reg->db_mask);
  349. /* Try to set up msix irq */
  350. ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
  351. GFP_KERNEL, node);
  352. if (!ndev->vec)
  353. goto err_msix_vec_alloc;
  354. ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
  355. GFP_KERNEL, node);
  356. if (!ndev->msix)
  357. goto err_msix_alloc;
  358. for (i = 0; i < msix_max; ++i)
  359. ndev->msix[i].entry = i;
  360. msix_count = pci_enable_msix_range(pdev, ndev->msix,
  361. msix_min, msix_max);
  362. if (msix_count < 0)
  363. goto err_msix_enable;
  364. for (i = 0; i < msix_count; ++i) {
  365. ndev->vec[i].ndev = ndev;
  366. ndev->vec[i].num = i;
  367. rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
  368. "ndev_vec_isr", &ndev->vec[i]);
  369. if (rc)
  370. goto err_msix_request;
  371. }
  372. dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
  373. ndev->db_vec_count = msix_count;
  374. ndev->db_vec_shift = msix_shift;
  375. return 0;
  376. err_msix_request:
  377. while (i-- > 0)
  378. free_irq(ndev->msix[i].vector, ndev);
  379. pci_disable_msix(pdev);
  380. err_msix_enable:
  381. kfree(ndev->msix);
  382. err_msix_alloc:
  383. kfree(ndev->vec);
  384. err_msix_vec_alloc:
  385. ndev->msix = NULL;
  386. ndev->vec = NULL;
  387. /* Try to set up msi irq */
  388. rc = pci_enable_msi(pdev);
  389. if (rc)
  390. goto err_msi_enable;
  391. rc = request_irq(pdev->irq, ndev_irq_isr, 0,
  392. "ndev_irq_isr", ndev);
  393. if (rc)
  394. goto err_msi_request;
  395. dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
  396. ndev->db_vec_count = 1;
  397. ndev->db_vec_shift = total_shift;
  398. return 0;
  399. err_msi_request:
  400. pci_disable_msi(pdev);
  401. err_msi_enable:
  402. /* Try to set up intx irq */
  403. pci_intx(pdev, 1);
  404. rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
  405. "ndev_irq_isr", ndev);
  406. if (rc)
  407. goto err_intx_request;
  408. dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
  409. ndev->db_vec_count = 1;
  410. ndev->db_vec_shift = total_shift;
  411. return 0;
  412. err_intx_request:
  413. return rc;
  414. }
  415. static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
  416. {
  417. struct pci_dev *pdev;
  418. int i;
  419. pdev = ndev_pdev(ndev);
  420. /* Mask all doorbell interrupts */
  421. ndev->db_mask = ndev->db_valid_mask;
  422. ndev->reg->db_iowrite(ndev->db_mask,
  423. ndev->self_mmio +
  424. ndev->self_reg->db_mask);
  425. if (ndev->msix) {
  426. i = ndev->db_vec_count;
  427. while (i--)
  428. free_irq(ndev->msix[i].vector, &ndev->vec[i]);
  429. pci_disable_msix(pdev);
  430. kfree(ndev->msix);
  431. kfree(ndev->vec);
  432. } else {
  433. free_irq(pdev->irq, ndev);
  434. if (pci_dev_msi_enabled(pdev))
  435. pci_disable_msi(pdev);
  436. }
  437. }
  438. static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
  439. size_t count, loff_t *offp)
  440. {
  441. struct intel_ntb_dev *ndev;
  442. void __iomem *mmio;
  443. char *buf;
  444. size_t buf_size;
  445. ssize_t ret, off;
  446. union { u64 v64; u32 v32; u16 v16; } u;
  447. ndev = filp->private_data;
  448. mmio = ndev->self_mmio;
  449. buf_size = min(count, 0x800ul);
  450. buf = kmalloc(buf_size, GFP_KERNEL);
  451. if (!buf)
  452. return -ENOMEM;
  453. off = 0;
  454. off += scnprintf(buf + off, buf_size - off,
  455. "NTB Device Information:\n");
  456. off += scnprintf(buf + off, buf_size - off,
  457. "Connection Topology -\t%s\n",
  458. ntb_topo_string(ndev->ntb.topo));
  459. off += scnprintf(buf + off, buf_size - off,
  460. "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
  461. off += scnprintf(buf + off, buf_size - off,
  462. "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
  463. off += scnprintf(buf + off, buf_size - off,
  464. "BAR4 Split -\t\t%s\n",
  465. ndev->bar4_split ? "yes" : "no");
  466. off += scnprintf(buf + off, buf_size - off,
  467. "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
  468. off += scnprintf(buf + off, buf_size - off,
  469. "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
  470. if (!ndev->reg->link_is_up(ndev)) {
  471. off += scnprintf(buf + off, buf_size - off,
  472. "Link Status -\t\tDown\n");
  473. } else {
  474. off += scnprintf(buf + off, buf_size - off,
  475. "Link Status -\t\tUp\n");
  476. off += scnprintf(buf + off, buf_size - off,
  477. "Link Speed -\t\tPCI-E Gen %u\n",
  478. NTB_LNK_STA_SPEED(ndev->lnk_sta));
  479. off += scnprintf(buf + off, buf_size - off,
  480. "Link Width -\t\tx%u\n",
  481. NTB_LNK_STA_WIDTH(ndev->lnk_sta));
  482. }
  483. off += scnprintf(buf + off, buf_size - off,
  484. "Memory Window Count -\t%u\n", ndev->mw_count);
  485. off += scnprintf(buf + off, buf_size - off,
  486. "Scratchpad Count -\t%u\n", ndev->spad_count);
  487. off += scnprintf(buf + off, buf_size - off,
  488. "Doorbell Count -\t%u\n", ndev->db_count);
  489. off += scnprintf(buf + off, buf_size - off,
  490. "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
  491. off += scnprintf(buf + off, buf_size - off,
  492. "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
  493. off += scnprintf(buf + off, buf_size - off,
  494. "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
  495. off += scnprintf(buf + off, buf_size - off,
  496. "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
  497. off += scnprintf(buf + off, buf_size - off,
  498. "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
  499. u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
  500. off += scnprintf(buf + off, buf_size - off,
  501. "Doorbell Mask -\t\t%#llx\n", u.v64);
  502. u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
  503. off += scnprintf(buf + off, buf_size - off,
  504. "Doorbell Bell -\t\t%#llx\n", u.v64);
  505. off += scnprintf(buf + off, buf_size - off,
  506. "\nNTB Incoming XLAT:\n");
  507. u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
  508. off += scnprintf(buf + off, buf_size - off,
  509. "XLAT23 -\t\t%#018llx\n", u.v64);
  510. u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
  511. off += scnprintf(buf + off, buf_size - off,
  512. "XLAT45 -\t\t%#018llx\n", u.v64);
  513. u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
  514. off += scnprintf(buf + off, buf_size - off,
  515. "LMT23 -\t\t\t%#018llx\n", u.v64);
  516. u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
  517. off += scnprintf(buf + off, buf_size - off,
  518. "LMT45 -\t\t\t%#018llx\n", u.v64);
  519. if (pdev_is_xeon(ndev->ntb.pdev)) {
  520. if (ntb_topo_is_b2b(ndev->ntb.topo)) {
  521. off += scnprintf(buf + off, buf_size - off,
  522. "\nNTB Outgoing B2B XLAT:\n");
  523. u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
  524. off += scnprintf(buf + off, buf_size - off,
  525. "B2B XLAT23 -\t\t%#018llx\n", u.v64);
  526. u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
  527. off += scnprintf(buf + off, buf_size - off,
  528. "B2B XLAT45 -\t\t%#018llx\n", u.v64);
  529. u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
  530. off += scnprintf(buf + off, buf_size - off,
  531. "B2B LMT23 -\t\t%#018llx\n", u.v64);
  532. u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
  533. off += scnprintf(buf + off, buf_size - off,
  534. "B2B LMT45 -\t\t%#018llx\n", u.v64);
  535. off += scnprintf(buf + off, buf_size - off,
  536. "\nNTB Secondary BAR:\n");
  537. u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
  538. off += scnprintf(buf + off, buf_size - off,
  539. "SBAR01 -\t\t%#018llx\n", u.v64);
  540. u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
  541. off += scnprintf(buf + off, buf_size - off,
  542. "SBAR23 -\t\t%#018llx\n", u.v64);
  543. u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
  544. off += scnprintf(buf + off, buf_size - off,
  545. "SBAR45 -\t\t%#018llx\n", u.v64);
  546. }
  547. off += scnprintf(buf + off, buf_size - off,
  548. "\nXEON NTB Statistics:\n");
  549. u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
  550. off += scnprintf(buf + off, buf_size - off,
  551. "Upstream Memory Miss -\t%u\n", u.v16);
  552. off += scnprintf(buf + off, buf_size - off,
  553. "\nXEON NTB Hardware Errors:\n");
  554. if (!pci_read_config_word(ndev->ntb.pdev,
  555. XEON_DEVSTS_OFFSET, &u.v16))
  556. off += scnprintf(buf + off, buf_size - off,
  557. "DEVSTS -\t\t%#06x\n", u.v16);
  558. if (!pci_read_config_word(ndev->ntb.pdev,
  559. XEON_LINK_STATUS_OFFSET, &u.v16))
  560. off += scnprintf(buf + off, buf_size - off,
  561. "LNKSTS -\t\t%#06x\n", u.v16);
  562. if (!pci_read_config_dword(ndev->ntb.pdev,
  563. XEON_UNCERRSTS_OFFSET, &u.v32))
  564. off += scnprintf(buf + off, buf_size - off,
  565. "UNCERRSTS -\t\t%#06x\n", u.v32);
  566. if (!pci_read_config_dword(ndev->ntb.pdev,
  567. XEON_CORERRSTS_OFFSET, &u.v32))
  568. off += scnprintf(buf + off, buf_size - off,
  569. "CORERRSTS -\t\t%#06x\n", u.v32);
  570. }
  571. ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
  572. kfree(buf);
  573. return ret;
  574. }
  575. static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
  576. {
  577. if (!debugfs_dir) {
  578. ndev->debugfs_dir = NULL;
  579. ndev->debugfs_info = NULL;
  580. } else {
  581. ndev->debugfs_dir =
  582. debugfs_create_dir(ndev_name(ndev), debugfs_dir);
  583. if (!ndev->debugfs_dir)
  584. ndev->debugfs_info = NULL;
  585. else
  586. ndev->debugfs_info =
  587. debugfs_create_file("info", S_IRUSR,
  588. ndev->debugfs_dir, ndev,
  589. &intel_ntb_debugfs_info);
  590. }
  591. }
  592. static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
  593. {
  594. debugfs_remove_recursive(ndev->debugfs_dir);
  595. }
  596. static int intel_ntb_mw_count(struct ntb_dev *ntb)
  597. {
  598. return ntb_ndev(ntb)->mw_count;
  599. }
  600. static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
  601. phys_addr_t *base,
  602. resource_size_t *size,
  603. resource_size_t *align,
  604. resource_size_t *align_size)
  605. {
  606. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  607. int bar;
  608. if (idx >= ndev->b2b_idx && !ndev->b2b_off)
  609. idx += 1;
  610. bar = ndev_mw_to_bar(ndev, idx);
  611. if (bar < 0)
  612. return bar;
  613. if (base)
  614. *base = pci_resource_start(ndev->ntb.pdev, bar) +
  615. (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
  616. if (size)
  617. *size = pci_resource_len(ndev->ntb.pdev, bar) -
  618. (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
  619. if (align)
  620. *align = pci_resource_len(ndev->ntb.pdev, bar);
  621. if (align_size)
  622. *align_size = 1;
  623. return 0;
  624. }
  625. static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
  626. dma_addr_t addr, resource_size_t size)
  627. {
  628. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  629. unsigned long base_reg, xlat_reg, limit_reg;
  630. resource_size_t bar_size, mw_size;
  631. void __iomem *mmio;
  632. u64 base, limit, reg_val;
  633. int bar;
  634. if (idx >= ndev->b2b_idx && !ndev->b2b_off)
  635. idx += 1;
  636. bar = ndev_mw_to_bar(ndev, idx);
  637. if (bar < 0)
  638. return bar;
  639. bar_size = pci_resource_len(ndev->ntb.pdev, bar);
  640. if (idx == ndev->b2b_idx)
  641. mw_size = bar_size - ndev->b2b_off;
  642. else
  643. mw_size = bar_size;
  644. /* hardware requires that addr is aligned to bar size */
  645. if (addr & (bar_size - 1))
  646. return -EINVAL;
  647. /* make sure the range fits in the usable mw size */
  648. if (size > mw_size)
  649. return -EINVAL;
  650. mmio = ndev->self_mmio;
  651. base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
  652. xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
  653. limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);
  654. if (bar < 4 || !ndev->bar4_split) {
  655. base = ioread64(mmio + base_reg);
  656. /* Set the limit if supported, if size is not mw_size */
  657. if (limit_reg && size != mw_size)
  658. limit = base + size;
  659. else
  660. limit = 0;
  661. /* set and verify setting the translation address */
  662. iowrite64(addr, mmio + xlat_reg);
  663. reg_val = ioread64(mmio + xlat_reg);
  664. if (reg_val != addr) {
  665. iowrite64(0, mmio + xlat_reg);
  666. return -EIO;
  667. }
  668. /* set and verify setting the limit */
  669. iowrite64(limit, mmio + limit_reg);
  670. reg_val = ioread64(mmio + limit_reg);
  671. if (reg_val != limit) {
  672. iowrite64(base, mmio + limit_reg);
  673. iowrite64(0, mmio + xlat_reg);
  674. return -EIO;
  675. }
  676. } else {
  677. /* split bar addr range must all be 32 bit */
  678. if (addr & (~0ull << 32))
  679. return -EINVAL;
  680. if ((addr + size) & (~0ull << 32))
  681. return -EINVAL;
  682. base = ioread32(mmio + base_reg);
  683. /* Set the limit if supported, if size is not mw_size */
  684. if (limit_reg && size != mw_size)
  685. limit = base + size;
  686. else
  687. limit = 0;
  688. /* set and verify setting the translation address */
  689. iowrite32(addr, mmio + xlat_reg);
  690. reg_val = ioread32(mmio + xlat_reg);
  691. if (reg_val != addr) {
  692. iowrite32(0, mmio + xlat_reg);
  693. return -EIO;
  694. }
  695. /* set and verify setting the limit */
  696. iowrite32(limit, mmio + limit_reg);
  697. reg_val = ioread32(mmio + limit_reg);
  698. if (reg_val != limit) {
  699. iowrite32(base, mmio + limit_reg);
  700. iowrite32(0, mmio + xlat_reg);
  701. return -EIO;
  702. }
  703. }
  704. return 0;
  705. }
  706. static int intel_ntb_link_is_up(struct ntb_dev *ntb,
  707. enum ntb_speed *speed,
  708. enum ntb_width *width)
  709. {
  710. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  711. if (ndev->reg->link_is_up(ndev)) {
  712. if (speed)
  713. *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
  714. if (width)
  715. *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
  716. return 1;
  717. } else {
  718. /* TODO MAYBE: is it possible to observe the link speed and
  719. * width while link is training? */
  720. if (speed)
  721. *speed = NTB_SPEED_NONE;
  722. if (width)
  723. *width = NTB_WIDTH_NONE;
  724. return 0;
  725. }
  726. }
  727. static int intel_ntb_link_enable(struct ntb_dev *ntb,
  728. enum ntb_speed max_speed,
  729. enum ntb_width max_width)
  730. {
  731. struct intel_ntb_dev *ndev;
  732. u32 ntb_ctl;
  733. ndev = container_of(ntb, struct intel_ntb_dev, ntb);
  734. if (ndev->ntb.topo == NTB_TOPO_SEC)
  735. return -EINVAL;
  736. dev_dbg(ndev_dev(ndev),
  737. "Enabling link with max_speed %d max_width %d\n",
  738. max_speed, max_width);
  739. if (max_speed != NTB_SPEED_AUTO)
  740. dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
  741. if (max_width != NTB_WIDTH_AUTO)
  742. dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
  743. ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
  744. ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
  745. ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
  746. ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
  747. if (ndev->bar4_split)
  748. ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
  749. iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
  750. return 0;
  751. }
  752. static int intel_ntb_link_disable(struct ntb_dev *ntb)
  753. {
  754. struct intel_ntb_dev *ndev;
  755. u32 ntb_cntl;
  756. ndev = container_of(ntb, struct intel_ntb_dev, ntb);
  757. if (ndev->ntb.topo == NTB_TOPO_SEC)
  758. return -EINVAL;
  759. dev_dbg(ndev_dev(ndev), "Disabling link\n");
  760. /* Bring NTB link down */
  761. ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
  762. ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
  763. ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
  764. if (ndev->bar4_split)
  765. ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
  766. ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
  767. iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
  768. return 0;
  769. }
  770. static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
  771. {
  772. return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
  773. }
  774. static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
  775. {
  776. return ntb_ndev(ntb)->db_valid_mask;
  777. }
  778. static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
  779. {
  780. struct intel_ntb_dev *ndev;
  781. ndev = container_of(ntb, struct intel_ntb_dev, ntb);
  782. return ndev->db_vec_count;
  783. }
  784. static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
  785. {
  786. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  787. if (db_vector < 0 || db_vector > ndev->db_vec_count)
  788. return 0;
  789. return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
  790. }
  791. static u64 intel_ntb_db_read(struct ntb_dev *ntb)
  792. {
  793. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  794. return ndev_db_read(ndev,
  795. ndev->self_mmio +
  796. ndev->self_reg->db_bell);
  797. }
  798. static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
  799. {
  800. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  801. return ndev_db_write(ndev, db_bits,
  802. ndev->self_mmio +
  803. ndev->self_reg->db_bell);
  804. }
  805. static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
  806. {
  807. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  808. return ndev_db_set_mask(ndev, db_bits,
  809. ndev->self_mmio +
  810. ndev->self_reg->db_mask);
  811. }
  812. static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
  813. {
  814. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  815. return ndev_db_clear_mask(ndev, db_bits,
  816. ndev->self_mmio +
  817. ndev->self_reg->db_mask);
  818. }
  819. static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
  820. phys_addr_t *db_addr,
  821. resource_size_t *db_size)
  822. {
  823. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  824. return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
  825. ndev->peer_reg->db_bell);
  826. }
  827. static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
  828. {
  829. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  830. return ndev_db_write(ndev, db_bits,
  831. ndev->peer_mmio +
  832. ndev->peer_reg->db_bell);
  833. }
  834. static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
  835. {
  836. return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
  837. }
  838. static int intel_ntb_spad_count(struct ntb_dev *ntb)
  839. {
  840. struct intel_ntb_dev *ndev;
  841. ndev = container_of(ntb, struct intel_ntb_dev, ntb);
  842. return ndev->spad_count;
  843. }
  844. static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
  845. {
  846. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  847. return ndev_spad_read(ndev, idx,
  848. ndev->self_mmio +
  849. ndev->self_reg->spad);
  850. }
  851. static int intel_ntb_spad_write(struct ntb_dev *ntb,
  852. int idx, u32 val)
  853. {
  854. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  855. return ndev_spad_write(ndev, idx, val,
  856. ndev->self_mmio +
  857. ndev->self_reg->spad);
  858. }
  859. static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
  860. phys_addr_t *spad_addr)
  861. {
  862. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  863. return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
  864. ndev->peer_reg->spad);
  865. }
  866. static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
  867. {
  868. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  869. return ndev_spad_read(ndev, idx,
  870. ndev->peer_mmio +
  871. ndev->peer_reg->spad);
  872. }
  873. static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
  874. int idx, u32 val)
  875. {
  876. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  877. return ndev_spad_write(ndev, idx, val,
  878. ndev->peer_mmio +
  879. ndev->peer_reg->spad);
  880. }
  881. /* ATOM */
  882. static u64 atom_db_ioread(void __iomem *mmio)
  883. {
  884. return ioread64(mmio);
  885. }
  886. static void atom_db_iowrite(u64 bits, void __iomem *mmio)
  887. {
  888. iowrite64(bits, mmio);
  889. }
  890. static int atom_poll_link(struct intel_ntb_dev *ndev)
  891. {
  892. u32 ntb_ctl;
  893. ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);
  894. if (ntb_ctl == ndev->ntb_ctl)
  895. return 0;
  896. ndev->ntb_ctl = ntb_ctl;
  897. ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);
  898. return 1;
  899. }
  900. static int atom_link_is_up(struct intel_ntb_dev *ndev)
  901. {
  902. return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
  903. }
  904. static int atom_link_is_err(struct intel_ntb_dev *ndev)
  905. {
  906. if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
  907. & ATOM_LTSSMSTATEJMP_FORCEDETECT)
  908. return 1;
  909. if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
  910. & ATOM_IBIST_ERR_OFLOW)
  911. return 1;
  912. return 0;
  913. }
  914. static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
  915. {
  916. switch (ppd & ATOM_PPD_TOPO_MASK) {
  917. case ATOM_PPD_TOPO_B2B_USD:
  918. dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
  919. return NTB_TOPO_B2B_USD;
  920. case ATOM_PPD_TOPO_B2B_DSD:
  921. dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
  922. return NTB_TOPO_B2B_DSD;
  923. case ATOM_PPD_TOPO_PRI_USD:
  924. case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
  925. case ATOM_PPD_TOPO_SEC_USD:
  926. case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
  927. dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
  928. return NTB_TOPO_NONE;
  929. }
  930. dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
  931. return NTB_TOPO_NONE;
  932. }
  933. static void atom_link_hb(struct work_struct *work)
  934. {
  935. struct intel_ntb_dev *ndev = hb_ndev(work);
  936. unsigned long poll_ts;
  937. void __iomem *mmio;
  938. u32 status32;
  939. poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
  940. /* Delay polling the link status if an interrupt was received,
  941. * unless the cached link status says the link is down.
  942. */
  943. if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
  944. schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
  945. return;
  946. }
  947. if (atom_poll_link(ndev))
  948. ntb_link_event(&ndev->ntb);
  949. if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
  950. schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
  951. return;
  952. }
  953. /* Link is down with error: recover the link! */
  954. mmio = ndev->self_mmio;
  955. /* Driver resets the NTB ModPhy lanes - magic! */
  956. iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
  957. iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
  958. iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
  959. iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);
  960. /* Driver waits 100ms to allow the NTB ModPhy to settle */
  961. msleep(100);
  962. /* Clear AER Errors, write to clear */
  963. status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
  964. dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
  965. status32 &= PCI_ERR_COR_REP_ROLL;
  966. iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
  967. /* Clear unexpected electrical idle event in LTSSM, write to clear */
  968. status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
  969. dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
  970. status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
  971. iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
  972. /* Clear DeSkew Buffer error, write to clear */
  973. status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
  974. dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
  975. status32 |= ATOM_DESKEWSTS_DBERR;
  976. iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
  977. status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
  978. dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
  979. status32 &= ATOM_IBIST_ERR_OFLOW;
  980. iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
  981. /* Releases the NTB state machine to allow the link to retrain */
  982. status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
  983. dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
  984. status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
  985. iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
  986. /* There is a potential race between the 2 NTB devices recovering at the
  987. * same time. If the times are the same, the link will not recover and
  988. * the driver will be stuck in this loop forever. Add a random interval
  989. * to the recovery time to prevent this race.
  990. */
  991. schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
  992. + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
  993. }
  994. static int atom_init_isr(struct intel_ntb_dev *ndev)
  995. {
  996. int rc;
  997. rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
  998. ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
  999. if (rc)
  1000. return rc;
  1001. /* ATOM doesn't have link status interrupt, poll on that platform */
  1002. ndev->last_ts = jiffies;
  1003. INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
  1004. schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
  1005. return 0;
  1006. }
  1007. static void atom_deinit_isr(struct intel_ntb_dev *ndev)
  1008. {
  1009. cancel_delayed_work_sync(&ndev->hb_timer);
  1010. ndev_deinit_isr(ndev);
  1011. }
  1012. static int atom_init_ntb(struct intel_ntb_dev *ndev)
  1013. {
  1014. ndev->mw_count = ATOM_MW_COUNT;
  1015. ndev->spad_count = ATOM_SPAD_COUNT;
  1016. ndev->db_count = ATOM_DB_COUNT;
  1017. switch (ndev->ntb.topo) {
  1018. case NTB_TOPO_B2B_USD:
  1019. case NTB_TOPO_B2B_DSD:
  1020. ndev->self_reg = &atom_pri_reg;
  1021. ndev->peer_reg = &atom_b2b_reg;
  1022. ndev->xlat_reg = &atom_sec_xlat;
  1023. /* Enable Bus Master and Memory Space on the secondary side */
  1024. iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
  1025. ndev->self_mmio + ATOM_SPCICMD_OFFSET);
  1026. break;
  1027. default:
  1028. return -EINVAL;
  1029. }
  1030. ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
  1031. return 0;
  1032. }
  1033. static int atom_init_dev(struct intel_ntb_dev *ndev)
  1034. {
  1035. u32 ppd;
  1036. int rc;
  1037. rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
  1038. if (rc)
  1039. return -EIO;
  1040. ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
  1041. if (ndev->ntb.topo == NTB_TOPO_NONE)
  1042. return -EINVAL;
  1043. rc = atom_init_ntb(ndev);
  1044. if (rc)
  1045. return rc;
  1046. rc = atom_init_isr(ndev);
  1047. if (rc)
  1048. return rc;
  1049. if (ndev->ntb.topo != NTB_TOPO_SEC) {
  1050. /* Initiate PCI-E link training */
  1051. rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
  1052. ppd | ATOM_PPD_INIT_LINK);
  1053. if (rc)
  1054. return rc;
  1055. }
  1056. return 0;
  1057. }
  1058. static void atom_deinit_dev(struct intel_ntb_dev *ndev)
  1059. {
  1060. atom_deinit_isr(ndev);
  1061. }
  1062. /* XEON */
  1063. static u64 xeon_db_ioread(void __iomem *mmio)
  1064. {
  1065. return (u64)ioread16(mmio);
  1066. }
  1067. static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
  1068. {
  1069. iowrite16((u16)bits, mmio);
  1070. }
  1071. static int xeon_poll_link(struct intel_ntb_dev *ndev)
  1072. {
  1073. u16 reg_val;
  1074. int rc;
  1075. ndev->reg->db_iowrite(ndev->db_link_mask,
  1076. ndev->self_mmio +
  1077. ndev->self_reg->db_bell);
  1078. rc = pci_read_config_word(ndev->ntb.pdev,
  1079. XEON_LINK_STATUS_OFFSET, &reg_val);
  1080. if (rc)
  1081. return 0;
  1082. if (reg_val == ndev->lnk_sta)
  1083. return 0;
  1084. ndev->lnk_sta = reg_val;
  1085. return 1;
  1086. }
  1087. static int xeon_link_is_up(struct intel_ntb_dev *ndev)
  1088. {
  1089. if (ndev->ntb.topo == NTB_TOPO_SEC)
  1090. return 1;
  1091. return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
  1092. }
  1093. static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
  1094. {
  1095. switch (ppd & XEON_PPD_TOPO_MASK) {
  1096. case XEON_PPD_TOPO_B2B_USD:
  1097. return NTB_TOPO_B2B_USD;
  1098. case XEON_PPD_TOPO_B2B_DSD:
  1099. return NTB_TOPO_B2B_DSD;
  1100. case XEON_PPD_TOPO_PRI_USD:
  1101. case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
  1102. return NTB_TOPO_PRI;
  1103. case XEON_PPD_TOPO_SEC_USD:
  1104. case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
  1105. return NTB_TOPO_SEC;
  1106. }
  1107. return NTB_TOPO_NONE;
  1108. }
  1109. static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
  1110. {
  1111. if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
  1112. dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
  1113. return 1;
  1114. }
  1115. return 0;
  1116. }
  1117. static int xeon_init_isr(struct intel_ntb_dev *ndev)
  1118. {
  1119. return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
  1120. XEON_DB_MSIX_VECTOR_COUNT,
  1121. XEON_DB_MSIX_VECTOR_SHIFT,
  1122. XEON_DB_TOTAL_SHIFT);
  1123. }
  1124. static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
  1125. {
  1126. ndev_deinit_isr(ndev);
  1127. }
  1128. static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
  1129. const struct intel_b2b_addr *addr,
  1130. const struct intel_b2b_addr *peer_addr)
  1131. {
  1132. struct pci_dev *pdev;
  1133. void __iomem *mmio;
  1134. resource_size_t bar_size;
  1135. phys_addr_t bar_addr;
  1136. int b2b_bar;
  1137. u8 bar_sz;
  1138. pdev = ndev_pdev(ndev);
  1139. mmio = ndev->self_mmio;
  1140. if (ndev->b2b_idx >= ndev->mw_count) {
  1141. dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
  1142. b2b_bar = 0;
  1143. ndev->b2b_off = 0;
  1144. } else {
  1145. b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
  1146. if (b2b_bar < 0)
  1147. return -EIO;
  1148. dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
  1149. bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
  1150. dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
  1151. if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
  1152. dev_dbg(ndev_dev(ndev),
  1153. "b2b using first half of bar\n");
  1154. ndev->b2b_off = bar_size >> 1;
  1155. } else if (XEON_B2B_MIN_SIZE <= bar_size) {
  1156. dev_dbg(ndev_dev(ndev),
  1157. "b2b using whole bar\n");
  1158. ndev->b2b_off = 0;
  1159. --ndev->mw_count;
  1160. } else {
  1161. dev_dbg(ndev_dev(ndev),
  1162. "b2b bar size is too small\n");
  1163. return -EIO;
  1164. }
  1165. }
  1166. /* Reset the secondary bar sizes to match the primary bar sizes,
  1167. * except disable or halve the size of the b2b secondary bar.
  1168. *
  1169. * Note: code for each specific bar size register, because the register
  1170. * offsets are not in a consistent order (bar5sz comes after ppd, odd).
  1171. */
  1172. pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
  1173. dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
  1174. if (b2b_bar == 2) {
  1175. if (ndev->b2b_off)
  1176. bar_sz -= 1;
  1177. else
  1178. bar_sz = 0;
  1179. }
  1180. pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
  1181. pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
  1182. dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);
  1183. if (!ndev->bar4_split) {
  1184. pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
  1185. dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
  1186. if (b2b_bar == 4) {
  1187. if (ndev->b2b_off)
  1188. bar_sz -= 1;
  1189. else
  1190. bar_sz = 0;
  1191. }
  1192. pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
  1193. pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
  1194. dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
  1195. } else {
  1196. pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
  1197. dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
                if (b2b_bar == 4) {
                        if (ndev->b2b_off)
                                bar_sz -= 1;
                        else
                                bar_sz = 0;
                }
                pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
                pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
                dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);

                pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
                dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);

                if (b2b_bar == 5) {
                        if (ndev->b2b_off)
                                bar_sz -= 1;
                        else
                                bar_sz = 0;
                }
                pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
                pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
                dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
        }

        /* SBAR01 hit by first part of the b2b bar */
        if (b2b_bar == 0)
                bar_addr = addr->bar0_addr;
        else if (b2b_bar == 2)
                bar_addr = addr->bar2_addr64;
        else if (b2b_bar == 4 && !ndev->bar4_split)
                bar_addr = addr->bar4_addr64;
        else if (b2b_bar == 4)
                bar_addr = addr->bar4_addr32;
        else if (b2b_bar == 5)
                bar_addr = addr->bar5_addr32;
        else
                return -EIO;

        dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
        iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);

        /* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
         * The b2b bar is either disabled above, or configured half-size, and
         * it starts at the PBAR xlat + offset.
         */
        bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
        iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
        bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
        dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);

        if (!ndev->bar4_split) {
                bar_addr = addr->bar4_addr64 +
                        (b2b_bar == 4 ? ndev->b2b_off : 0);
                iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
                bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
                dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
        } else {
                bar_addr = addr->bar4_addr32 +
                        (b2b_bar == 4 ? ndev->b2b_off : 0);
                iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
                bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
                dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);

                bar_addr = addr->bar5_addr32 +
                        (b2b_bar == 5 ? ndev->b2b_off : 0);
                iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
                bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
                dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
        }

        /* setup incoming bar limits == base addrs (zero length windows) */
        bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
        iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
        bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
        dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);

        if (!ndev->bar4_split) {
                bar_addr = addr->bar4_addr64 +
                        (b2b_bar == 4 ? ndev->b2b_off : 0);
                iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
                bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
                dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
        } else {
                bar_addr = addr->bar4_addr32 +
                        (b2b_bar == 4 ? ndev->b2b_off : 0);
                iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
                bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
                dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);

                bar_addr = addr->bar5_addr32 +
                        (b2b_bar == 5 ? ndev->b2b_off : 0);
                iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
                bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
                dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr);
        }

        /* zero incoming translation addrs */
        iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);

        if (!ndev->bar4_split) {
                iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
        } else {
                iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
                iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
        }

        /* zero outgoing translation limits (whole bar size windows) */
        iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
        if (!ndev->bar4_split) {
                iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
        } else {
                iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
                iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
        }

        /* set outgoing translation offsets */
        bar_addr = peer_addr->bar2_addr64;
        iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
        bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
        dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);

        if (!ndev->bar4_split) {
                bar_addr = peer_addr->bar4_addr64;
                iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
                bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
                dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
        } else {
                bar_addr = peer_addr->bar4_addr32;
                iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
                bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
                dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);

                bar_addr = peer_addr->bar5_addr32;
                iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
                bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
                dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
        }

        /* set the translation offset for b2b registers */
        if (b2b_bar == 0)
                bar_addr = peer_addr->bar0_addr;
        else if (b2b_bar == 2)
                bar_addr = peer_addr->bar2_addr64;
        else if (b2b_bar == 4 && !ndev->bar4_split)
                bar_addr = peer_addr->bar4_addr64;
        else if (b2b_bar == 4)
                bar_addr = peer_addr->bar4_addr32;
        else if (b2b_bar == 5)
                bar_addr = peer_addr->bar5_addr32;
        else
                return -EIO;

        /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
        dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
        iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
        iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);

        if (b2b_bar) {
                /* map peer ntb mmio config space registers */
                ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
                                            XEON_B2B_MIN_SIZE);
                if (!ndev->peer_mmio)
                        return -EIO;
        }

        return 0;
}

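/*
 * xeon_init_ntb() - set up NTB resources for the detected topology.
 *
 * Chooses memory window, scratchpad and doorbell counts, selects the
 * register maps used for the local and peer side, and, for back-to-back
 * topologies, configures the dedicated b2b memory window and enables bus
 * mastering and memory space on the secondary side.
 */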
static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{
        int rc;
        u32 ntb_ctl;

        if (ndev->bar4_split)
                ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
        else
                ndev->mw_count = XEON_MW_COUNT;

        ndev->spad_count = XEON_SPAD_COUNT;
        ndev->db_count = XEON_DB_COUNT;
        ndev->db_link_mask = XEON_DB_LINK_BIT;

        switch (ndev->ntb.topo) {
        case NTB_TOPO_PRI:
                if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
                        dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
                        return -EINVAL;
                }

                /* enable link to allow secondary side device to appear */
                ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
                ntb_ctl &= ~NTB_CTL_DISABLE;
                iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

                /* use half the spads for the peer */
                ndev->spad_count >>= 1;
                ndev->self_reg = &xeon_pri_reg;
                ndev->peer_reg = &xeon_sec_reg;
                ndev->xlat_reg = &xeon_sec_xlat;
                break;

        case NTB_TOPO_SEC:
                if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
                        dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
                        return -EINVAL;
                }
                /* use half the spads for the peer */
                ndev->spad_count >>= 1;
                ndev->self_reg = &xeon_sec_reg;
                ndev->peer_reg = &xeon_pri_reg;
                ndev->xlat_reg = &xeon_pri_xlat;
                break;

        case NTB_TOPO_B2B_USD:
        case NTB_TOPO_B2B_DSD:
                ndev->self_reg = &xeon_pri_reg;
                ndev->peer_reg = &xeon_b2b_reg;
                ndev->xlat_reg = &xeon_sec_xlat;

                if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
                        ndev->peer_reg = &xeon_pri_reg;

                        if (b2b_mw_idx < 0)
                                ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
                        else
                                ndev->b2b_idx = b2b_mw_idx;

                        dev_dbg(ndev_dev(ndev),
                                "setting up b2b mw idx %d means %d\n",
                                b2b_mw_idx, ndev->b2b_idx);

                } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
                        dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
                        ndev->db_count -= 1;
                }

                if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
                        rc = xeon_setup_b2b_mw(ndev,
                                               &xeon_b2b_dsd_addr,
                                               &xeon_b2b_usd_addr);
                } else {
                        rc = xeon_setup_b2b_mw(ndev,
                                               &xeon_b2b_usd_addr,
                                               &xeon_b2b_dsd_addr);
                }
                if (rc)
                        return rc;

                /* Enable Bus Master and Memory Space on the secondary side */
                iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
                          ndev->self_mmio + XEON_SPCICMD_OFFSET);
                break;

        default:
                return -EINVAL;
        }

        ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

        ndev->reg->db_iowrite(ndev->db_valid_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        return 0;
}

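/*
 * xeon_init_dev() - per-device initialization for Xeon NTB hardware.
 *
 * Flags the hardware errata that apply to this device ID, reads the PPD
 * register to determine the topology and split-BAR mode, then initializes
 * the NTB resources and interrupt handling.
 */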
static int xeon_init_dev(struct intel_ntb_dev *ndev)
{
        struct pci_dev *pdev;
        u8 ppd;
        int rc, mem;

        pdev = ndev_pdev(ndev);

        switch (pdev->device) {
        /* There is a Xeon hardware errata related to writes to SDOORBELL or
         * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
         * which may hang the system.  To workaround this use the second memory
         * window to access the interrupt and scratch pad registers on the
         * remote system.
         */
        case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
                ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
                break;
        }

        switch (pdev->device) {
        /* There is a hardware errata related to accessing any register in
         * SB01BASE in the presence of bidirectional traffic crossing the NTB.
         */
        case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
                ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
                break;
        }

        switch (pdev->device) {
        /* HW Errata on bit 14 of b2bdoorbell register.  Writes will not be
         * mirrored to the remote system.  Shrink the number of bits by one,
         * since bit 14 is the last bit.
         */
        case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
                ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
                break;
        }

        ndev->reg = &xeon_reg;

        rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
        if (rc)
                return -EIO;

        ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
        dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
                ntb_topo_string(ndev->ntb.topo));
        if (ndev->ntb.topo == NTB_TOPO_NONE)
                return -EINVAL;

        if (ndev->ntb.topo != NTB_TOPO_SEC) {
                ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
                dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
                        ppd, ndev->bar4_split);
        } else {
                /* This is a way for transparent BAR to figure out if we are
                 * doing split BAR or not. There is no way for the hw on the
                 * transparent side to know and set the PPD.
                 */
                mem = pci_select_bars(pdev, IORESOURCE_MEM);
                ndev->bar4_split = hweight32(mem) ==
                        HSX_SPLIT_BAR_MW_COUNT + 1;
                dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
                        mem, ndev->bar4_split);
        }

        rc = xeon_init_ntb(ndev);
        if (rc)
                return rc;

        return xeon_init_isr(ndev);
}

static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
{
        xeon_deinit_isr(ndev);
}

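/*
 * intel_ntb_init_pci() - common PCI setup for Atom and Xeon devices.
 *
 * Enables the device, requests its regions, sets bus mastering and the DMA
 * masks (falling back from 64 to 32 bits), and maps BAR 0 as the local NTB
 * register space.  Until a b2b window is mapped, the peer registers alias
 * the local mapping.
 */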
static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
        int rc;

        pci_set_drvdata(pdev, ndev);

        rc = pci_enable_device(pdev);
        if (rc)
                goto err_pci_enable;

        rc = pci_request_regions(pdev, NTB_NAME);
        if (rc)
                goto err_pci_regions;

        pci_set_master(pdev);

        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (rc) {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc)
                        goto err_dma_mask;
                dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
        }

        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (rc) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc)
                        goto err_dma_mask;
                dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
        }

        ndev->self_mmio = pci_iomap(pdev, 0, 0);
        if (!ndev->self_mmio) {
                rc = -EIO;
                goto err_mmio;
        }
        ndev->peer_mmio = ndev->self_mmio;

        return 0;

err_mmio:
err_dma_mask:
        pci_clear_master(pdev);
        pci_release_regions(pdev);
err_pci_regions:
        pci_disable_device(pdev);
err_pci_enable:
        pci_set_drvdata(pdev, NULL);
        return rc;
}

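/* Undo intel_ntb_init_pci(): unmap the BARs and release the PCI device. */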
static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
        struct pci_dev *pdev = ndev_pdev(ndev);

        if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
                pci_iounmap(pdev, ndev->peer_mmio);
        pci_iounmap(pdev, ndev->self_mmio);

        pci_clear_master(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

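/* Reset the driver-private state to known defaults before hardware init. */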
static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
                                    struct pci_dev *pdev)
{
        ndev->ntb.pdev = pdev;
        ndev->ntb.topo = NTB_TOPO_NONE;
        ndev->ntb.ops = &intel_ntb_ops;

        ndev->b2b_off = 0;
        ndev->b2b_idx = INT_MAX;

        ndev->bar4_split = 0;

        ndev->mw_count = 0;
        ndev->spad_count = 0;
        ndev->db_count = 0;
        ndev->db_vec_count = 0;
        ndev->db_vec_shift = 0;

        ndev->ntb_ctl = 0;
        ndev->lnk_sta = 0;

        ndev->db_valid_mask = 0;
        ndev->db_link_mask = 0;
        ndev->db_mask = 0;

        spin_lock_init(&ndev->db_mask_lock);
}

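/*
 * intel_ntb_pci_probe() - driver probe.
 *
 * Allocates the device structure on the local NUMA node, performs the
 * common PCI setup, then runs the Atom or Xeon specific initialization
 * before registering the device with the NTB core.
 */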
static int intel_ntb_pci_probe(struct pci_dev *pdev,
                               const struct pci_device_id *id)
{
        struct intel_ntb_dev *ndev;
        int rc, node;

        node = dev_to_node(&pdev->dev);

        if (pdev_is_atom(pdev)) {
                ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
                if (!ndev) {
                        rc = -ENOMEM;
                        goto err_ndev;
                }

                ndev_init_struct(ndev, pdev);

                rc = intel_ntb_init_pci(ndev, pdev);
                if (rc)
                        goto err_init_pci;

                rc = atom_init_dev(ndev);
                if (rc)
                        goto err_init_dev;

        } else if (pdev_is_xeon(pdev)) {
                ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
                if (!ndev) {
                        rc = -ENOMEM;
                        goto err_ndev;
                }

                ndev_init_struct(ndev, pdev);

                rc = intel_ntb_init_pci(ndev, pdev);
                if (rc)
                        goto err_init_pci;

                rc = xeon_init_dev(ndev);
                if (rc)
                        goto err_init_dev;

        } else {
                rc = -EINVAL;
                goto err_ndev;
        }

        ndev_reset_unsafe_flags(ndev);

        ndev->reg->poll_link(ndev);

        ndev_init_debugfs(ndev);

        rc = ntb_register_device(&ndev->ntb);
        if (rc)
                goto err_register;

        dev_info(&pdev->dev, "NTB device registered.\n");

        return 0;

err_register:
        ndev_deinit_debugfs(ndev);
        if (pdev_is_atom(pdev))
                atom_deinit_dev(ndev);
        else if (pdev_is_xeon(pdev))
                xeon_deinit_dev(ndev);
err_init_dev:
        intel_ntb_deinit_pci(ndev);
err_init_pci:
        kfree(ndev);
err_ndev:
        return rc;
}

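/* Unregister from the NTB core and tear down in reverse order of probe. */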
static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
        struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

        ntb_unregister_device(&ndev->ntb);
        ndev_deinit_debugfs(ndev);
        if (pdev_is_atom(pdev))
                atom_deinit_dev(ndev);
        else if (pdev_is_xeon(pdev))
                xeon_deinit_dev(ndev);
        intel_ntb_deinit_pci(ndev);
        kfree(ndev);
}

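/* Hardware register access descriptors for the Atom and Xeon families. */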
static const struct intel_ntb_reg atom_reg = {
        .poll_link = atom_poll_link,
        .link_is_up = atom_link_is_up,
        .db_ioread = atom_db_ioread,
        .db_iowrite = atom_db_iowrite,
        .db_size = sizeof(u64),
        .ntb_ctl = ATOM_NTBCNTL_OFFSET,
        .mw_bar = {2, 4},
};

static const struct intel_ntb_alt_reg atom_pri_reg = {
        .db_bell = ATOM_PDOORBELL_OFFSET,
        .db_mask = ATOM_PDBMSK_OFFSET,
        .spad = ATOM_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg atom_b2b_reg = {
        .db_bell = ATOM_B2B_DOORBELL_OFFSET,
        .spad = ATOM_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg atom_sec_xlat = {
        /* FIXME : .bar0_base = ATOM_SBAR0BASE_OFFSET, */
        /* FIXME : .bar2_limit = ATOM_SBAR2LMT_OFFSET, */
        .bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
};

static const struct intel_ntb_reg xeon_reg = {
        .poll_link = xeon_poll_link,
        .link_is_up = xeon_link_is_up,
        .db_ioread = xeon_db_ioread,
        .db_iowrite = xeon_db_iowrite,
        .db_size = sizeof(u32),
        .ntb_ctl = XEON_NTBCNTL_OFFSET,
        .mw_bar = {2, 4, 5},
};

static const struct intel_ntb_alt_reg xeon_pri_reg = {
        .db_bell = XEON_PDOORBELL_OFFSET,
        .db_mask = XEON_PDBMSK_OFFSET,
        .spad = XEON_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg xeon_sec_reg = {
        .db_bell = XEON_SDOORBELL_OFFSET,
        .db_mask = XEON_SDBMSK_OFFSET,
        /* second half of the scratchpads */
        .spad = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
};

static const struct intel_ntb_alt_reg xeon_b2b_reg = {
        .db_bell = XEON_B2B_DOORBELL_OFFSET,
        .spad = XEON_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
        /* Note: no primary .bar0_base visible to the secondary side.
         *
         * The secondary side cannot get the base address stored in primary
         * bars.  The base address is necessary to set the limit register to
         * any value other than zero, or unlimited.
         *
         * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
         * window by setting the limit equal to base, nor can it limit the
         * size of the memory window by setting the limit to base + size.
         */
        .bar2_limit = XEON_PBAR23LMT_OFFSET,
        .bar2_xlat = XEON_PBAR23XLAT_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
        .bar0_base = XEON_SBAR0BASE_OFFSET,
        .bar2_limit = XEON_SBAR23LMT_OFFSET,
        .bar2_xlat = XEON_SBAR23XLAT_OFFSET,
};

static struct intel_b2b_addr xeon_b2b_usd_addr = {
        .bar2_addr64 = XEON_B2B_BAR2_USD_ADDR64,
        .bar4_addr64 = XEON_B2B_BAR4_USD_ADDR64,
        .bar4_addr32 = XEON_B2B_BAR4_USD_ADDR32,
        .bar5_addr32 = XEON_B2B_BAR5_USD_ADDR32,
};

static struct intel_b2b_addr xeon_b2b_dsd_addr = {
        .bar2_addr64 = XEON_B2B_BAR2_DSD_ADDR64,
        .bar4_addr64 = XEON_B2B_BAR4_DSD_ADDR64,
        .bar4_addr32 = XEON_B2B_BAR4_DSD_ADDR32,
        .bar5_addr32 = XEON_B2B_BAR5_DSD_ADDR32,
};

/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
        .mw_count = intel_ntb_mw_count,
        .mw_get_range = intel_ntb_mw_get_range,
        .mw_set_trans = intel_ntb_mw_set_trans,
        .link_is_up = intel_ntb_link_is_up,
        .link_enable = intel_ntb_link_enable,
        .link_disable = intel_ntb_link_disable,
        .db_is_unsafe = intel_ntb_db_is_unsafe,
        .db_valid_mask = intel_ntb_db_valid_mask,
        .db_vector_count = intel_ntb_db_vector_count,
        .db_vector_mask = intel_ntb_db_vector_mask,
        .db_read = intel_ntb_db_read,
        .db_clear = intel_ntb_db_clear,
        .db_set_mask = intel_ntb_db_set_mask,
        .db_clear_mask = intel_ntb_db_clear_mask,
        .peer_db_addr = intel_ntb_peer_db_addr,
        .peer_db_set = intel_ntb_peer_db_set,
        .spad_is_unsafe = intel_ntb_spad_is_unsafe,
        .spad_count = intel_ntb_spad_count,
        .spad_read = intel_ntb_spad_read,
        .spad_write = intel_ntb_spad_write,
        .peer_spad_addr = intel_ntb_peer_spad_addr,
        .peer_spad_read = intel_ntb_peer_spad_read,
        .peer_spad_write = intel_ntb_peer_spad_write,
};

static const struct file_operations intel_ntb_debugfs_info = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = ndev_debugfs_read,
};

static const struct pci_device_id intel_ntb_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
        {0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);

static struct pci_driver intel_ntb_pci_driver = {
        .name = KBUILD_MODNAME,
        .id_table = intel_ntb_pci_tbl,
        .probe = intel_ntb_pci_probe,
        .remove = intel_ntb_pci_remove,
};

static int __init intel_ntb_pci_driver_init(void)
{
        pr_info("%s %s\n", NTB_DESC, NTB_VER);

        if (debugfs_initialized())
                debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

        return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);

static void __exit intel_ntb_pci_driver_exit(void)
{
        pci_unregister_driver(&intel_ntb_pci_driver);

        debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);