ntb_hw_switchtec.c

/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/switchtec.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ntb.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static ulong max_mw_size = SZ_2M;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size,
        "Max memory window size reported to the upper layer");

static bool use_lut_mws;
module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
        "Enable the use of the LUT based memory windows");
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
        u64 low, high;

        low = ioread32(mmio);
        high = ioread32(mmio + sizeof(u32));
        return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
        iowrite32(val, mmio);
        iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
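/*
 * Note (added commentary): the fallbacks above split each 64-bit access
 * into two 32-bit accesses, low word first, so they are not atomic with
 * respect to the hardware. For example, _ioread64() of a register
 * holding 0x1122334455667788 reads 0x55667788 then 0x11223344 and
 * recombines them; a concurrent hardware update between the two reads
 * could be observed torn.
 */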
#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS 128

struct shared_mw {
        u32 magic;
        u32 link_sta;
        u32 partition_id;
        u64 mw_sizes[MAX_MWS];
        u32 spad[128];
};

#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
#define LUT_SIZE SZ_64K

struct switchtec_ntb {
        struct ntb_dev ntb;
        struct switchtec_dev *stdev;

        int self_partition;
        int peer_partition;

        int doorbell_irq;
        int message_irq;

        struct ntb_info_regs __iomem *mmio_ntb;
        struct ntb_ctrl_regs __iomem *mmio_ctrl;
        struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
        struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
        struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
        struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
        struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;

        void __iomem *mmio_xlink_win;

        struct shared_mw *self_shared;
        struct shared_mw __iomem *peer_shared;
        dma_addr_t self_shared_dma;

        u64 db_mask;
        u64 db_valid_mask;
        int db_shift;
        int db_peer_shift;

        /* synchronize rmw access of db_mask and hw reg */
        spinlock_t db_mask_lock;

        int nr_direct_mw;
        int nr_lut_mw;
        int nr_rsvd_luts;
        int direct_mw_to_bar[MAX_DIRECT_MW];

        int peer_nr_direct_mw;
        int peer_nr_lut_mw;
        int peer_direct_mw_to_bar[MAX_DIRECT_MW];

        bool link_is_up;
        enum ntb_speed link_speed;
        enum ntb_width link_width;
        struct work_struct link_reinit_work;
};
static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
{
        return container_of(ntb, struct switchtec_ntb, ntb);
}

static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
                                 struct ntb_ctrl_regs __iomem *ctl,
                                 u32 op, int wait_status)
{
        static const char * const op_text[] = {
                [NTB_CTRL_PART_OP_LOCK] = "lock",
                [NTB_CTRL_PART_OP_CFG] = "configure",
                [NTB_CTRL_PART_OP_RESET] = "reset",
        };

        int i;
        u32 ps;
        int status;

        switch (op) {
        case NTB_CTRL_PART_OP_LOCK:
                status = NTB_CTRL_PART_STATUS_LOCKING;
                break;
        case NTB_CTRL_PART_OP_CFG:
                status = NTB_CTRL_PART_STATUS_CONFIGURING;
                break;
        case NTB_CTRL_PART_OP_RESET:
                status = NTB_CTRL_PART_STATUS_RESETTING;
                break;
        default:
                return -EINVAL;
        }

        iowrite32(op, &ctl->partition_op);

        for (i = 0; i < 1000; i++) {
                if (msleep_interruptible(50) != 0) {
                        iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
                        return -EINTR;
                }

                ps = ioread32(&ctl->partition_status) & 0xFFFF;

                if (ps != status)
                        break;
        }

        if (ps == wait_status)
                return 0;

        if (ps == status) {
                dev_err(&sndev->stdev->dev,
                        "Timed out while performing %s (%d). (%08x)\n",
                        op_text[op], op,
                        ioread32(&ctl->partition_status));

                return -ETIMEDOUT;
        }

        return -EIO;
}
static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
                                  u32 val)
{
        if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
                return -EINVAL;

        iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);

        return 0;
}

static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);
        int nr_direct_mw = sndev->peer_nr_direct_mw;
        int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;

        if (pidx != NTB_DEF_PEER_IDX)
                return -EINVAL;

        if (!use_lut_mws)
                nr_lut_mw = 0;

        return nr_direct_mw + nr_lut_mw;
}

static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
        return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
}

static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
        return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
}

static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
                                      int widx, resource_size_t *addr_align,
                                      resource_size_t *size_align,
                                      resource_size_t *size_max)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);
        int lut;
        resource_size_t size;

        if (pidx != NTB_DEF_PEER_IDX)
                return -EINVAL;

        lut = widx >= sndev->peer_nr_direct_mw;
        size = ioread64(&sndev->peer_shared->mw_sizes[widx]);

        if (size == 0)
                return -EINVAL;

        if (addr_align)
                *addr_align = lut ? size : SZ_4K;

        if (size_align)
                *size_align = lut ? size : SZ_4K;

        if (size_max)
                *size_max = size;

        return 0;
}

static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
        struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
        int bar = sndev->peer_direct_mw_to_bar[idx];
        u32 ctl_val;

        ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
        ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
        iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
        iowrite32(0, &ctl->bar_entry[bar].win_size);
        iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}

static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
{
        struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

        iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}

static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
                                        dma_addr_t addr, resource_size_t size)
{
        int xlate_pos = ilog2(size);
        int bar = sndev->peer_direct_mw_to_bar[idx];
        struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
        u32 ctl_val;

        ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
        ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

        iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
        iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
        iowrite64(sndev->self_partition | addr,
                  &ctl->bar_entry[bar].xlate_addr);
}

static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
                                     dma_addr_t addr, resource_size_t size)
{
        struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

        iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
                  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
                                      dma_addr_t addr, resource_size_t size)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);
        struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
        int xlate_pos = ilog2(size);
        int nr_direct_mw = sndev->peer_nr_direct_mw;
        int rc;

        if (pidx != NTB_DEF_PEER_IDX)
                return -EINVAL;

        dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
                widx, pidx, &addr, &size);

        if (widx >= switchtec_ntb_mw_count(ntb, pidx))
                return -EINVAL;

        if (xlate_pos < 12)
                return -EINVAL;

        if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
                /*
                 * In certain circumstances we can get a buffer that is
                 * not aligned to its size. (Most of the time
                 * dma_alloc_coherent ensures this). This can happen when
                 * using large buffers allocated by the CMA
                 * (see CONFIG_CMA_ALIGNMENT)
                 */
                dev_err(&sndev->stdev->dev,
                        "ERROR: Memory window address is not aligned to its size!\n");
                return -EINVAL;
        }

        rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
                                   NTB_CTRL_PART_STATUS_LOCKED);
        if (rc)
                return rc;

        if (addr == 0 || size == 0) {
                if (widx < nr_direct_mw)
                        switchtec_ntb_mw_clr_direct(sndev, widx);
                else
                        switchtec_ntb_mw_clr_lut(sndev, widx);
        } else {
                if (widx < nr_direct_mw)
                        switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
                else
                        switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
        }

        rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
                                   NTB_CTRL_PART_STATUS_NORMAL);
        if (rc == -EIO) {
                dev_err(&sndev->stdev->dev,
                        "Hardware reported an error configuring mw %d: %08x\n",
                        widx, ioread32(&ctl->bar_error));

                if (widx < nr_direct_mw)
                        switchtec_ntb_mw_clr_direct(sndev, widx);
                else
                        switchtec_ntb_mw_clr_lut(sndev, widx);

                switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
                                      NTB_CTRL_PART_STATUS_NORMAL);
        }

        return rc;
}
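/*
 * A minimal client-side sketch (added for illustration, not part of this
 * driver) of programming a window through the common linux/ntb.h API,
 * assuming a previously obtained struct ntb_dev *ntb. dma_alloc_coherent()
 * normally returns a buffer aligned to its size, which satisfies the
 * IS_ALIGNED() check above:
 *
 *      resource_size_t size = SZ_2M;
 *      dma_addr_t dma_addr;
 *      void *buf;
 *      int rc;
 *
 *      buf = dma_alloc_coherent(&ntb->pdev->dev, size, &dma_addr,
 *                               GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *
 *      rc = ntb_mw_set_trans(ntb, NTB_DEF_PEER_IDX, 0, dma_addr, size);
 *      if (rc)
 *              dma_free_coherent(&ntb->pdev->dev, size, buf, dma_addr);
 */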
static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);
        int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;

        return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
}

static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
                                         int idx, phys_addr_t *base,
                                         resource_size_t *size)
{
        int bar = sndev->direct_mw_to_bar[idx];
        size_t offset = 0;

        if (bar < 0)
                return -EINVAL;

        if (idx == 0) {
                /*
                 * This is the direct BAR shared with the LUTs
                 * which means the actual window will be offset
                 * by the size of all the LUT entries.
                 */
                offset = LUT_SIZE * sndev->nr_lut_mw;
        }

        if (base)
                *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

        if (size) {
                *size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
                if (offset && *size > offset)
                        *size = offset;

                if (*size > max_mw_size)
                        *size = max_mw_size;
        }

        return 0;
}

static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
                                      int idx, phys_addr_t *base,
                                      resource_size_t *size)
{
        int bar = sndev->direct_mw_to_bar[0];
        int offset;

        offset = LUT_SIZE * lut_index(sndev, idx);

        if (base)
                *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

        if (size)
                *size = LUT_SIZE;

        return 0;
}

static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
                                          phys_addr_t *base,
                                          resource_size_t *size)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        if (idx < sndev->nr_direct_mw)
                return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
        else if (idx < switchtec_ntb_peer_mw_count(ntb))
                return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
        else
                return -EINVAL;
}
static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
                                          int partition,
                                          enum ntb_speed *speed,
                                          enum ntb_width *width)
{
        struct switchtec_dev *stdev = sndev->stdev;

        u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
        u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);

        if (speed)
                *speed = (linksta >> 16) & 0xF;

        if (width)
                *width = (linksta >> 20) & 0x3F;
}

static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
{
        enum ntb_speed self_speed, peer_speed;
        enum ntb_width self_width, peer_width;

        if (!sndev->link_is_up) {
                sndev->link_speed = NTB_SPEED_NONE;
                sndev->link_width = NTB_WIDTH_NONE;
                return;
        }

        switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
                                      &self_speed, &self_width);
        switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
                                      &peer_speed, &peer_width);

        sndev->link_speed = min(self_speed, peer_speed);
        sndev->link_width = min(self_width, peer_width);
}

static int crosslink_is_enabled(struct switchtec_ntb *sndev)
{
        struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;

        return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
}

static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
{
        int i;
        u32 msg_map = 0;

        if (!crosslink_is_enabled(sndev))
                return;

        for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
                int m = i | sndev->self_partition << 2;

                msg_map |= m << i * 8;
        }

        iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
        iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
                  &sndev->mmio_peer_dbmsg->odb_mask);
}

enum switchtec_msg {
        LINK_MESSAGE = 0,
        MSG_LINK_UP = 1,
        MSG_LINK_DOWN = 2,
        MSG_CHECK_LINK = 3,
        MSG_LINK_FORCE_DOWN = 4,
};
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);

static void link_reinit_work(struct work_struct *work)
{
        struct switchtec_ntb *sndev;

        sndev = container_of(work, struct switchtec_ntb, link_reinit_work);

        switchtec_ntb_reinit_peer(sndev);
}

static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
                                     enum switchtec_msg msg)
{
        int link_sta;
        int old = sndev->link_is_up;

        if (msg == MSG_LINK_FORCE_DOWN) {
                schedule_work(&sndev->link_reinit_work);

                if (sndev->link_is_up) {
                        sndev->link_is_up = 0;
                        ntb_link_event(&sndev->ntb);
                        dev_info(&sndev->stdev->dev, "ntb link forced down\n");
                }

                return;
        }

        link_sta = sndev->self_shared->link_sta;
        if (link_sta) {
                u64 peer = ioread64(&sndev->peer_shared->magic);

                if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
                        link_sta = peer >> 32;
                else
                        link_sta = 0;
        }

        sndev->link_is_up = link_sta;
        switchtec_ntb_set_link_speed(sndev);

        if (link_sta != old) {
                switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
                ntb_link_event(&sndev->ntb);
                dev_info(&sndev->stdev->dev, "ntb link %s\n",
                         link_sta ? "up" : "down");

                if (link_sta)
                        crosslink_init_dbmsgs(sndev);
        }
}

static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
        struct switchtec_ntb *sndev = stdev->sndev;

        switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
}

static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
                                    enum ntb_speed *speed,
                                    enum ntb_width *width)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        if (speed)
                *speed = sndev->link_speed;

        if (width)
                *width = sndev->link_width;

        return sndev->link_is_up;
}

static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
                                     enum ntb_speed max_speed,
                                     enum ntb_width max_width)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        dev_dbg(&sndev->stdev->dev, "enabling link\n");

        sndev->self_shared->link_sta = 1;
        switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);

        switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);

        return 0;
}

static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        dev_dbg(&sndev->stdev->dev, "disabling link\n");

        sndev->self_shared->link_sta = 0;
        switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);

        switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);

        return 0;
}
static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        return sndev->db_valid_mask;
}

static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
{
        return 1;
}

static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        if (db_vector < 0 || db_vector > 1)
                return 0;

        return sndev->db_valid_mask;
}

static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
{
        u64 ret;
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;

        return ret & sndev->db_valid_mask;
}

static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);

        return 0;
}

static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
        unsigned long irqflags;
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        if (db_bits & ~sndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

        sndev->db_mask |= db_bits << sndev->db_shift;
        iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

        spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

        return 0;
}

static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
        unsigned long irqflags;
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        if (db_bits & ~sndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

        sndev->db_mask &= ~(db_bits << sndev->db_shift);
        iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

        spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

        return 0;
}

static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
}

static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
                                      phys_addr_t *db_addr,
                                      resource_size_t *db_size)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);
        unsigned long offset;

        offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
                (unsigned long)sndev->stdev->mmio;

        offset += sndev->db_shift / 8;

        if (db_addr)
                *db_addr = pci_resource_start(ntb->pdev, 0) + offset;
        if (db_size)
                *db_size = sizeof(u32);

        return 0;
}

static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        iowrite64(db_bits << sndev->db_peer_shift,
                  &sndev->mmio_peer_dbmsg->odb);

        return 0;
}
static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        return ARRAY_SIZE(sndev->self_shared->spad);
}

static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
                return 0;

        if (!sndev->self_shared)
                return 0;

        return sndev->self_shared->spad[idx];
}

static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
                return -EINVAL;

        if (!sndev->self_shared)
                return -EIO;

        sndev->self_shared->spad[idx] = val;

        return 0;
}

static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
                                        int sidx)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        if (pidx != NTB_DEF_PEER_IDX)
                return -EINVAL;

        if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
                return 0;

        if (!sndev->peer_shared)
                return 0;

        return ioread32(&sndev->peer_shared->spad[sidx]);
}

static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
                                         int sidx, u32 val)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);

        if (pidx != NTB_DEF_PEER_IDX)
                return -EINVAL;

        if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
                return -EINVAL;

        if (!sndev->peer_shared)
                return -EIO;

        iowrite32(val, &sndev->peer_shared->spad[sidx]);

        return 0;
}

static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
                                        int sidx, phys_addr_t *spad_addr)
{
        struct switchtec_ntb *sndev = ntb_sndev(ntb);
        unsigned long offset;

        if (pidx != NTB_DEF_PEER_IDX)
                return -EINVAL;

        offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
                (unsigned long)sndev->stdev->mmio;

        if (spad_addr)
                *spad_addr = pci_resource_start(ntb->pdev, 0) + offset;

        return 0;
}
static const struct ntb_dev_ops switchtec_ntb_ops = {
        .mw_count = switchtec_ntb_mw_count,
        .mw_get_align = switchtec_ntb_mw_get_align,
        .mw_set_trans = switchtec_ntb_mw_set_trans,
        .peer_mw_count = switchtec_ntb_peer_mw_count,
        .peer_mw_get_addr = switchtec_ntb_peer_mw_get_addr,
        .link_is_up = switchtec_ntb_link_is_up,
        .link_enable = switchtec_ntb_link_enable,
        .link_disable = switchtec_ntb_link_disable,
        .db_valid_mask = switchtec_ntb_db_valid_mask,
        .db_vector_count = switchtec_ntb_db_vector_count,
        .db_vector_mask = switchtec_ntb_db_vector_mask,
        .db_read = switchtec_ntb_db_read,
        .db_clear = switchtec_ntb_db_clear,
        .db_set_mask = switchtec_ntb_db_set_mask,
        .db_clear_mask = switchtec_ntb_db_clear_mask,
        .db_read_mask = switchtec_ntb_db_read_mask,
        .peer_db_addr = switchtec_ntb_peer_db_addr,
        .peer_db_set = switchtec_ntb_peer_db_set,
        .spad_count = switchtec_ntb_spad_count,
        .spad_read = switchtec_ntb_spad_read,
        .spad_write = switchtec_ntb_spad_write,
        .peer_spad_read = switchtec_ntb_peer_spad_read,
        .peer_spad_write = switchtec_ntb_peer_spad_write,
        .peer_spad_addr = switchtec_ntb_peer_spad_addr,
};
static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
{
        u64 tpart_vec;
        int self;
        u64 part_map;
        int bit;

        sndev->ntb.pdev = sndev->stdev->pdev;
        sndev->ntb.topo = NTB_TOPO_SWITCH;
        sndev->ntb.ops = &switchtec_ntb_ops;

        INIT_WORK(&sndev->link_reinit_work, link_reinit_work);

        sndev->self_partition = sndev->stdev->partition;

        sndev->mmio_ntb = sndev->stdev->mmio_ntb;

        self = sndev->self_partition;
        tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
        tpart_vec <<= 32;
        tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);

        part_map = ioread64(&sndev->mmio_ntb->ep_map);
        part_map &= ~(1 << sndev->self_partition);

        if (!ffs(tpart_vec)) {
                if (sndev->stdev->partition_count != 2) {
                        dev_err(&sndev->stdev->dev,
                                "ntb target partition not defined\n");
                        return -ENODEV;
                }

                bit = ffs(part_map);
                if (!bit) {
                        dev_err(&sndev->stdev->dev,
                                "peer partition is not NT partition\n");
                        return -ENODEV;
                }

                sndev->peer_partition = bit - 1;
        } else {
                if (ffs(tpart_vec) != fls(tpart_vec)) {
                        dev_err(&sndev->stdev->dev,
                                "ntb driver only supports 1 pair of 1-1 ntb mapping\n");
                        return -ENODEV;
                }

                sndev->peer_partition = ffs(tpart_vec) - 1;
                if (!(part_map & (1 << sndev->peer_partition))) {
                        dev_err(&sndev->stdev->dev,
                                "ntb target partition is not NT partition\n");
                        return -ENODEV;
                }
        }

        dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
                sndev->self_partition, sndev->stdev->partition_count);

        sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
                SWITCHTEC_NTB_REG_CTRL_OFFSET;
        sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
                SWITCHTEC_NTB_REG_DBMSG_OFFSET;

        sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
        sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
        sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
        sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;

        return 0;
}
static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
                               struct ntb_ctrl_regs __iomem *ctl,
                               int lut_idx, int partition, u64 addr)
{
        int peer_bar = sndev->peer_direct_mw_to_bar[0];
        u32 ctl_val;
        int rc;

        rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
                                   NTB_CTRL_PART_STATUS_LOCKED);
        if (rc)
                return rc;

        ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
        ctl_val &= 0xFF;
        ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
        ctl_val |= ilog2(LUT_SIZE) << 8;
        ctl_val |= (sndev->nr_lut_mw - 1) << 14;
        iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);

        iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
                  &ctl->lut_entry[lut_idx]);

        rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
                                   NTB_CTRL_PART_STATUS_NORMAL);
        if (rc) {
                u32 bar_error, lut_error;

                bar_error = ioread32(&ctl->bar_error);
                lut_error = ioread32(&ctl->lut_error);
                dev_err(&sndev->stdev->dev,
                        "Error setting up reserved lut window: %08x / %08x\n",
                        bar_error, lut_error);
                return rc;
        }

        return 0;
}
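/*
 * Worked example of the ctl_val packing above (field layout inferred
 * from this code, not from a datasheet): the low byte is preserved,
 * bits 8+ carry the window size exponent and bits 14+ the LUT entry
 * count minus one. With LUT_SIZE = SZ_64K and sndev->nr_lut_mw = 8,
 * that is ctl_val |= (ilog2(SZ_64K) << 8) | (7 << 14), i.e.
 * (16 << 8) | (7 << 14).
 */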
static int config_req_id_table(struct switchtec_ntb *sndev,
                               struct ntb_ctrl_regs __iomem *mmio_ctrl,
                               int *req_ids, int count)
{
        int i, rc = 0;
        u32 error;
        u32 proxy_id;

        if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
                dev_err(&sndev->stdev->dev,
                        "Not enough requester IDs available.\n");
                return -EFAULT;
        }

        rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
                                   NTB_CTRL_PART_OP_LOCK,
                                   NTB_CTRL_PART_STATUS_LOCKED);
        if (rc)
                return rc;

        iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
                  &mmio_ctrl->partition_ctrl);

        for (i = 0; i < count; i++) {
                iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
                          &mmio_ctrl->req_id_table[i]);

                proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
                dev_dbg(&sndev->stdev->dev,
                        "Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
                        req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
                        req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
                        (proxy_id >> 1) & 0x7);
        }

        rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
                                   NTB_CTRL_PART_OP_CFG,
                                   NTB_CTRL_PART_STATUS_NORMAL);
        if (rc == -EIO) {
                error = ioread32(&mmio_ctrl->req_id_error);
                dev_err(&sndev->stdev->dev,
                        "Error setting up the requester ID table: %08x\n",
                        error);
        }

        return 0;
}
static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
                               u64 *mw_addrs, int mw_count)
{
        int rc, i;
        struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
        u64 addr;
        size_t size, offset;
        int bar;
        int xlate_pos;
        u32 ctl_val;

        rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
                                   NTB_CTRL_PART_STATUS_LOCKED);
        if (rc)
                return rc;

        for (i = 0; i < sndev->nr_lut_mw; i++) {
                if (i == ntb_lut_idx)
                        continue;

                addr = mw_addrs[0] + LUT_SIZE * i;

                iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
                           addr),
                          &ctl->lut_entry[i]);
        }

        sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);

        for (i = 0; i < sndev->nr_direct_mw; i++) {
                bar = sndev->direct_mw_to_bar[i];
                offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
                addr = mw_addrs[i] + offset;
                size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
                xlate_pos = ilog2(size);

                if (offset && size > offset)
                        size = offset;

                ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
                ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

                iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
                iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
                iowrite64(sndev->peer_partition | addr,
                          &ctl->bar_entry[bar].xlate_addr);
        }

        rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
                                   NTB_CTRL_PART_STATUS_NORMAL);
        if (rc) {
                u32 bar_error, lut_error;

                bar_error = ioread32(&ctl->bar_error);
                lut_error = ioread32(&ctl->lut_error);
                dev_err(&sndev->stdev->dev,
                        "Error setting up cross link windows: %08x / %08x\n",
                        bar_error, lut_error);
                return rc;
        }

        return 0;
}

static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
                                   struct ntb_ctrl_regs __iomem *mmio_ctrl)
{
        int req_ids[16];
        int i;
        u32 proxy_id;

        for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
                proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);

                if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
                        break;

                req_ids[i] = ((proxy_id >> 1) & 0xFF);
        }

        return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
}

/*
 * In crosslink configuration there is a virtual partition in the
 * middle of the two switches. The BARs in this partition have to be
 * enumerated and assigned addresses.
 */
static int crosslink_enum_partition(struct switchtec_ntb *sndev,
                                    u64 *bar_addrs)
{
        struct part_cfg_regs __iomem *part_cfg =
                &sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
        u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
        struct pff_csr_regs __iomem *mmio_pff =
                &sndev->stdev->mmio_pff_csr[pff];
        const u64 bar_space = 0x1000000000LL;
        u64 bar_addr;
        int bar_cnt = 0;
        int i;

        iowrite16(0x6, &mmio_pff->pcicmd);

        for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
                iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
                bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
                bar_addr &= ~0xf;

                dev_dbg(&sndev->stdev->dev,
                        "Crosslink BAR%d addr: %llx\n",
                        i, bar_addr);

                if (bar_addr != bar_space * i)
                        continue;

                bar_addrs[bar_cnt++] = bar_addr;
        }

        return bar_cnt;
}

static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
{
        int rc;
        int bar = sndev->direct_mw_to_bar[0];
        const int ntb_lut_idx = 1;
        u64 bar_addrs[6];
        u64 addr;
        int offset;
        int bar_cnt;

        if (!crosslink_is_enabled(sndev))
                return 0;

        dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
        sndev->ntb.topo = NTB_TOPO_CROSSLINK;

        bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
        if (bar_cnt < sndev->nr_direct_mw + 1) {
                dev_err(&sndev->stdev->dev,
                        "Error enumerating crosslink partition\n");
                return -EINVAL;
        }

        addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
                SWITCHTEC_NTB_REG_DBMSG_OFFSET +
                sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);

        offset = addr & (LUT_SIZE - 1);
        addr -= offset;

        rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
                                 sndev->peer_partition, addr);
        if (rc)
                return rc;

        rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
                                 bar_cnt - 1);
        if (rc)
                return rc;

        rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
        if (rc)
                return rc;

        sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
                                                LUT_SIZE, LUT_SIZE);
        if (!sndev->mmio_xlink_win) {
                rc = -ENOMEM;
                return rc;
        }

        sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
        sndev->nr_rsvd_luts++;

        crosslink_init_dbmsgs(sndev);

        return 0;
}

static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
{
        if (sndev->mmio_xlink_win)
                pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
}
static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
{
        int i;
        int cnt = 0;

        for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
                u32 r = ioread32(&ctrl->bar_entry[i].ctl);

                if (r & NTB_CTRL_BAR_VALID)
                        map[cnt++] = i;
        }

        return cnt;
}

static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
{
        sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
                                       sndev->mmio_self_ctrl);

        sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
        sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);

        dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
                sndev->nr_direct_mw, sndev->nr_lut_mw);

        sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
                                            sndev->mmio_peer_ctrl);

        sndev->peer_nr_lut_mw =
                ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
        sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);

        dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
                sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
}
/*
 * There are 64 doorbells in the switch hardware but this is
 * shared among all partitions. So we must split them in half
 * (32 for each partition). However, the message interrupts are
 * also shared with the top 4 doorbells so we just limit this to
 * 28 doorbells per partition.
 *
 * In crosslink mode, each side has its own dbmsg register so
 * they can each use all 60 of the available doorbells.
 */
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
        sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;

        if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
                sndev->db_shift = 0;
                sndev->db_peer_shift = 0;
                sndev->db_valid_mask = sndev->db_mask;
        } else if (sndev->self_partition < sndev->peer_partition) {
                sndev->db_shift = 0;
                sndev->db_peer_shift = 32;
                sndev->db_valid_mask = 0x0FFFFFFF;
        } else {
                sndev->db_shift = 32;
                sndev->db_peer_shift = 0;
                sndev->db_valid_mask = 0x0FFFFFFF;
        }

        iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
        iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
                  &sndev->mmio_peer_dbmsg->odb_mask);

        dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
                sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
}
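/*
 * Worked example of the split above: in the non-crosslink case with
 * self_partition < peer_partition, this side rings peer doorbells by
 * writing bits 32-59 (db_peer_shift = 32) and receives its own in bits
 * 0-27 (db_shift = 0), with db_valid_mask = 0x0FFFFFFF covering the 28
 * usable doorbells per partition. In crosslink mode both shifts are 0
 * and all 60 bits of db_mask are valid.
 */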
static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
{
        int i;
        u32 msg_map = 0;

        for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
                int m = i | sndev->peer_partition << 2;

                msg_map |= m << i * 8;
        }

        iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);

        for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
                iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
                          &sndev->mmio_self_dbmsg->imsg[i]);
}
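/*
 * Worked example of the msg_map packing above: each inbound message
 * register i gets one byte holding (i | peer_partition << 2). Assuming
 * peer_partition = 1 and four imsg registers, the bytes for i = 0..3
 * are 0x04, 0x05, 0x06, 0x07, so msg_map becomes 0x07060504.
 */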
static int
switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
{
        int req_ids[2];

        /*
         * Root Complex Requester ID (which is 0:00.0)
         */
        req_ids[0] = 0;

        /*
         * Host Bridge Requester ID (as read from the mmap address)
         */
        req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);

        return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
                                   ARRAY_SIZE(req_ids));
}
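/*
 * The requester IDs above are standard PCI BDF values packed as
 * bus << 8 | dev << 3 | fn, matching the decode in the dev_dbg() call
 * in config_req_id_table(). For example, a host bridge at 00:01.0
 * would be req_id 0x0008.
 */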
static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
{
        int i;

        memset(sndev->self_shared, 0, LUT_SIZE);
        sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
        sndev->self_shared->partition_id = sndev->stdev->partition;

        for (i = 0; i < sndev->nr_direct_mw; i++) {
                int bar = sndev->direct_mw_to_bar[i];
                resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);

                if (i == 0)
                        sz = min_t(resource_size_t, sz,
                                   LUT_SIZE * sndev->nr_lut_mw);

                sndev->self_shared->mw_sizes[i] = sz;
        }

        for (i = 0; i < sndev->nr_lut_mw; i++) {
                int idx = sndev->nr_direct_mw + i;

                sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
        }
}

static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
        int self_bar = sndev->direct_mw_to_bar[0];
        int rc;

        sndev->nr_rsvd_luts++;
        sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
                                                 LUT_SIZE,
                                                 &sndev->self_shared_dma,
                                                 GFP_KERNEL);
        if (!sndev->self_shared) {
                dev_err(&sndev->stdev->dev,
                        "unable to allocate memory for shared mw\n");
                return -ENOMEM;
        }

        switchtec_ntb_init_shared(sndev);

        rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
                                 sndev->self_partition,
                                 sndev->self_shared_dma);
        if (rc)
                goto unalloc_and_exit;

        sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
        if (!sndev->peer_shared) {
                rc = -ENOMEM;
                goto unalloc_and_exit;
        }

        dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
        return 0;

unalloc_and_exit:
        dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
                          sndev->self_shared, sndev->self_shared_dma);

        return rc;
}

static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
{
        if (sndev->peer_shared)
                pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);

        if (sndev->self_shared)
                dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
                                  sndev->self_shared,
                                  sndev->self_shared_dma);

        sndev->nr_rsvd_luts--;
}
static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
{
        struct switchtec_ntb *sndev = dev;

        dev_dbg(&sndev->stdev->dev, "doorbell\n");

        ntb_db_event(&sndev->ntb, 0);

        return IRQ_HANDLED;
}

static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
{
        int i;
        struct switchtec_ntb *sndev = dev;

        for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
                u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);

                if (msg & NTB_DBMSG_IMSG_STATUS) {
                        dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
                                i, (u32)msg);
                        iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);

                        if (i == LINK_MESSAGE)
                                switchtec_ntb_check_link(sndev, msg);
                }
        }

        return IRQ_HANDLED;
}

static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
{
        int i;
        int rc;
        int doorbell_irq = 0;
        int message_irq = 0;
        int event_irq;
        int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);

        event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);

        while (doorbell_irq == event_irq)
                doorbell_irq++;
        while (message_irq == doorbell_irq ||
               message_irq == event_irq)
                message_irq++;

        dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
                event_irq, doorbell_irq, message_irq);

        for (i = 0; i < idb_vecs - 4; i++)
                iowrite8(doorbell_irq,
                         &sndev->mmio_self_dbmsg->idb_vec_map[i]);
        for (; i < idb_vecs; i++)
                iowrite8(message_irq,
                         &sndev->mmio_self_dbmsg->idb_vec_map[i]);

        sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
        sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);

        rc = request_irq(sndev->doorbell_irq,
                         switchtec_ntb_doorbell_isr, 0,
                         "switchtec_ntb_doorbell", sndev);
        if (rc)
                return rc;

        rc = request_irq(sndev->message_irq,
                         switchtec_ntb_message_isr, 0,
                         "switchtec_ntb_message", sndev);
        if (rc) {
                free_irq(sndev->doorbell_irq, sndev);
                return rc;
        }

        return 0;
}

static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
{
        free_irq(sndev->doorbell_irq, sndev);
        free_irq(sndev->message_irq, sndev);
}
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
{
        dev_info(&sndev->stdev->dev, "peer reinitialized\n");
        switchtec_ntb_deinit_shared_mw(sndev);
        switchtec_ntb_init_mw(sndev);
        return switchtec_ntb_init_shared_mw(sndev);
}

static int switchtec_ntb_add(struct device *dev,
                             struct class_interface *class_intf)
{
        struct switchtec_dev *stdev = to_stdev(dev);
        struct switchtec_ntb *sndev;
        int rc;

        stdev->sndev = NULL;

        if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
                return -ENODEV;

        sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
        if (!sndev)
                return -ENOMEM;

        sndev->stdev = stdev;
        rc = switchtec_ntb_init_sndev(sndev);
        if (rc)
                goto free_and_exit;

        switchtec_ntb_init_mw(sndev);

        rc = switchtec_ntb_init_req_id_table(sndev);
        if (rc)
                goto free_and_exit;

        rc = switchtec_ntb_init_crosslink(sndev);
        if (rc)
                goto free_and_exit;

        switchtec_ntb_init_db(sndev);
        switchtec_ntb_init_msgs(sndev);

        rc = switchtec_ntb_init_shared_mw(sndev);
        if (rc)
                goto deinit_crosslink;

        rc = switchtec_ntb_init_db_msg_irq(sndev);
        if (rc)
                goto deinit_shared_and_exit;

        /*
         * If this host crashed, the other host may think the link is
         * still up. Tell them to force it down (it will go back up
         * once we register the ntb device).
         */
        switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);

        rc = ntb_register_device(&sndev->ntb);
        if (rc)
                goto deinit_and_exit;

        stdev->sndev = sndev;
        stdev->link_notifier = switchtec_ntb_link_notification;
        dev_info(dev, "NTB device registered\n");

        return 0;

deinit_and_exit:
        switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
        switchtec_ntb_deinit_shared_mw(sndev);
deinit_crosslink:
        switchtec_ntb_deinit_crosslink(sndev);
free_and_exit:
        kfree(sndev);
        dev_err(dev, "failed to register ntb device: %d\n", rc);
        return rc;
}

static void switchtec_ntb_remove(struct device *dev,
                                 struct class_interface *class_intf)
{
        struct switchtec_dev *stdev = to_stdev(dev);
        struct switchtec_ntb *sndev = stdev->sndev;

        if (!sndev)
                return;

        stdev->link_notifier = NULL;
        stdev->sndev = NULL;
        ntb_unregister_device(&sndev->ntb);
        switchtec_ntb_deinit_db_msg_irq(sndev);
        switchtec_ntb_deinit_shared_mw(sndev);
        switchtec_ntb_deinit_crosslink(sndev);
        kfree(sndev);
        dev_info(dev, "ntb device unregistered\n");
}

static struct class_interface switchtec_interface = {
        .add_dev = switchtec_ntb_add,
        .remove_dev = switchtec_ntb_remove,
};

static int __init switchtec_ntb_init(void)
{
        switchtec_interface.class = switchtec_class;
        return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);

static void __exit switchtec_ntb_exit(void)
{
        class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);