pci.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113
  1. /*
  2. * Copyright 2014 IBM Corp.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
  9. #include <linux/pci_regs.h>
  10. #include <linux/pci_ids.h>
  11. #include <linux/device.h>
  12. #include <linux/module.h>
  13. #include <linux/kernel.h>
  14. #include <linux/slab.h>
  15. #include <linux/sort.h>
  16. #include <linux/pci.h>
  17. #include <linux/of.h>
  18. #include <linux/delay.h>
  19. #include <asm/opal.h>
  20. #include <asm/msi_bitmap.h>
  21. #include <asm/pci-bridge.h> /* for struct pci_controller */
  22. #include <asm/pnv-pci.h>
  23. #include <asm/io.h>
  24. #include "cxl.h"
/*
 * CXL Vendor Specific Extended Capability (VSEC) layout.
 * All offsets below are relative to the start of the VSEC in PCIe
 * extended config space.
 */
#define CXL_PCI_VSEC_ID 0x1280
#define CXL_VSEC_MIN_SIZE 0x80

/* VSEC length is the top 12 bits of the 16-bit field at +0x6 */
#define CXL_READ_VSEC_LENGTH(dev, vsec, dest) \
{ \
	pci_read_config_word(dev, vsec + 0x6, dest); \
	*dest >>= 4; \
}
/* Number of AFU slices on the card */
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x8, dest)

#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x9, dest)
/* Status register bits: */
#define CXL_STATUS_SECOND_PORT 0x80
#define CXL_STATUS_MSI_X_FULL 0x40
#define CXL_STATUS_MSI_X_SINGLE 0x20
#define CXL_STATUS_FLASH_RW 0x08
#define CXL_STATUS_FLASH_RO 0x04
#define CXL_STATUS_LOADABLE_AFU 0x02
#define CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
	(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)

#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0xa, val)
/* Mode control bits: CXL protocol address-range select and enable */
#define CXL_VSEC_PROTOCOL_MASK 0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB 0x40
#define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01

#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0xc, dest)

#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xf, dest)

#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0x10, dest)

#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0x13, val)
/* Image state bits: */
#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */

/*
 * AFU descriptor and problem state area location. These are in 64kB
 * units; cxl_read_vsec() converts them to bytes.
 */
#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x2c, dest)

/* This works a little different than the p1/p2 register accesses to make it
 * easier to pull out individual fields */
#define AFUD_READ(afu, off) in_be64(afu->afu_desc_mmio + off)
#define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))

/* AFU descriptor word 0x0: process/interrupt counts and supported modes */
#define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0)
#define AFUD_NUM_INTS_PER_PROC(val) EXTRACT_PPC_BITS(val, 0, 15)
#define AFUD_NUM_PROCS(val) EXTRACT_PPC_BITS(val, 16, 31)
#define AFUD_NUM_CRS(val) EXTRACT_PPC_BITS(val, 32, 47)
#define AFUD_MULTIMODE(val) EXTRACT_PPC_BIT(val, 48)
#define AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55)
#define AFUD_DEDICATED_PROCESS(val) EXTRACT_PPC_BIT(val, 59)
#define AFUD_AFU_DIRECTED(val) EXTRACT_PPC_BIT(val, 61)
#define AFUD_TIME_SLICED(val) EXTRACT_PPC_BIT(val, 63)
/* AFU configuration record: length (256 byte units, see
 * cxl_read_afu_descriptor()) and offset */
#define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20)
#define AFUD_CR_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28)
/* Per-process problem state area: flags, length (4kB pages) and offset */
#define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30)
#define AFUD_PPPSA_PP(val) EXTRACT_PPC_BIT(val, 6)
#define AFUD_PPPSA_PSA(val) EXTRACT_PPC_BIT(val, 7)
#define AFUD_PPPSA_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38)
/* AFU error buffer length and offset */
#define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40)
#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
  103. u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off)
  104. {
  105. u64 aligned_off = off & ~0x3L;
  106. u32 val;
  107. val = cxl_afu_cr_read32(afu, cr, aligned_off);
  108. return (val >> ((off & 0x2) * 8)) & 0xffff;
  109. }
  110. u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
  111. {
  112. u64 aligned_off = off & ~0x3L;
  113. u32 val;
  114. val = cxl_afu_cr_read32(afu, cr, aligned_off);
  115. return (val >> ((off & 0x3) * 8)) & 0xff;
  116. }
  117. static DEFINE_PCI_DEVICE_TABLE(cxl_pci_tbl) = {
  118. { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
  119. { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
  120. { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
  121. { PCI_DEVICE_CLASS(0x120000, ~0), },
  122. { }
  123. };
  124. MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
/*
 * Mostly using these wrappers to avoid confusion:
 * priv 1 is BAR2, while priv 2 is BAR0
 */

/* Physical base of the priv 1 (PSL) register area (BAR2) */
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}

/* Length of the priv 1 register area */
static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}

/* Physical base of the priv 2 register area (BAR0) */
static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}

/* Length of the priv 2 register area */
static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}
  145. static int find_cxl_vsec(struct pci_dev *dev)
  146. {
  147. int vsec = 0;
  148. u16 val;
  149. while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
  150. pci_read_config_word(dev, vsec + 0x4, &val);
  151. if (val == CXL_PCI_VSEC_ID)
  152. return vsec;
  153. }
  154. return 0;
  155. }
  156. static void dump_cxl_config_space(struct pci_dev *dev)
  157. {
  158. int vsec;
  159. u32 val;
  160. dev_info(&dev->dev, "dump_cxl_config_space\n");
  161. pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
  162. dev_info(&dev->dev, "BAR0: %#.8x\n", val);
  163. pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
  164. dev_info(&dev->dev, "BAR1: %#.8x\n", val);
  165. pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
  166. dev_info(&dev->dev, "BAR2: %#.8x\n", val);
  167. pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
  168. dev_info(&dev->dev, "BAR3: %#.8x\n", val);
  169. pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
  170. dev_info(&dev->dev, "BAR4: %#.8x\n", val);
  171. pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
  172. dev_info(&dev->dev, "BAR5: %#.8x\n", val);
  173. dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
  174. p1_base(dev), p1_size(dev));
  175. dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
  176. p1_base(dev), p2_size(dev));
  177. dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
  178. pci_resource_start(dev, 4), pci_resource_len(dev, 4));
  179. if (!(vsec = find_cxl_vsec(dev)))
  180. return;
  181. #define show_reg(name, what) \
  182. dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)
  183. pci_read_config_dword(dev, vsec + 0x0, &val);
  184. show_reg("Cap ID", (val >> 0) & 0xffff);
  185. show_reg("Cap Ver", (val >> 16) & 0xf);
  186. show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
  187. pci_read_config_dword(dev, vsec + 0x4, &val);
  188. show_reg("VSEC ID", (val >> 0) & 0xffff);
  189. show_reg("VSEC Rev", (val >> 16) & 0xf);
  190. show_reg("VSEC Length", (val >> 20) & 0xfff);
  191. pci_read_config_dword(dev, vsec + 0x8, &val);
  192. show_reg("Num AFUs", (val >> 0) & 0xff);
  193. show_reg("Status", (val >> 8) & 0xff);
  194. show_reg("Mode Control", (val >> 16) & 0xff);
  195. show_reg("Reserved", (val >> 24) & 0xff);
  196. pci_read_config_dword(dev, vsec + 0xc, &val);
  197. show_reg("PSL Rev", (val >> 0) & 0xffff);
  198. show_reg("CAIA Ver", (val >> 16) & 0xffff);
  199. pci_read_config_dword(dev, vsec + 0x10, &val);
  200. show_reg("Base Image Rev", (val >> 0) & 0xffff);
  201. show_reg("Reserved", (val >> 16) & 0x0fff);
  202. show_reg("Image Control", (val >> 28) & 0x3);
  203. show_reg("Reserved", (val >> 30) & 0x1);
  204. show_reg("Image Loaded", (val >> 31) & 0x1);
  205. pci_read_config_dword(dev, vsec + 0x14, &val);
  206. show_reg("Reserved", val);
  207. pci_read_config_dword(dev, vsec + 0x18, &val);
  208. show_reg("Reserved", val);
  209. pci_read_config_dword(dev, vsec + 0x1c, &val);
  210. show_reg("Reserved", val);
  211. pci_read_config_dword(dev, vsec + 0x20, &val);
  212. show_reg("AFU Descriptor Offset", val);
  213. pci_read_config_dword(dev, vsec + 0x24, &val);
  214. show_reg("AFU Descriptor Size", val);
  215. pci_read_config_dword(dev, vsec + 0x28, &val);
  216. show_reg("Problem State Offset", val);
  217. pci_read_config_dword(dev, vsec + 0x2c, &val);
  218. show_reg("Problem State Size", val);
  219. pci_read_config_dword(dev, vsec + 0x30, &val);
  220. show_reg("Reserved", val);
  221. pci_read_config_dword(dev, vsec + 0x34, &val);
  222. show_reg("Reserved", val);
  223. pci_read_config_dword(dev, vsec + 0x38, &val);
  224. show_reg("Reserved", val);
  225. pci_read_config_dword(dev, vsec + 0x3c, &val);
  226. show_reg("Reserved", val);
  227. pci_read_config_dword(dev, vsec + 0x40, &val);
  228. show_reg("PSL Programming Port", val);
  229. pci_read_config_dword(dev, vsec + 0x44, &val);
  230. show_reg("PSL Programming Control", val);
  231. pci_read_config_dword(dev, vsec + 0x48, &val);
  232. show_reg("Reserved", val);
  233. pci_read_config_dword(dev, vsec + 0x4c, &val);
  234. show_reg("Reserved", val);
  235. pci_read_config_dword(dev, vsec + 0x50, &val);
  236. show_reg("Flash Address Register", val);
  237. pci_read_config_dword(dev, vsec + 0x54, &val);
  238. show_reg("Flash Size Register", val);
  239. pci_read_config_dword(dev, vsec + 0x58, &val);
  240. show_reg("Flash Status/Control Register", val);
  241. pci_read_config_dword(dev, vsec + 0x58, &val);
  242. show_reg("Flash Data Port", val);
  243. #undef show_reg
  244. }
/*
 * Debug aid: dump the raw AFU descriptor fields. Called from
 * cxl_init_afu() when cxl_verbose is set.
 */
static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

/* One log line per field so the output is easy to scan */
#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	/* Configuration record length/offset */
	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	val = AFUD_READ_CR_OFF(afu);
	show_reg("AFU_CR_offset", val);

	/* Per-process problem state area */
	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));
	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	/* Error buffer */
	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));
	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);

#undef show_reg
}
/*
 * One-time adapter-wide initialisation of implementation specific PSL
 * registers. The chip id of the PHB the card sits under is looked up in
 * the device tree so the PSL knows where to route data.
 * Returns 0 on success, -ENODEV if no PHB node or chip id can be found.
 */
static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	struct device_node *np;
	const __be32 *prop;
	u64 psl_dsnctl;
	u64 chipid;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	/* Walk up the tree until a node with an ibm,chip-id property is
	 * found; of_get_next_parent() drops the reference on the node it
	 * is handed, so refcounting stays balanced through the loop */
	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
		np = of_get_next_parent(np);
	if (!np)
		return -ENODEV;
	chipid = be32_to_cpup(prop);
	of_node_put(np);

	/* Tell PSL where to route data to */
	psl_dsnctl = 0x02E8900002000000ULL | (chipid << (63-5));
	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	/* set fir_accum */
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}
/*
 * Per-slice initialisation of implementation specific PSL registers.
 * The constants are hardware-specific magic values; always returns 0.
 */
static int init_implementation_afu_regs(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}
  315. int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
  316. unsigned int virq)
  317. {
  318. struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
  319. return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
  320. }
  321. int cxl_update_image_control(struct cxl *adapter)
  322. {
  323. struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
  324. int rc;
  325. int vsec;
  326. u8 image_state;
  327. if (!(vsec = find_cxl_vsec(dev))) {
  328. dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
  329. return -ENODEV;
  330. }
  331. if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
  332. dev_err(&dev->dev, "failed to read image state: %i\n", rc);
  333. return rc;
  334. }
  335. if (adapter->perst_loads_image)
  336. image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
  337. else
  338. image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
  339. if (adapter->perst_select_user)
  340. image_state |= CXL_VSEC_PERST_SELECT_USER;
  341. else
  342. image_state &= ~CXL_VSEC_PERST_SELECT_USER;
  343. if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
  344. dev_err(&dev->dev, "failed to update image control: %i\n", rc);
  345. return rc;
  346. }
  347. return 0;
  348. }
  349. int cxl_alloc_one_irq(struct cxl *adapter)
  350. {
  351. struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
  352. return pnv_cxl_alloc_hwirqs(dev, 1);
  353. }
  354. void cxl_release_one_irq(struct cxl *adapter, int hwirq)
  355. {
  356. struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
  357. return pnv_cxl_release_hwirqs(dev, hwirq, 1);
  358. }
  359. int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num)
  360. {
  361. struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
  362. return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
  363. }
  364. void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter)
  365. {
  366. struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
  367. pnv_cxl_release_hwirq_ranges(irqs, dev);
  368. }
  369. static int setup_cxl_bars(struct pci_dev *dev)
  370. {
  371. /* Safety check in case we get backported to < 3.17 without M64 */
  372. if ((p1_base(dev) < 0x100000000ULL) ||
  373. (p2_base(dev) < 0x100000000ULL)) {
  374. dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
  375. return -ENODEV;
  376. }
  377. /*
  378. * BAR 4/5 has a special meaning for CXL and must be programmed with a
  379. * special value corresponding to the CXL protocol address range.
  380. * For POWER 8 that means bits 48:49 must be set to 10
  381. */
  382. pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
  383. pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);
  384. return 0;
  385. }
  386. /* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
  387. static int switch_card_to_cxl(struct pci_dev *dev)
  388. {
  389. int vsec;
  390. u8 val;
  391. int rc;
  392. dev_info(&dev->dev, "switch card to CXL\n");
  393. if (!(vsec = find_cxl_vsec(dev))) {
  394. dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
  395. return -ENODEV;
  396. }
  397. if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
  398. dev_err(&dev->dev, "failed to read current mode control: %i", rc);
  399. return rc;
  400. }
  401. val &= ~CXL_VSEC_PROTOCOL_MASK;
  402. val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
  403. if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
  404. dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
  405. return rc;
  406. }
  407. /*
  408. * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
  409. * we must wait 100ms after this mode switch before touching
  410. * PCIe config space.
  411. */
  412. msleep(100);
  413. return 0;
  414. }
/*
 * Map the MMIO regions for one AFU slice: the per-slice priv 1 and
 * priv 2 register areas and, if present, the AFU descriptor.
 * Returns 0 on success or -ENOMEM if a mapping fails (anything already
 * mapped here is unwound).
 */
static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	u64 p1n_base, p2n_base, afu_desc;
	const u64 p1n_size = 0x100;
	const u64 p2n_size = 0x1000;

	/* Per-slice areas are laid out consecutively inside the BARs */
	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
	afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size));
	afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size);

	if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size)))
		goto err;
	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
		goto err1;
	/* The descriptor is optional: afu_desc stays 0 when the VSEC
	 * advertised no descriptor offset/size */
	if (afu_desc) {
		if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size)))
			goto err2;
	}

	return 0;
err2:
	iounmap(afu->p2n_mmio);
err1:
	iounmap(afu->p1n_mmio);
err:
	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
	return -ENOMEM;
}
  441. static void cxl_unmap_slice_regs(struct cxl_afu *afu)
  442. {
  443. if (afu->p1n_mmio)
  444. iounmap(afu->p2n_mmio);
  445. if (afu->p1n_mmio)
  446. iounmap(afu->p1n_mmio);
  447. }
  448. static void cxl_release_afu(struct device *dev)
  449. {
  450. struct cxl_afu *afu = to_cxl_afu(dev);
  451. pr_devel("cxl_release_afu\n");
  452. kfree(afu);
  453. }
  454. static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
  455. {
  456. struct cxl_afu *afu;
  457. if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
  458. return NULL;
  459. afu->adapter = adapter;
  460. afu->dev.parent = &adapter->dev;
  461. afu->dev.release = cxl_release_afu;
  462. afu->slice = slice;
  463. idr_init(&afu->contexts_idr);
  464. mutex_init(&afu->contexts_lock);
  465. spin_lock_init(&afu->afu_cntl_lock);
  466. mutex_init(&afu->spa_mutex);
  467. afu->prefault_mode = CXL_PREFAULT_NONE;
  468. afu->irqs_max = afu->adapter->user_irqs;
  469. return afu;
  470. }
  471. /* Expects AFU struct to have recently been zeroed out */
  472. static int cxl_read_afu_descriptor(struct cxl_afu *afu)
  473. {
  474. u64 val;
  475. val = AFUD_READ_INFO(afu);
  476. afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
  477. afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
  478. afu->crs_num = AFUD_NUM_CRS(val);
  479. if (AFUD_AFU_DIRECTED(val))
  480. afu->modes_supported |= CXL_MODE_DIRECTED;
  481. if (AFUD_DEDICATED_PROCESS(val))
  482. afu->modes_supported |= CXL_MODE_DEDICATED;
  483. if (AFUD_TIME_SLICED(val))
  484. afu->modes_supported |= CXL_MODE_TIME_SLICED;
  485. val = AFUD_READ_PPPSA(afu);
  486. afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
  487. afu->psa = AFUD_PPPSA_PSA(val);
  488. if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
  489. afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);
  490. val = AFUD_READ_CR(afu);
  491. afu->crs_len = AFUD_CR_LEN(val) * 256;
  492. afu->crs_offset = AFUD_READ_CR_OFF(afu);
  493. return 0;
  494. }
  495. static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
  496. {
  497. int i;
  498. if (afu->psa && afu->adapter->ps_size <
  499. (afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
  500. dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
  501. return -ENODEV;
  502. }
  503. if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
  504. dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");
  505. for (i = 0; i < afu->crs_num; i++) {
  506. if ((cxl_afu_cr_read32(afu, i, 0) == 0)) {
  507. dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
  508. return -EINVAL;
  509. }
  510. }
  511. return 0;
  512. }
/*
 * Bring the slice registers to a known-safe state before use.
 * Returns 0 on success, -EIO if the AFU could not be quiesced.
 */
static int sanitise_afu_regs(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgement to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		/* Force the AFU quiet before touching anything else */
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#.16llx\n", reg);
		if (cxl_afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}

	/* Zero every register holding an address or IVTE */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);

	/* Acknowledge any translation fault left pending from before */
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#.16llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}

	/* Clear stale slice error state; only bits above the low 16 are
	 * written back here — presumably the low 16 are masks/control
	 * rather than status, confirm against the PSL register docs */
	reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (reg) {
		if (reg & ~0xffff)
			dev_warn(&afu->dev, "AFU had pending SERR: %#.16llx\n", reg);
		cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
	}

	/* Clear any stale error status by writing it back */
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#.16llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}
/*
 * Allocate, map and bring up one AFU slice and register it with the
 * driver core/sysfs. On success the afu is stored in adapter->afu[].
 *
 * Note the two-phase error handling: once cxl_register_afu() has been
 * called the device core owns the structure, so kfree() must not be
 * used any more — device_unregister() drops the final reference
 * instead. The "free" flag tracks which phase we are in.
 */
static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice)))
		goto err1;

	if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
		goto err1;

	if ((rc = sanitise_afu_regs(afu)))
		goto err2;

	/* We need to reset the AFU before we can read the AFU descriptor */
	if ((rc = cxl_afu_reset(afu)))
		goto err2;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err2;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err2;

	if ((rc = init_implementation_afu_regs(afu)))
		goto err2;

	if ((rc = cxl_register_serr_irq(afu)))
		goto err2;

	if ((rc = cxl_register_psl_irq(afu)))
		goto err3;

	/* Don't care if this fails */
	cxl_debugfs_afu_add(afu);

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	return 0;

	/* Unwind in reverse order of the setup above */
err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;	/* device core now owns the afu — no kfree */
	cxl_debugfs_afu_remove(afu);
	cxl_release_psl_irq(afu);
err3:
	cxl_release_serr_irq(afu);
err2:
	cxl_unmap_slice_regs(afu);
err1:
	if (free)
		kfree(afu);
	return rc;
}
/*
 * Tear down one AFU slice: unpublish it (sysfs/debugfs/slice table),
 * detach any attached contexts and deactivate the mode, then release
 * IRQs and MMIO before dropping the device reference. Safe to call
 * with a NULL afu.
 */
static void cxl_remove_afu(struct cxl_afu *afu)
{
	pr_devel("cxl_remove_afu\n");

	if (!afu)
		return;

	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	/* Remove the slice from the adapter under the list lock so no
	 * one can find it any more */
	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_afu_deactivate_mode(afu);

	cxl_release_psl_irq(afu);
	cxl_release_serr_irq(afu);
	cxl_unmap_slice_regs(afu);

	/* Drops the final ref; cxl_release_afu() does the kfree */
	device_unregister(&afu->dev);
}
/*
 * Reset the adapter via a warm PCIe reset (PERST) and wait for EEH to
 * notice the resulting PHB fence. Returns 0 unless the reset request
 * itself failed; a missed EEH trigger is reported but not fatal.
 */
int cxl_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int i;
	u32 val;

	dev_info(&dev->dev, "CXL reset\n");

	/* Tear all slices down first; PERST will wipe their state anyway */
	for (i = 0; i < adapter->slices; i++)
		cxl_remove_afu(adapter->afu[i]);

	/* pcie_warm_reset requests a fundamental pci reset which includes a
	 * PERST assert/deassert. PERST triggers a loading of the image
	 * if "user" or "factory" is selected in sysfs */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	/* the PERST done above fences the PHB. So, reset depends on EEH
	 * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
	 * the driver. Do an mmio read explicitly to ensure EEH notices the
	 * fenced PHB. Retry for a few seconds before giving up. */
	i = 0;
	while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
	       (i < 5)) {
		msleep(500);
		i++;
	}

	/* All-ones means the PHB is fenced (EEH will kick in); anything
	 * else after ~2.5s of retries means the PERST didn't take */
	if (val != 0xffffffff)
		dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");

	/* rc is 0 here; the failure above is reported but not returned */
	return rc;
}
  668. static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
  669. {
  670. if (pci_request_region(dev, 2, "priv 2 regs"))
  671. goto err1;
  672. if (pci_request_region(dev, 0, "priv 1 regs"))
  673. goto err2;
  674. pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx",
  675. p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
  676. if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
  677. goto err3;
  678. if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
  679. goto err4;
  680. return 0;
  681. err4:
  682. iounmap(adapter->p1_mmio);
  683. adapter->p1_mmio = NULL;
  684. err3:
  685. pci_release_region(dev, 0);
  686. err2:
  687. pci_release_region(dev, 2);
  688. err1:
  689. return -ENOMEM;
  690. }
  691. static void cxl_unmap_adapter_regs(struct cxl *adapter)
  692. {
  693. if (adapter->p1_mmio)
  694. iounmap(adapter->p1_mmio);
  695. if (adapter->p2_mmio)
  696. iounmap(adapter->p2_mmio);
  697. }
/*
 * Parse the CXL VSEC into the adapter structure: status, revisions,
 * slice count, and the AFU descriptor / problem state geometry (which
 * the VSEC stores in 64kB units and we convert to bytes).
 * Returns 0 on success, -ENODEV if no VSEC, -EINVAL if it's truncated.
 */
static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u16 vseclen;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&adapter->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		pr_err("ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	/* Default: reload on PERST whichever image is currently live */
	adapter->perst_loads_image = true;
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* Convert everything to bytes, because there is NO WAY I'd look at the
	 * code a month later and forget what units these are in ;-) */
	adapter->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->afu_desc_size = afu_desc_size * 64 * 1024;

	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}
  738. static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
  739. {
  740. if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
  741. return -EBUSY;
  742. if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
  743. dev_err(&adapter->dev, "ABORTING: CXL requires unsupported features\n");
  744. return -EINVAL;
  745. }
  746. if (!adapter->slices) {
  747. /* Once we support dynamic reprogramming we can use the card if
  748. * it supports loadable AFUs */
  749. dev_err(&adapter->dev, "ABORTING: Device has no AFUs\n");
  750. return -EINVAL;
  751. }
  752. if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
  753. dev_err(&adapter->dev, "ABORTING: VSEC shows no AFU descriptors\n");
  754. return -EINVAL;
  755. }
  756. if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
  757. dev_err(&adapter->dev, "ABORTING: Problem state size larger than "
  758. "available in BAR2: 0x%llx > 0x%llx\n",
  759. adapter->ps_size, p2_size(dev) - adapter->ps_off);
  760. return -EINVAL;
  761. }
  762. return 0;
  763. }
  764. static void cxl_release_adapter(struct device *dev)
  765. {
  766. struct cxl *adapter = to_cxl_adapter(dev);
  767. pr_devel("cxl_release_adapter\n");
  768. kfree(adapter);
  769. }
  770. static struct cxl *cxl_alloc_adapter(struct pci_dev *dev)
  771. {
  772. struct cxl *adapter;
  773. if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
  774. return NULL;
  775. adapter->dev.parent = &dev->dev;
  776. adapter->dev.release = cxl_release_adapter;
  777. pci_set_drvdata(dev, adapter);
  778. spin_lock_init(&adapter->afu_list_lock);
  779. return adapter;
  780. }
/*
 * Put the adapter's PSL registers into a known-clean state: clear the
 * error interrupt vector table entry, then invalidate the TLB and SLB.
 * Returns the result of the invalidate (0 on success).
 */
static int sanitise_adapter_regs(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	return cxl_tlb_slb_invalidate(adapter);
}
/*
 * Full adapter bring-up: allocate the adapter, switch the card to CXL
 * mode, read and validate the VSEC, map the register BARs, initialise
 * the PSL, enable CAPI mode on the PHB, register the error IRQ and
 * finally register the adapter device and its sysfs entries.
 *
 * Returns the adapter on success, or an ERR_PTR on failure with
 * everything unwound.
 *
 * NOTE the two-phase ownership: before cxl_register_adapter() we own the
 * allocation and must kfree() it ourselves ('free' stays true); once
 * device_register() has been attempted, freeing is the job of the
 * release callback via device_unregister(), so 'free' is set false.
 */
static struct cxl *cxl_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	bool free = true;	/* do we still own the raw allocation? */
	int rc;

	if (!(adapter = cxl_alloc_adapter(dev)))
		return ERR_PTR(-ENOMEM);
	if ((rc = switch_card_to_cxl(dev)))
		goto err1;
	if ((rc = cxl_alloc_adapter_nr(adapter)))
		goto err1;
	if ((rc = dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)))
		goto err2;
	if ((rc = cxl_read_vsec(adapter, dev)))
		goto err2;
	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		goto err2;
	if ((rc = cxl_update_image_control(adapter)))
		goto err2;
	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		goto err2;
	if ((rc = sanitise_adapter_regs(adapter)))
		goto err2;
	if ((rc = init_implementation_adapter_regs(adapter, dev)))
		goto err3;
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
		goto err3;
	/* If recovery happened, the last step is to turn on snooping.
	 * In the non-recovery case this has no effect */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON))) {
		goto err3;
	}
	if ((rc = cxl_register_psl_err_irq(adapter)))
		goto err3;

	/* Don't care if this one fails: */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	/* device_register was attempted: the release callback frees us now */
	device_unregister(&adapter->dev);
	free = false;
	cxl_debugfs_adapter_remove(adapter);
	cxl_release_psl_err_irq(adapter);
err3:
	cxl_unmap_adapter_regs(adapter);
err2:
	cxl_remove_adapter_nr(adapter);
err1:
	if (free)
		kfree(adapter);
	return ERR_PTR(rc);
}
/*
 * Tear down an adapter brought up by cxl_init_adapter(): remove sysfs
 * and debugfs entries, release the PSL error IRQ, unmap and release the
 * register BARs and disable the PCI device.
 *
 * device_unregister() must run before the BAR release/disable steps are
 * issued on pdev, and drops the reference that ultimately frees the
 * adapter via cxl_release_adapter() -- do not touch 'adapter' after it.
 */
static void cxl_remove_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	/* NOTE(review): message says "release" but this is the remove path */
	pr_devel("cxl_release_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);
	cxl_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);
	cxl_remove_adapter_nr(adapter);
	device_unregister(&adapter->dev);

	pci_release_region(pdev, 0);
	pci_release_region(pdev, 2);
	pci_disable_device(pdev);
}
  859. static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
  860. {
  861. struct cxl *adapter;
  862. int slice;
  863. int rc;
  864. pci_dev_get(dev);
  865. if (cxl_verbose)
  866. dump_cxl_config_space(dev);
  867. if ((rc = setup_cxl_bars(dev)))
  868. return rc;
  869. if ((rc = pci_enable_device(dev))) {
  870. dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
  871. return rc;
  872. }
  873. adapter = cxl_init_adapter(dev);
  874. if (IS_ERR(adapter)) {
  875. dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
  876. return PTR_ERR(adapter);
  877. }
  878. for (slice = 0; slice < adapter->slices; slice++) {
  879. if ((rc = cxl_init_afu(adapter, slice, dev)))
  880. dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
  881. }
  882. return 0;
  883. }
/*
 * PCI remove entry point: tear down every AFU slice, then the adapter
 * itself (which unmaps/releases the BARs and disables the device).
 */
static void cxl_remove(struct pci_dev *dev)
{
	struct cxl *adapter = pci_get_drvdata(dev);
	int afu;

	dev_warn(&dev->dev, "pci remove\n");

	/*
	 * NOTE(review): the original comment claimed a lock is held here to
	 * stop new references being taken via the adapter list, but no lock
	 * is taken in this function -- presumably the AFU/adapter removal
	 * helpers serialise against lookups internally; verify.
	 */
	for (afu = 0; afu < adapter->slices; afu++)
		cxl_remove_afu(adapter->afu[afu]);
	cxl_remove_adapter(adapter);
}
/* PCI driver binding for CXL cards: matches cxl_pci_tbl and dispatches to
 * the probe/remove entry points above. */
struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
};